
upgrade rocksdb to V5.18.X (#7792)

Wilfried Goesgens 2018-12-19 19:34:55 +01:00 committed by Jan
parent c30947961f
commit c3e73ba45b
1394 changed files with 23491 additions and 6896 deletions

3rdParty/README_maintainers.md (vendored, new file, 111 lines added)

@@ -0,0 +1,111 @@
3rd Party components and what to do on update
=============================================
## asio
https://think-async.com/Asio/
## boost
https://www.boost.org/
(we don't ship the upstream documentation!)
## catch
https://github.com/catchorg/Catch2
## cmake
Custom boost locator
## curl
HTTP client library https://curl.haxx.se/
We apply several build system patches to avoid unnecessary recompiles and to fix bugs.
## date
Forward port of the C++20 date/time class.
## fakeit
Mocking library. https://github.com/eranpeer/FakeIt
## fuerte
Our C++ client driver, capable of speaking VelocyStream. Maintained at https://github.com/arangodb/fuerte
## iresearch
This contains the iresearch library and its sub-components; ICU is used from V8.
## iresearch.build
This contains statically generated files for the IResearch folder, which replace the files that would otherwise be generated during the build.
## jemalloc
Only used on Linux/macOS; it still uses autotools.
## linenoise-ng
Our maintained fork of linenoise
https://github.com/arangodb/linenoise-ng
We may want to switch to replxx (uniting our & other forks):
https://github.com/AmokHuginnsson/replxx
## lz4
https://github.com/lz4/lz4
## rocksdb
Our branch is maintained at:
https://github.com/arangodb-helper/rocksdb
Most changes can be ported upstream:
https://github.com/facebook/rocksdb
On Upgrade:
- thirdparty.inc needs to be adjusted to use the snappy we specify (see the sketch below)
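
For illustration, the adjustment usually looks something like the sketch below (variable names as they appear in RocksDB's `thirdparty.inc`; the paths here are purely hypothetical placeholders, not our actual layout):

```cmake
# Hypothetical sketch only: point RocksDB's thirdparty.inc at the snappy
# we ship instead of a separately installed copy. Real paths differ.
set(SNAPPY_HOME ${CMAKE_CURRENT_SOURCE_DIR}/../snappy)
set(SNAPPY_INCLUDE ${SNAPPY_HOME} ${CMAKE_CURRENT_BINARY_DIR}/../snappy)
set(SNAPPY_LIB_DEBUG snappy)
set(SNAPPY_LIB_RELEASE snappy)
```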
## s2geometry
http://s2geometry.io/
## snappy
Compression library
https://github.com/google/snappy
## snowball
http://snowball.tartarus.org/ provides stemming for IResearch. We use the latest upstream release together with CMake build files that we maintain.
## V8
JavaScript engine.
This is maintained via https://github.com/arangodb-helper/v8
Upstream is: https://chromium.googlesource.com/v8/v8.git
- On upgrade, the ICU data file(s) need to be replaced with the ones from upstream ICU,
  since the V8 copy doesn't contain all locales (known as full-icu, ~25 MB icudt*.dat).
## velocypack
Our fast and compact format for serialization and storage.
Maintained at:
https://github.com/arangodb/velocypack/
## zlib
ZLib compression library https://zlib.net/


@@ -4,7 +4,7 @@ project(rocksdb C CXX)
set(FAIL_ON_WARNINGS OFF CACHE BOOL "do not enable -Werror")
set(ARANGO_ROCKSDB_VERSION "v5.16.X")
set(ARANGO_ROCKSDB_VERSION "v5.18.X")
set(ARANGO_ROCKSDB_VERSION "${ARANGO_ROCKSDB_VERSION}" PARENT_SCOPE)
# jemalloc settings


@@ -1 +1 @@
-v5.16.X/cmake/RocksDBConfig.cmake.in
+v5.18.X/cmake/RocksDBConfig.cmake.in


@@ -1,52 +0,0 @@
// Copyright (c) 2018-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
#pragma once
#include "monitoring/instrumented_mutex.h"
#include "options/db_options.h"
#include "rocksdb/listener.h"
#include "rocksdb/status.h"
namespace rocksdb {
class ErrorHandler {
public:
ErrorHandler(const ImmutableDBOptions& db_options,
InstrumentedMutex* db_mutex)
: db_options_(db_options),
bg_error_(Status::OK()),
db_mutex_(db_mutex)
{}
~ErrorHandler() {}
Status::Severity GetErrorSeverity(BackgroundErrorReason reason,
Status::Code code, Status::SubCode subcode);
Status SetBGError(const Status& bg_err, BackgroundErrorReason reason);
Status GetBGError()
{
return bg_error_;
}
void ClearBGError() {
bg_error_ = Status::OK();
}
bool IsDBStopped() {
return !bg_error_.ok();
}
bool IsBGWorkStopped() {
return !bg_error_.ok();
}
private:
const ImmutableDBOptions& db_options_;
Status bg_error_;
InstrumentedMutex* db_mutex_;
};
}
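
Applications never see `ErrorHandler` directly, but the background errors it tracks surface through RocksDB's public listener API. A minimal sketch, assuming only the public headers (the listener class and its bookkeeping are hypothetical):

```cpp
#include <rocksdb/listener.h>
#include <rocksdb/status.h>

// Hypothetical listener: remember whether the last background error left
// the DB in a recoverable state, so the application can decide whether
// calling DB::Resume() after fixing the cause (e.g. a full disk) makes sense.
class BGErrorWatcher : public rocksdb::EventListener {
 public:
  void OnBackgroundError(rocksdb::BackgroundErrorReason /*reason*/,
                         rocksdb::Status* bg_error) override {
    // Soft errors (e.g. ENOSPC during flush) keep the DB recoverable.
    recoverable_ =
        bg_error->severity() <= rocksdb::Status::Severity::kSoftError;
  }
  bool recoverable() const { return recoverable_; }

 private:
  bool recoverable_ = false;
};
```

Such a listener would be registered via `options.listeners`; the deleted tests below exercise the same recovery path via `Resume()`.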


@@ -1,138 +0,0 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
#include "db/db_test_util.h"
#include "port/stack_trace.h"
#include "rocksdb/perf_context.h"
#include "util/fault_injection_test_env.h"
#if !defined(ROCKSDB_LITE)
#include "util/sync_point.h"
#endif
namespace rocksdb {
class DBErrorHandlingTest : public DBTestBase {
public:
DBErrorHandlingTest() : DBTestBase("/db_error_handling_test") {}
};
class DBErrorHandlingEnv : public EnvWrapper {
public:
DBErrorHandlingEnv() : EnvWrapper(Env::Default()),
trig_no_space(false), trig_io_error(false) {}
void SetTrigNoSpace() {trig_no_space = true;}
void SetTrigIoError() {trig_io_error = true;}
private:
bool trig_no_space;
bool trig_io_error;
};
TEST_F(DBErrorHandlingTest, FLushWriteError) {
std::unique_ptr<FaultInjectionTestEnv> fault_env(
new FaultInjectionTestEnv(Env::Default()));
Options options = GetDefaultOptions();
options.create_if_missing = true;
options.env = fault_env.get();
Status s;
DestroyAndReopen(options);
Put(Key(0), "va;");
SyncPoint::GetInstance()->SetCallBack(
"FlushJob::Start", [&](void *) {
fault_env->SetFilesystemActive(false, Status::NoSpace("Out of space"));
});
SyncPoint::GetInstance()->EnableProcessing();
s = Flush();
ASSERT_EQ(s.severity(), rocksdb::Status::Severity::kSoftError);
fault_env->SetFilesystemActive(true);
s = dbfull()->Resume();
ASSERT_EQ(s, Status::OK());
Destroy(options);
}
TEST_F(DBErrorHandlingTest, CompactionWriteError) {
std::unique_ptr<FaultInjectionTestEnv> fault_env(
new FaultInjectionTestEnv(Env::Default()));
Options options = GetDefaultOptions();
options.create_if_missing = true;
options.level0_file_num_compaction_trigger = 2;
options.env = fault_env.get();
Status s;
DestroyAndReopen(options);
Put(Key(0), "va;");
Put(Key(2), "va;");
s = Flush();
ASSERT_EQ(s, Status::OK());
rocksdb::SyncPoint::GetInstance()->LoadDependency(
{{"FlushMemTableFinished", "BackgroundCallCompaction:0"}});
rocksdb::SyncPoint::GetInstance()->SetCallBack(
"BackgroundCallCompaction:0", [&](void *) {
fault_env->SetFilesystemActive(false, Status::NoSpace("Out of space"));
});
rocksdb::SyncPoint::GetInstance()->EnableProcessing();
Put(Key(1), "val");
s = Flush();
ASSERT_EQ(s, Status::OK());
s = dbfull()->TEST_WaitForCompact();
ASSERT_EQ(s.severity(), rocksdb::Status::Severity::kSoftError);
fault_env->SetFilesystemActive(true);
s = dbfull()->Resume();
ASSERT_EQ(s, Status::OK());
Destroy(options);
}
TEST_F(DBErrorHandlingTest, CorruptionError) {
std::unique_ptr<FaultInjectionTestEnv> fault_env(
new FaultInjectionTestEnv(Env::Default()));
Options options = GetDefaultOptions();
options.create_if_missing = true;
options.level0_file_num_compaction_trigger = 2;
options.env = fault_env.get();
Status s;
DestroyAndReopen(options);
Put(Key(0), "va;");
Put(Key(2), "va;");
s = Flush();
ASSERT_EQ(s, Status::OK());
rocksdb::SyncPoint::GetInstance()->LoadDependency(
{{"FlushMemTableFinished", "BackgroundCallCompaction:0"}});
rocksdb::SyncPoint::GetInstance()->SetCallBack(
"BackgroundCallCompaction:0", [&](void *) {
fault_env->SetFilesystemActive(false, Status::Corruption("Corruption"));
});
rocksdb::SyncPoint::GetInstance()->EnableProcessing();
Put(Key(1), "val");
s = Flush();
ASSERT_EQ(s, Status::OK());
s = dbfull()->TEST_WaitForCompact();
ASSERT_EQ(s.severity(), rocksdb::Status::Severity::kUnrecoverableError);
fault_env->SetFilesystemActive(true);
s = dbfull()->Resume();
ASSERT_NE(s, Status::OK());
Destroy(options);
}
} // namespace rocksdb
int main(int argc, char** argv) {
rocksdb::port::InstallStackTraceHandler();
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}


@@ -1,238 +0,0 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
#include <algorithm>
#include <map>
#include <string>
#include "db/column_family.h"
#include "db/flush_job.h"
#include "db/version_set.h"
#include "rocksdb/cache.h"
#include "rocksdb/write_buffer_manager.h"
#include "table/mock_table.h"
#include "util/file_reader_writer.h"
#include "util/string_util.h"
#include "util/testharness.h"
#include "util/testutil.h"
namespace rocksdb {
// TODO(icanadi) Mock out everything else:
// 1. VersionSet
// 2. Memtable
class FlushJobTest : public testing::Test {
public:
FlushJobTest()
: env_(Env::Default()),
dbname_(test::PerThreadDBPath("flush_job_test")),
options_(),
db_options_(options_),
table_cache_(NewLRUCache(50000, 16)),
write_buffer_manager_(db_options_.db_write_buffer_size),
versions_(new VersionSet(dbname_, &db_options_, env_options_,
table_cache_.get(), &write_buffer_manager_,
&write_controller_)),
shutting_down_(false),
mock_table_factory_(new mock::MockTableFactory()) {
EXPECT_OK(env_->CreateDirIfMissing(dbname_));
db_options_.db_paths.emplace_back(dbname_,
std::numeric_limits<uint64_t>::max());
db_options_.statistics = rocksdb::CreateDBStatistics();
// TODO(icanadi) Remove this once we mock out VersionSet
NewDB();
std::vector<ColumnFamilyDescriptor> column_families;
cf_options_.table_factory = mock_table_factory_;
column_families.emplace_back(kDefaultColumnFamilyName, cf_options_);
EXPECT_OK(versions_->Recover(column_families, false));
}
void NewDB() {
VersionEdit new_db;
new_db.SetLogNumber(0);
new_db.SetNextFile(2);
new_db.SetLastSequence(0);
const std::string manifest = DescriptorFileName(dbname_, 1);
unique_ptr<WritableFile> file;
Status s = env_->NewWritableFile(
manifest, &file, env_->OptimizeForManifestWrite(env_options_));
ASSERT_OK(s);
unique_ptr<WritableFileWriter> file_writer(
new WritableFileWriter(std::move(file), EnvOptions()));
{
log::Writer log(std::move(file_writer), 0, false);
std::string record;
new_db.EncodeTo(&record);
s = log.AddRecord(record);
}
ASSERT_OK(s);
// Make "CURRENT" file that points to the new manifest file.
s = SetCurrentFile(env_, dbname_, 1, nullptr);
}
Env* env_;
std::string dbname_;
EnvOptions env_options_;
Options options_;
ImmutableDBOptions db_options_;
std::shared_ptr<Cache> table_cache_;
WriteController write_controller_;
WriteBufferManager write_buffer_manager_;
ColumnFamilyOptions cf_options_;
std::unique_ptr<VersionSet> versions_;
InstrumentedMutex mutex_;
std::atomic<bool> shutting_down_;
std::shared_ptr<mock::MockTableFactory> mock_table_factory_;
};
TEST_F(FlushJobTest, Empty) {
JobContext job_context(0);
auto cfd = versions_->GetColumnFamilySet()->GetDefault();
EventLogger event_logger(db_options_.info_log.get());
SnapshotChecker* snapshot_checker = nullptr; // not relevant
FlushJob flush_job(
dbname_, versions_->GetColumnFamilySet()->GetDefault(), db_options_,
*cfd->GetLatestMutableCFOptions(), env_options_, versions_.get(), &mutex_,
&shutting_down_, {}, kMaxSequenceNumber, snapshot_checker, &job_context,
nullptr, nullptr, nullptr, kNoCompression, nullptr, &event_logger, false);
{
InstrumentedMutexLock l(&mutex_);
flush_job.PickMemTable();
ASSERT_OK(flush_job.Run());
}
job_context.Clean();
}
TEST_F(FlushJobTest, NonEmpty) {
JobContext job_context(0);
auto cfd = versions_->GetColumnFamilySet()->GetDefault();
auto new_mem = cfd->ConstructNewMemtable(*cfd->GetLatestMutableCFOptions(),
kMaxSequenceNumber);
new_mem->Ref();
auto inserted_keys = mock::MakeMockFile();
// Test data:
// seqno [ 1, 2 ... 8998, 8999, 9000, 9001, 9002 ... 9999 ]
// key [ 1001, 1002 ... 9998, 9999, 0, 1, 2 ... 999 ]
// range-delete "9995" -> "9999" at seqno 10000
for (int i = 1; i < 10000; ++i) {
std::string key(ToString((i + 1000) % 10000));
std::string value("value" + key);
new_mem->Add(SequenceNumber(i), kTypeValue, key, value);
if ((i + 1000) % 10000 < 9995) {
InternalKey internal_key(key, SequenceNumber(i), kTypeValue);
inserted_keys.insert({internal_key.Encode().ToString(), value});
}
}
new_mem->Add(SequenceNumber(10000), kTypeRangeDeletion, "9995", "9999a");
InternalKey internal_key("9995", SequenceNumber(10000), kTypeRangeDeletion);
inserted_keys.insert({internal_key.Encode().ToString(), "9999a"});
autovector<MemTable*> to_delete;
cfd->imm()->Add(new_mem, &to_delete);
for (auto& m : to_delete) {
delete m;
}
EventLogger event_logger(db_options_.info_log.get());
SnapshotChecker* snapshot_checker = nullptr; // not relevant
FlushJob flush_job(dbname_, versions_->GetColumnFamilySet()->GetDefault(),
db_options_, *cfd->GetLatestMutableCFOptions(),
env_options_, versions_.get(), &mutex_, &shutting_down_,
{}, kMaxSequenceNumber, snapshot_checker, &job_context,
nullptr, nullptr, nullptr, kNoCompression,
db_options_.statistics.get(), &event_logger, true);
HistogramData hist;
FileMetaData file_meta;
mutex_.Lock();
flush_job.PickMemTable();
ASSERT_OK(flush_job.Run(nullptr, &file_meta));
mutex_.Unlock();
db_options_.statistics->histogramData(FLUSH_TIME, &hist);
ASSERT_GT(hist.average, 0.0);
ASSERT_EQ(ToString(0), file_meta.smallest.user_key().ToString());
ASSERT_EQ(
"9999a",
file_meta.largest.user_key().ToString()); // range tombstone end key
ASSERT_EQ(1, file_meta.fd.smallest_seqno);
ASSERT_EQ(10000, file_meta.fd.largest_seqno); // range tombstone seqnum 10000
mock_table_factory_->AssertSingleFile(inserted_keys);
job_context.Clean();
}
TEST_F(FlushJobTest, Snapshots) {
JobContext job_context(0);
auto cfd = versions_->GetColumnFamilySet()->GetDefault();
auto new_mem = cfd->ConstructNewMemtable(*cfd->GetLatestMutableCFOptions(),
kMaxSequenceNumber);
std::vector<SequenceNumber> snapshots;
std::set<SequenceNumber> snapshots_set;
int keys = 10000;
int max_inserts_per_keys = 8;
Random rnd(301);
for (int i = 0; i < keys / 2; ++i) {
snapshots.push_back(rnd.Uniform(keys * (max_inserts_per_keys / 2)) + 1);
snapshots_set.insert(snapshots.back());
}
std::sort(snapshots.begin(), snapshots.end());
new_mem->Ref();
SequenceNumber current_seqno = 0;
auto inserted_keys = mock::MakeMockFile();
for (int i = 1; i < keys; ++i) {
std::string key(ToString(i));
int insertions = rnd.Uniform(max_inserts_per_keys);
for (int j = 0; j < insertions; ++j) {
std::string value(test::RandomHumanReadableString(&rnd, 10));
auto seqno = ++current_seqno;
new_mem->Add(SequenceNumber(seqno), kTypeValue, key, value);
// a key is visible only if:
// 1. it's the last one written (j == insertions - 1)
// 2. there's a snapshot pointing at it
bool visible = (j == insertions - 1) ||
(snapshots_set.find(seqno) != snapshots_set.end());
if (visible) {
InternalKey internal_key(key, seqno, kTypeValue);
inserted_keys.insert({internal_key.Encode().ToString(), value});
}
}
}
autovector<MemTable*> to_delete;
cfd->imm()->Add(new_mem, &to_delete);
for (auto& m : to_delete) {
delete m;
}
EventLogger event_logger(db_options_.info_log.get());
SnapshotChecker* snapshot_checker = nullptr; // not relevant
FlushJob flush_job(dbname_, versions_->GetColumnFamilySet()->GetDefault(),
db_options_, *cfd->GetLatestMutableCFOptions(),
env_options_, versions_.get(), &mutex_, &shutting_down_,
snapshots, kMaxSequenceNumber, snapshot_checker,
&job_context, nullptr, nullptr, nullptr, kNoCompression,
db_options_.statistics.get(), &event_logger, true);
mutex_.Lock();
flush_job.PickMemTable();
ASSERT_OK(flush_job.Run());
mutex_.Unlock();
mock_table_factory_->AssertSingleFile(inserted_keys);
HistogramData hist;
db_options_.statistics->histogramData(FLUSH_TIME, &hist);
ASSERT_GT(hist.average, 0.0);
job_context.Clean();
}
} // namespace rocksdb
int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}


@@ -1,615 +0,0 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
#include "db/memtable_list.h"
#include <algorithm>
#include <string>
#include <vector>
#include "db/merge_context.h"
#include "db/range_del_aggregator.h"
#include "db/version_set.h"
#include "db/write_controller.h"
#include "rocksdb/db.h"
#include "rocksdb/status.h"
#include "rocksdb/write_buffer_manager.h"
#include "util/string_util.h"
#include "util/testharness.h"
#include "util/testutil.h"
namespace rocksdb {
class MemTableListTest : public testing::Test {
public:
std::string dbname;
DB* db;
Options options;
MemTableListTest() : db(nullptr) {
dbname = test::PerThreadDBPath("memtable_list_test");
}
// Create a test db if not yet created
void CreateDB() {
if (db == nullptr) {
options.create_if_missing = true;
DestroyDB(dbname, options);
Status s = DB::Open(options, dbname, &db);
EXPECT_OK(s);
}
}
~MemTableListTest() {
if (db) {
delete db;
DestroyDB(dbname, options);
}
}
// Calls MemTableList::InstallMemtableFlushResults() and sets up all
// structures needed to call this function.
Status Mock_InstallMemtableFlushResults(
MemTableList* list, const MutableCFOptions& mutable_cf_options,
const autovector<MemTable*>& m, autovector<MemTable*>* to_delete) {
// Create a mock Logger
test::NullLogger logger;
LogBuffer log_buffer(DEBUG_LEVEL, &logger);
// Create a mock VersionSet
DBOptions db_options;
ImmutableDBOptions immutable_db_options(db_options);
EnvOptions env_options;
shared_ptr<Cache> table_cache(NewLRUCache(50000, 16));
WriteBufferManager write_buffer_manager(db_options.db_write_buffer_size);
WriteController write_controller(10000000u);
CreateDB();
VersionSet versions(dbname, &immutable_db_options, env_options,
table_cache.get(), &write_buffer_manager,
&write_controller);
// Create mock default ColumnFamilyData
ColumnFamilyOptions cf_options;
std::vector<ColumnFamilyDescriptor> column_families;
column_families.emplace_back(kDefaultColumnFamilyName, cf_options);
EXPECT_OK(versions.Recover(column_families, false));
auto column_family_set = versions.GetColumnFamilySet();
auto cfd = column_family_set->GetColumnFamily(0);
EXPECT_TRUE(cfd != nullptr);
// Create dummy mutex.
InstrumentedMutex mutex;
InstrumentedMutexLock l(&mutex);
LogsWithPrepTracker dummy_prep_tracker;
return list->InstallMemtableFlushResults(
cfd, mutable_cf_options, m, &dummy_prep_tracker, &versions, &mutex, 1,
to_delete, nullptr, &log_buffer);
}
};
TEST_F(MemTableListTest, Empty) {
// Create an empty MemTableList and validate basic functions.
MemTableList list(1, 0);
ASSERT_EQ(0, list.NumNotFlushed());
ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
ASSERT_FALSE(list.IsFlushPending());
autovector<MemTable*> mems;
list.PickMemtablesToFlush(&mems);
ASSERT_EQ(0, mems.size());
autovector<MemTable*> to_delete;
list.current()->Unref(&to_delete);
ASSERT_EQ(0, to_delete.size());
}
TEST_F(MemTableListTest, GetTest) {
// Create MemTableList
int min_write_buffer_number_to_merge = 2;
int max_write_buffer_number_to_maintain = 0;
MemTableList list(min_write_buffer_number_to_merge,
max_write_buffer_number_to_maintain);
SequenceNumber seq = 1;
std::string value;
Status s;
MergeContext merge_context;
InternalKeyComparator ikey_cmp(options.comparator);
RangeDelAggregator range_del_agg(ikey_cmp, {} /* snapshots */);
autovector<MemTable*> to_delete;
LookupKey lkey("key1", seq);
bool found = list.current()->Get(lkey, &value, &s, &merge_context,
&range_del_agg, ReadOptions());
ASSERT_FALSE(found);
// Create a MemTable
InternalKeyComparator cmp(BytewiseComparator());
auto factory = std::make_shared<SkipListFactory>();
options.memtable_factory = factory;
ImmutableCFOptions ioptions(options);
WriteBufferManager wb(options.db_write_buffer_size);
MemTable* mem = new MemTable(cmp, ioptions, MutableCFOptions(options), &wb,
kMaxSequenceNumber, 0 /* column_family_id */);
mem->Ref();
// Write some keys to this memtable.
mem->Add(++seq, kTypeDeletion, "key1", "");
mem->Add(++seq, kTypeValue, "key2", "value2");
mem->Add(++seq, kTypeValue, "key1", "value1");
mem->Add(++seq, kTypeValue, "key2", "value2.2");
// Fetch the newly written keys
merge_context.Clear();
found = mem->Get(LookupKey("key1", seq), &value, &s, &merge_context,
&range_del_agg, ReadOptions());
ASSERT_TRUE(s.ok() && found);
ASSERT_EQ(value, "value1");
merge_context.Clear();
found = mem->Get(LookupKey("key1", 2), &value, &s, &merge_context,
&range_del_agg, ReadOptions());
// MemTable found out that this key is *not* found (at this sequence#)
ASSERT_TRUE(found && s.IsNotFound());
merge_context.Clear();
found = mem->Get(LookupKey("key2", seq), &value, &s, &merge_context,
&range_del_agg, ReadOptions());
ASSERT_TRUE(s.ok() && found);
ASSERT_EQ(value, "value2.2");
ASSERT_EQ(4, mem->num_entries());
ASSERT_EQ(1, mem->num_deletes());
// Add memtable to list
list.Add(mem, &to_delete);
SequenceNumber saved_seq = seq;
// Create another memtable and write some keys to it
WriteBufferManager wb2(options.db_write_buffer_size);
MemTable* mem2 = new MemTable(cmp, ioptions, MutableCFOptions(options), &wb2,
kMaxSequenceNumber, 0 /* column_family_id */);
mem2->Ref();
mem2->Add(++seq, kTypeDeletion, "key1", "");
mem2->Add(++seq, kTypeValue, "key2", "value2.3");
// Add second memtable to list
list.Add(mem2, &to_delete);
// Fetch keys via MemTableList
merge_context.Clear();
found = list.current()->Get(LookupKey("key1", seq), &value, &s,
&merge_context, &range_del_agg, ReadOptions());
ASSERT_TRUE(found && s.IsNotFound());
merge_context.Clear();
found = list.current()->Get(LookupKey("key1", saved_seq), &value, &s,
&merge_context, &range_del_agg, ReadOptions());
ASSERT_TRUE(s.ok() && found);
ASSERT_EQ("value1", value);
merge_context.Clear();
found = list.current()->Get(LookupKey("key2", seq), &value, &s,
&merge_context, &range_del_agg, ReadOptions());
ASSERT_TRUE(s.ok() && found);
ASSERT_EQ(value, "value2.3");
merge_context.Clear();
found = list.current()->Get(LookupKey("key2", 1), &value, &s, &merge_context,
&range_del_agg, ReadOptions());
ASSERT_FALSE(found);
ASSERT_EQ(2, list.NumNotFlushed());
list.current()->Unref(&to_delete);
for (MemTable* m : to_delete) {
delete m;
}
}
TEST_F(MemTableListTest, GetFromHistoryTest) {
// Create MemTableList
int min_write_buffer_number_to_merge = 2;
int max_write_buffer_number_to_maintain = 2;
MemTableList list(min_write_buffer_number_to_merge,
max_write_buffer_number_to_maintain);
SequenceNumber seq = 1;
std::string value;
Status s;
MergeContext merge_context;
InternalKeyComparator ikey_cmp(options.comparator);
RangeDelAggregator range_del_agg(ikey_cmp, {} /* snapshots */);
autovector<MemTable*> to_delete;
LookupKey lkey("key1", seq);
bool found = list.current()->Get(lkey, &value, &s, &merge_context,
&range_del_agg, ReadOptions());
ASSERT_FALSE(found);
// Create a MemTable
InternalKeyComparator cmp(BytewiseComparator());
auto factory = std::make_shared<SkipListFactory>();
options.memtable_factory = factory;
ImmutableCFOptions ioptions(options);
WriteBufferManager wb(options.db_write_buffer_size);
MemTable* mem = new MemTable(cmp, ioptions, MutableCFOptions(options), &wb,
kMaxSequenceNumber, 0 /* column_family_id */);
mem->Ref();
// Write some keys to this memtable.
mem->Add(++seq, kTypeDeletion, "key1", "");
mem->Add(++seq, kTypeValue, "key2", "value2");
mem->Add(++seq, kTypeValue, "key2", "value2.2");
// Fetch the newly written keys
merge_context.Clear();
found = mem->Get(LookupKey("key1", seq), &value, &s, &merge_context,
&range_del_agg, ReadOptions());
// MemTable found out that this key is *not* found (at this sequence#)
ASSERT_TRUE(found && s.IsNotFound());
merge_context.Clear();
found = mem->Get(LookupKey("key2", seq), &value, &s, &merge_context,
&range_del_agg, ReadOptions());
ASSERT_TRUE(s.ok() && found);
ASSERT_EQ(value, "value2.2");
// Add memtable to list
list.Add(mem, &to_delete);
ASSERT_EQ(0, to_delete.size());
// Fetch keys via MemTableList
merge_context.Clear();
found = list.current()->Get(LookupKey("key1", seq), &value, &s,
&merge_context, &range_del_agg, ReadOptions());
ASSERT_TRUE(found && s.IsNotFound());
merge_context.Clear();
found = list.current()->Get(LookupKey("key2", seq), &value, &s,
&merge_context, &range_del_agg, ReadOptions());
ASSERT_TRUE(s.ok() && found);
ASSERT_EQ("value2.2", value);
// Flush this memtable from the list.
// (It will then be a part of the memtable history).
autovector<MemTable*> to_flush;
list.PickMemtablesToFlush(&to_flush);
ASSERT_EQ(1, to_flush.size());
s = Mock_InstallMemtableFlushResults(&list, MutableCFOptions(options),
to_flush, &to_delete);
ASSERT_OK(s);
ASSERT_EQ(0, list.NumNotFlushed());
ASSERT_EQ(1, list.NumFlushed());
ASSERT_EQ(0, to_delete.size());
// Verify keys are no longer in MemTableList
merge_context.Clear();
found = list.current()->Get(LookupKey("key1", seq), &value, &s,
&merge_context, &range_del_agg, ReadOptions());
ASSERT_FALSE(found);
merge_context.Clear();
found = list.current()->Get(LookupKey("key2", seq), &value, &s,
&merge_context, &range_del_agg, ReadOptions());
ASSERT_FALSE(found);
// Verify keys are present in history
merge_context.Clear();
found = list.current()->GetFromHistory(LookupKey("key1", seq), &value, &s,
&merge_context, &range_del_agg,
ReadOptions());
ASSERT_TRUE(found && s.IsNotFound());
merge_context.Clear();
found = list.current()->GetFromHistory(LookupKey("key2", seq), &value, &s,
&merge_context, &range_del_agg,
ReadOptions());
ASSERT_TRUE(found);
ASSERT_EQ("value2.2", value);
// Create another memtable and write some keys to it
WriteBufferManager wb2(options.db_write_buffer_size);
MemTable* mem2 = new MemTable(cmp, ioptions, MutableCFOptions(options), &wb2,
kMaxSequenceNumber, 0 /* column_family_id */);
mem2->Ref();
mem2->Add(++seq, kTypeDeletion, "key1", "");
mem2->Add(++seq, kTypeValue, "key3", "value3");
// Add second memtable to list
list.Add(mem2, &to_delete);
ASSERT_EQ(0, to_delete.size());
to_flush.clear();
list.PickMemtablesToFlush(&to_flush);
ASSERT_EQ(1, to_flush.size());
// Flush second memtable
s = Mock_InstallMemtableFlushResults(&list, MutableCFOptions(options),
to_flush, &to_delete);
ASSERT_OK(s);
ASSERT_EQ(0, list.NumNotFlushed());
ASSERT_EQ(2, list.NumFlushed());
ASSERT_EQ(0, to_delete.size());
// Add a third memtable to push the first memtable out of the history
WriteBufferManager wb3(options.db_write_buffer_size);
MemTable* mem3 = new MemTable(cmp, ioptions, MutableCFOptions(options), &wb3,
kMaxSequenceNumber, 0 /* column_family_id */);
mem3->Ref();
list.Add(mem3, &to_delete);
ASSERT_EQ(1, list.NumNotFlushed());
ASSERT_EQ(1, list.NumFlushed());
ASSERT_EQ(1, to_delete.size());
// Verify keys are no longer in MemTableList
merge_context.Clear();
found = list.current()->Get(LookupKey("key1", seq), &value, &s,
&merge_context, &range_del_agg, ReadOptions());
ASSERT_FALSE(found);
merge_context.Clear();
found = list.current()->Get(LookupKey("key2", seq), &value, &s,
&merge_context, &range_del_agg, ReadOptions());
ASSERT_FALSE(found);
merge_context.Clear();
found = list.current()->Get(LookupKey("key3", seq), &value, &s,
&merge_context, &range_del_agg, ReadOptions());
ASSERT_FALSE(found);
// Verify that the second memtable's keys are in the history
merge_context.Clear();
found = list.current()->GetFromHistory(LookupKey("key1", seq), &value, &s,
&merge_context, &range_del_agg,
ReadOptions());
ASSERT_TRUE(found && s.IsNotFound());
merge_context.Clear();
found = list.current()->GetFromHistory(LookupKey("key3", seq), &value, &s,
&merge_context, &range_del_agg,
ReadOptions());
ASSERT_TRUE(found);
ASSERT_EQ("value3", value);
// Verify that key2 from the first memtable is no longer in the history
merge_context.Clear();
found = list.current()->Get(LookupKey("key2", seq), &value, &s,
&merge_context, &range_del_agg, ReadOptions());
ASSERT_FALSE(found);
// Cleanup
list.current()->Unref(&to_delete);
ASSERT_EQ(3, to_delete.size());
for (MemTable* m : to_delete) {
delete m;
}
}
TEST_F(MemTableListTest, FlushPendingTest) {
const int num_tables = 5;
SequenceNumber seq = 1;
Status s;
auto factory = std::make_shared<SkipListFactory>();
options.memtable_factory = factory;
ImmutableCFOptions ioptions(options);
InternalKeyComparator cmp(BytewiseComparator());
WriteBufferManager wb(options.db_write_buffer_size);
autovector<MemTable*> to_delete;
// Create MemTableList
int min_write_buffer_number_to_merge = 3;
int max_write_buffer_number_to_maintain = 7;
MemTableList list(min_write_buffer_number_to_merge,
max_write_buffer_number_to_maintain);
// Create some MemTables
std::vector<MemTable*> tables;
MutableCFOptions mutable_cf_options(options);
for (int i = 0; i < num_tables; i++) {
MemTable* mem = new MemTable(cmp, ioptions, mutable_cf_options, &wb,
kMaxSequenceNumber, 0 /* column_family_id */);
mem->Ref();
std::string value;
MergeContext merge_context;
mem->Add(++seq, kTypeValue, "key1", ToString(i));
mem->Add(++seq, kTypeValue, "keyN" + ToString(i), "valueN");
mem->Add(++seq, kTypeValue, "keyX" + ToString(i), "value");
mem->Add(++seq, kTypeValue, "keyM" + ToString(i), "valueM");
mem->Add(++seq, kTypeDeletion, "keyX" + ToString(i), "");
tables.push_back(mem);
}
// Nothing to flush
ASSERT_FALSE(list.IsFlushPending());
ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
autovector<MemTable*> to_flush;
list.PickMemtablesToFlush(&to_flush);
ASSERT_EQ(0, to_flush.size());
// Request a flush even though there is nothing to flush
list.FlushRequested();
ASSERT_FALSE(list.IsFlushPending());
ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
// Attempt to 'flush' to clear request for flush
list.PickMemtablesToFlush(&to_flush);
ASSERT_EQ(0, to_flush.size());
ASSERT_FALSE(list.IsFlushPending());
ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
// Request a flush again
list.FlushRequested();
// No flush pending since the list is empty.
ASSERT_FALSE(list.IsFlushPending());
ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
// Add 2 tables
list.Add(tables[0], &to_delete);
list.Add(tables[1], &to_delete);
ASSERT_EQ(2, list.NumNotFlushed());
ASSERT_EQ(0, to_delete.size());
// Even though we have less than the minimum to flush, a flush is
// pending since we had previously requested a flush and never called
// PickMemtablesToFlush() to clear the flush.
ASSERT_TRUE(list.IsFlushPending());
ASSERT_TRUE(list.imm_flush_needed.load(std::memory_order_acquire));
// Pick tables to flush
list.PickMemtablesToFlush(&to_flush);
ASSERT_EQ(2, to_flush.size());
ASSERT_EQ(2, list.NumNotFlushed());
ASSERT_FALSE(list.IsFlushPending());
ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
// Revert flush
list.RollbackMemtableFlush(to_flush, 0);
ASSERT_FALSE(list.IsFlushPending());
ASSERT_TRUE(list.imm_flush_needed.load(std::memory_order_acquire));
to_flush.clear();
// Add another table
list.Add(tables[2], &to_delete);
// We now have the minimum to flush regardless of whether FlushRequested()
// was called.
ASSERT_TRUE(list.IsFlushPending());
ASSERT_TRUE(list.imm_flush_needed.load(std::memory_order_acquire));
ASSERT_EQ(0, to_delete.size());
// Pick tables to flush
list.PickMemtablesToFlush(&to_flush);
ASSERT_EQ(3, to_flush.size());
ASSERT_EQ(3, list.NumNotFlushed());
ASSERT_FALSE(list.IsFlushPending());
ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
// Pick tables to flush again
autovector<MemTable*> to_flush2;
list.PickMemtablesToFlush(&to_flush2);
ASSERT_EQ(0, to_flush2.size());
ASSERT_EQ(3, list.NumNotFlushed());
ASSERT_FALSE(list.IsFlushPending());
ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
// Add another table
list.Add(tables[3], &to_delete);
ASSERT_FALSE(list.IsFlushPending());
ASSERT_TRUE(list.imm_flush_needed.load(std::memory_order_acquire));
ASSERT_EQ(0, to_delete.size());
// Request a flush again
list.FlushRequested();
ASSERT_TRUE(list.IsFlushPending());
ASSERT_TRUE(list.imm_flush_needed.load(std::memory_order_acquire));
// Pick tables to flush again
list.PickMemtablesToFlush(&to_flush2);
ASSERT_EQ(1, to_flush2.size());
ASSERT_EQ(4, list.NumNotFlushed());
ASSERT_FALSE(list.IsFlushPending());
ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
// Rollback first pick of tables
list.RollbackMemtableFlush(to_flush, 0);
ASSERT_TRUE(list.IsFlushPending());
ASSERT_TRUE(list.imm_flush_needed.load(std::memory_order_acquire));
to_flush.clear();
// Add another tables
list.Add(tables[4], &to_delete);
ASSERT_EQ(5, list.NumNotFlushed());
// We now have the minimum to flush regardless of whether FlushRequested() was called.
ASSERT_TRUE(list.IsFlushPending());
ASSERT_TRUE(list.imm_flush_needed.load(std::memory_order_acquire));
ASSERT_EQ(0, to_delete.size());
// Pick tables to flush
list.PickMemtablesToFlush(&to_flush);
// Should pick 4 of 5 since 1 table has been picked in to_flush2
ASSERT_EQ(4, to_flush.size());
ASSERT_EQ(5, list.NumNotFlushed());
ASSERT_FALSE(list.IsFlushPending());
ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
// Pick tables to flush again
autovector<MemTable*> to_flush3;
ASSERT_EQ(0, to_flush3.size()); // nothing not in progress of being flushed
ASSERT_EQ(5, list.NumNotFlushed());
ASSERT_FALSE(list.IsFlushPending());
ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
// Flush the 4 memtables that were picked in to_flush
s = Mock_InstallMemtableFlushResults(&list, MutableCFOptions(options),
to_flush, &to_delete);
ASSERT_OK(s);
// Note: now to_flush contains tables[0,1,2,4]. to_flush2 contains
// tables[3].
// Current implementation will only commit memtables in the order they were
// created. So InstallMemtableFlushResults will install the first 3 tables
// in to_flush and stop when it encounters a table not yet flushed.
ASSERT_EQ(2, list.NumNotFlushed());
int num_in_history = std::min(3, max_write_buffer_number_to_maintain);
ASSERT_EQ(num_in_history, list.NumFlushed());
ASSERT_EQ(5 - list.NumNotFlushed() - num_in_history, to_delete.size());
// Request a flush again. Should be nothing to flush
list.FlushRequested();
ASSERT_FALSE(list.IsFlushPending());
ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
// Flush the 1 memtable that was picked in to_flush2
s = MemTableListTest::Mock_InstallMemtableFlushResults(
&list, MutableCFOptions(options), to_flush2, &to_delete);
ASSERT_OK(s);
// This will actually install 2 tables. The 1 we told it to flush, and also
// tables[4] which has been waiting for tables[3] to commit.
ASSERT_EQ(0, list.NumNotFlushed());
num_in_history = std::min(5, max_write_buffer_number_to_maintain);
ASSERT_EQ(num_in_history, list.NumFlushed());
ASSERT_EQ(5 - list.NumNotFlushed() - num_in_history, to_delete.size());
for (const auto& m : to_delete) {
// Refcount should be 0 after calling InstallMemtableFlushResults.
// Verify this, by Ref'ing then UnRef'ing:
m->Ref();
ASSERT_EQ(m, m->Unref());
delete m;
}
to_delete.clear();
list.current()->Unref(&to_delete);
int to_delete_size = std::min(5, max_write_buffer_number_to_maintain);
ASSERT_EQ(to_delete_size, to_delete.size());
for (const auto& m : to_delete) {
// Refcount should be 0 after calling InstallMemtableFlushResults.
// Verify this, by Ref'ing then UnRef'ing:
m->Ref();
ASSERT_EQ(m, m->Unref());
delete m;
}
to_delete.clear();
}
} // namespace rocksdb
int main(int argc, char** argv) {
::testing::InitGoogleTest(&argc, argv);
return RUN_ALL_TESTS();
}


@@ -1,29 +0,0 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
#pragma once
#ifndef ROCKSDB_LITE
#include <memory>
#include "rocksdb/table_properties.h"
namespace rocksdb {
// Creates a factory of a table property collector that marks an SST
// file as need-compaction when it observes at least "D" deletion
// entries in any "N" consecutive entries.
//
// @param sliding_window_size "N". Note that this number will be
// rounded up to the smallest multiple of 128 that is no less
// than the specified size.
// @param deletion_trigger "D". Note that even when "N" is changed,
// the specified number for "D" will not be changed.
extern std::shared_ptr<TablePropertiesCollectorFactory>
NewCompactOnDeletionCollectorFactory(
size_t sliding_window_size,
size_t deletion_trigger);
} // namespace rocksdb
#endif // !ROCKSDB_LITE
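
A short usage sketch for this factory, assuming the public header `rocksdb/utilities/table_properties_collectors.h` that declares it; the helper function is hypothetical:

```cpp
#include <rocksdb/options.h>
#include <rocksdb/utilities/table_properties_collectors.h>

rocksdb::Options MakeOptions() {
  rocksdb::Options options;
  // N = 128 (window, rounded up to a multiple of 128), D = 10 deletions:
  // any file seeing 10 deletes in a 128-entry window gets flagged for
  // compaction, reclaiming tombstone-heavy files early.
  options.table_properties_collector_factories.emplace_back(
      rocksdb::NewCompactOnDeletionCollectorFactory(
          /*sliding_window_size=*/128, /*deletion_trigger=*/10));
  return options;
}
```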


@@ -1,50 +0,0 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
package org.rocksdb;
/**
* Options for FIFO Compaction
*/
public class CompactionOptionsFIFO extends RocksObject {
public CompactionOptionsFIFO() {
super(newCompactionOptionsFIFO());
}
/**
* Once the total sum of table files reaches this, we will delete the oldest
* table file
*
* Default: 1GB
*
* @param maxTableFilesSize The maximum size of the table files
*
* @return the reference to the current options.
*/
public CompactionOptionsFIFO setMaxTableFilesSize(
final long maxTableFilesSize) {
setMaxTableFilesSize(nativeHandle_, maxTableFilesSize);
return this;
}
/**
* Once the total sum of table files reaches this, we will delete the oldest
* table file
*
* Default: 1GB
*
* @return max table file size in bytes
*/
public long maxTableFilesSize() {
return maxTableFilesSize(nativeHandle_);
}
private native void setMaxTableFilesSize(long handle, long maxTableFilesSize);
private native long maxTableFilesSize(long handle);
private native static long newCompactionOptionsFIFO();
@Override protected final native void disposeInternal(final long handle);
}
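
This Java class forwards to the C++ `CompactionOptionsFIFO` struct. A hedged sketch of the equivalent native configuration (the helper function is hypothetical; the option fields are the standard C++ ones):

```cpp
#include <rocksdb/options.h>

rocksdb::Options MakeFifoOptions() {
  rocksdb::Options options;
  // FIFO compaction: once all table files together exceed 1 GiB
  // (the same default the Javadoc above mentions), the oldest
  // files are deleted.
  options.compaction_style = rocksdb::kCompactionStyleFIFO;
  options.compaction_options_fifo.max_table_files_size = 1ull << 30;
  return options;
}
```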


@@ -1,197 +0,0 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
#include "monitoring/statistics.h"
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS
#endif
#include <inttypes.h>
#include "rocksdb/statistics.h"
#include "port/likely.h"
#include <algorithm>
#include <cstdio>
namespace rocksdb {
std::shared_ptr<Statistics> CreateDBStatistics() {
return std::make_shared<StatisticsImpl>(nullptr, false);
}
StatisticsImpl::StatisticsImpl(std::shared_ptr<Statistics> stats,
bool enable_internal_stats)
: stats_(std::move(stats)), enable_internal_stats_(enable_internal_stats) {}
StatisticsImpl::~StatisticsImpl() {}
uint64_t StatisticsImpl::getTickerCount(uint32_t tickerType) const {
MutexLock lock(&aggregate_lock_);
return getTickerCountLocked(tickerType);
}
uint64_t StatisticsImpl::getTickerCountLocked(uint32_t tickerType) const {
assert(
enable_internal_stats_ ?
tickerType < INTERNAL_TICKER_ENUM_MAX :
tickerType < TICKER_ENUM_MAX);
uint64_t res = 0;
for (size_t core_idx = 0; core_idx < per_core_stats_.Size(); ++core_idx) {
res += per_core_stats_.AccessAtCore(core_idx)->tickers_[tickerType];
}
return res;
}
void StatisticsImpl::histogramData(uint32_t histogramType,
HistogramData* const data) const {
MutexLock lock(&aggregate_lock_);
getHistogramImplLocked(histogramType)->Data(data);
}
std::unique_ptr<HistogramImpl> StatisticsImpl::getHistogramImplLocked(
uint32_t histogramType) const {
assert(
enable_internal_stats_ ?
histogramType < INTERNAL_HISTOGRAM_ENUM_MAX :
histogramType < HISTOGRAM_ENUM_MAX);
std::unique_ptr<HistogramImpl> res_hist(new HistogramImpl());
for (size_t core_idx = 0; core_idx < per_core_stats_.Size(); ++core_idx) {
res_hist->Merge(
per_core_stats_.AccessAtCore(core_idx)->histograms_[histogramType]);
}
return res_hist;
}
std::string StatisticsImpl::getHistogramString(uint32_t histogramType) const {
MutexLock lock(&aggregate_lock_);
return getHistogramImplLocked(histogramType)->ToString();
}
void StatisticsImpl::setTickerCount(uint32_t tickerType, uint64_t count) {
{
MutexLock lock(&aggregate_lock_);
setTickerCountLocked(tickerType, count);
}
if (stats_ && tickerType < TICKER_ENUM_MAX) {
stats_->setTickerCount(tickerType, count);
}
}
void StatisticsImpl::setTickerCountLocked(uint32_t tickerType, uint64_t count) {
assert(enable_internal_stats_ ? tickerType < INTERNAL_TICKER_ENUM_MAX
: tickerType < TICKER_ENUM_MAX);
for (size_t core_idx = 0; core_idx < per_core_stats_.Size(); ++core_idx) {
if (core_idx == 0) {
per_core_stats_.AccessAtCore(core_idx)->tickers_[tickerType] = count;
} else {
per_core_stats_.AccessAtCore(core_idx)->tickers_[tickerType] = 0;
}
}
}
uint64_t StatisticsImpl::getAndResetTickerCount(uint32_t tickerType) {
uint64_t sum = 0;
{
MutexLock lock(&aggregate_lock_);
assert(enable_internal_stats_ ? tickerType < INTERNAL_TICKER_ENUM_MAX
: tickerType < TICKER_ENUM_MAX);
for (size_t core_idx = 0; core_idx < per_core_stats_.Size(); ++core_idx) {
sum +=
per_core_stats_.AccessAtCore(core_idx)->tickers_[tickerType].exchange(
0, std::memory_order_relaxed);
}
}
if (stats_ && tickerType < TICKER_ENUM_MAX) {
stats_->setTickerCount(tickerType, 0);
}
return sum;
}
void StatisticsImpl::recordTick(uint32_t tickerType, uint64_t count) {
assert(
enable_internal_stats_ ?
tickerType < INTERNAL_TICKER_ENUM_MAX :
tickerType < TICKER_ENUM_MAX);
per_core_stats_.Access()->tickers_[tickerType].fetch_add(
count, std::memory_order_relaxed);
if (stats_ && tickerType < TICKER_ENUM_MAX) {
stats_->recordTick(tickerType, count);
}
}
void StatisticsImpl::measureTime(uint32_t histogramType, uint64_t value) {
assert(
enable_internal_stats_ ?
histogramType < INTERNAL_HISTOGRAM_ENUM_MAX :
histogramType < HISTOGRAM_ENUM_MAX);
per_core_stats_.Access()->histograms_[histogramType].Add(value);
if (stats_ && histogramType < HISTOGRAM_ENUM_MAX) {
stats_->measureTime(histogramType, value);
}
}
Status StatisticsImpl::Reset() {
MutexLock lock(&aggregate_lock_);
for (uint32_t i = 0; i < TICKER_ENUM_MAX; ++i) {
setTickerCountLocked(i, 0);
}
for (uint32_t i = 0; i < HISTOGRAM_ENUM_MAX; ++i) {
for (size_t core_idx = 0; core_idx < per_core_stats_.Size(); ++core_idx) {
per_core_stats_.AccessAtCore(core_idx)->histograms_[i].Clear();
}
}
return Status::OK();
}
namespace {
// a buffer size used for temp string buffers
const int kTmpStrBufferSize = 200;
} // namespace
std::string StatisticsImpl::ToString() const {
MutexLock lock(&aggregate_lock_);
std::string res;
res.reserve(20000);
for (const auto& t : TickersNameMap) {
if (t.first < TICKER_ENUM_MAX || enable_internal_stats_) {
char buffer[kTmpStrBufferSize];
snprintf(buffer, kTmpStrBufferSize, "%s COUNT : %" PRIu64 "\n",
t.second.c_str(), getTickerCountLocked(t.first));
res.append(buffer);
}
}
for (const auto& h : HistogramsNameMap) {
if (h.first < HISTOGRAM_ENUM_MAX || enable_internal_stats_) {
char buffer[kTmpStrBufferSize];
HistogramData hData;
getHistogramImplLocked(h.first)->Data(&hData);
// don't handle failures - buffer should always be big enough and arguments
// should be provided correctly
int ret = snprintf(
buffer, kTmpStrBufferSize,
"%s P50 : %f P95 : %f P99 : %f P100 : %f COUNT : %" PRIu64 " SUM : %"
PRIu64 "\n", h.second.c_str(), hData.median, hData.percentile95,
hData.percentile99, hData.max, hData.count, hData.sum);
if (ret < 0 || ret >= kTmpStrBufferSize) {
assert(false);
continue;
}
res.append(buffer);
}
}
res.shrink_to_fit();
return res;
}
bool StatisticsImpl::HistEnabledForType(uint32_t type) const {
if (LIKELY(!enable_internal_stats_)) {
return type < HISTOGRAM_ENUM_MAX;
}
return true;
}
} // namespace rocksdb
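
A usage sketch against the public `Statistics` API, with `CreateDBStatistics()` as defined above; the ticker and histogram enums are standard RocksDB ones, the helper function is hypothetical:

```cpp
#include <rocksdb/options.h>
#include <rocksdb/statistics.h>

void ReportStats(rocksdb::Options& options) {
  // Typically set once, before DB::Open().
  if (!options.statistics) {
    options.statistics = rocksdb::CreateDBStatistics();
  }
  // Tickers are monotonically increasing counters, summed over all cores.
  uint64_t cache_hits =
      options.statistics->getTickerCount(rocksdb::BLOCK_CACHE_HIT);
  // Histograms aggregate distributions; per-core data is merged on read.
  rocksdb::HistogramData flush_times;
  options.statistics->histogramData(rocksdb::FLUSH_TIME, &flush_times);
  (void)cache_hits;
  (void)flush_times;
}
```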


@@ -1,50 +0,0 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
//
// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
//
// Must not be included from any .h files to avoid polluting the namespace
// with macros.
#pragma once
#include "port/port.h"
// Helper macros that include information about file name and line number
#define STRINGIFY(x) #x
#define TOSTRING(x) STRINGIFY(x)
#define PREPEND_FILE_LINE(FMT) ("[" __FILE__ ":" TOSTRING(__LINE__) "] " FMT)
// Don't include file/line info in HEADER level
#define ROCKS_LOG_HEADER(LGR, FMT, ...) \
rocksdb::Log(InfoLogLevel::HEADER_LEVEL, LGR, FMT, ##__VA_ARGS__)
#define ROCKS_LOG_DEBUG(LGR, FMT, ...) \
rocksdb::Log(InfoLogLevel::DEBUG_LEVEL, LGR, PREPEND_FILE_LINE(FMT), \
##__VA_ARGS__)
#define ROCKS_LOG_INFO(LGR, FMT, ...) \
rocksdb::Log(InfoLogLevel::INFO_LEVEL, LGR, PREPEND_FILE_LINE(FMT), \
##__VA_ARGS__)
#define ROCKS_LOG_WARN(LGR, FMT, ...) \
rocksdb::Log(InfoLogLevel::WARN_LEVEL, LGR, PREPEND_FILE_LINE(FMT), \
##__VA_ARGS__)
#define ROCKS_LOG_ERROR(LGR, FMT, ...) \
rocksdb::Log(InfoLogLevel::ERROR_LEVEL, LGR, PREPEND_FILE_LINE(FMT), \
##__VA_ARGS__)
#define ROCKS_LOG_FATAL(LGR, FMT, ...) \
rocksdb::Log(InfoLogLevel::FATAL_LEVEL, LGR, PREPEND_FILE_LINE(FMT), \
##__VA_ARGS__)
#define ROCKS_LOG_BUFFER(LOG_BUF, FMT, ...) \
rocksdb::LogToBuffer(LOG_BUF, PREPEND_FILE_LINE(FMT), ##__VA_ARGS__)
#define ROCKS_LOG_BUFFER_MAX_SZ(LOG_BUF, MAX_LOG_SIZE, FMT, ...) \
rocksdb::LogToBuffer(LOG_BUF, MAX_LOG_SIZE, PREPEND_FILE_LINE(FMT), \
##__VA_ARGS__)
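
A minimal call-site sketch, assuming this header is included (the function and message are made up): `PREPEND_FILE_LINE` pastes the location into the format string at compile time, so the `Logger` itself never needs `__FILE__`/`__LINE__`:

```cpp
// Hypothetical call site; util/logging.h is internal, so this lives in
// the rocksdb namespace like the rest of the library code.
namespace rocksdb {
void LogCompactionStart(Logger* logger, int level) {
  // Expands to roughly:
  //   Log(InfoLogLevel::INFO_LEVEL, logger,
  //       "[my_file.cc:7] starting compaction at level %d", level);
  ROCKS_LOG_INFO(logger, "starting compaction at level %d", level);
}
}  // namespace rocksdb
```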


@@ -1,260 +0,0 @@
// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
#include "util/sst_file_manager_impl.h"
#include <vector>
#include "port/port.h"
#include "rocksdb/env.h"
#include "rocksdb/sst_file_manager.h"
#include "util/mutexlock.h"
#include "util/sync_point.h"
namespace rocksdb {
#ifndef ROCKSDB_LITE
SstFileManagerImpl::SstFileManagerImpl(Env* env, std::shared_ptr<Logger> logger,
int64_t rate_bytes_per_sec,
double max_trash_db_ratio,
uint64_t bytes_max_delete_chunk)
: env_(env),
logger_(logger),
total_files_size_(0),
compaction_buffer_size_(0),
cur_compactions_reserved_size_(0),
max_allowed_space_(0),
delete_scheduler_(env, rate_bytes_per_sec, logger.get(), this,
max_trash_db_ratio, bytes_max_delete_chunk) {}
SstFileManagerImpl::~SstFileManagerImpl() {}
Status SstFileManagerImpl::OnAddFile(const std::string& file_path) {
uint64_t file_size;
Status s = env_->GetFileSize(file_path, &file_size);
if (s.ok()) {
MutexLock l(&mu_);
OnAddFileImpl(file_path, file_size);
}
TEST_SYNC_POINT("SstFileManagerImpl::OnAddFile");
return s;
}
Status SstFileManagerImpl::OnDeleteFile(const std::string& file_path) {
{
MutexLock l(&mu_);
OnDeleteFileImpl(file_path);
}
TEST_SYNC_POINT("SstFileManagerImpl::OnDeleteFile");
return Status::OK();
}
void SstFileManagerImpl::OnCompactionCompletion(Compaction* c) {
MutexLock l(&mu_);
uint64_t size_added_by_compaction = 0;
for (size_t i = 0; i < c->num_input_levels(); i++) {
for (size_t j = 0; j < c->num_input_files(i); j++) {
FileMetaData* filemeta = c->input(i, j);
size_added_by_compaction += filemeta->fd.GetFileSize();
}
}
cur_compactions_reserved_size_ -= size_added_by_compaction;
}
Status SstFileManagerImpl::OnMoveFile(const std::string& old_path,
const std::string& new_path,
uint64_t* file_size) {
{
MutexLock l(&mu_);
if (file_size != nullptr) {
*file_size = tracked_files_[old_path];
}
OnAddFileImpl(new_path, tracked_files_[old_path]);
OnDeleteFileImpl(old_path);
}
TEST_SYNC_POINT("SstFileManagerImpl::OnMoveFile");
return Status::OK();
}
void SstFileManagerImpl::SetMaxAllowedSpaceUsage(uint64_t max_allowed_space) {
MutexLock l(&mu_);
max_allowed_space_ = max_allowed_space;
}
void SstFileManagerImpl::SetCompactionBufferSize(
uint64_t compaction_buffer_size) {
MutexLock l(&mu_);
compaction_buffer_size_ = compaction_buffer_size;
}
bool SstFileManagerImpl::IsMaxAllowedSpaceReached() {
MutexLock l(&mu_);
if (max_allowed_space_ <= 0) {
return false;
}
return total_files_size_ >= max_allowed_space_;
}
bool SstFileManagerImpl::IsMaxAllowedSpaceReachedIncludingCompactions() {
MutexLock l(&mu_);
if (max_allowed_space_ <= 0) {
return false;
}
return total_files_size_ + cur_compactions_reserved_size_ >=
max_allowed_space_;
}
bool SstFileManagerImpl::EnoughRoomForCompaction(
const std::vector<CompactionInputFiles>& inputs) {
MutexLock l(&mu_);
uint64_t size_added_by_compaction = 0;
// First check if we even have the space to do the compaction
for (size_t i = 0; i < inputs.size(); i++) {
for (size_t j = 0; j < inputs[i].size(); j++) {
FileMetaData* filemeta = inputs[i][j];
size_added_by_compaction += filemeta->fd.GetFileSize();
}
}
if (max_allowed_space_ != 0 &&
(size_added_by_compaction + cur_compactions_reserved_size_ +
total_files_size_ + compaction_buffer_size_ >
max_allowed_space_)) {
return false;
}
// Update cur_compactions_reserved_size_ so concurrent compactions
// don't max out space
cur_compactions_reserved_size_ += size_added_by_compaction;
return true;
}
uint64_t SstFileManagerImpl::GetCompactionsReservedSize() {
MutexLock l(&mu_);
return cur_compactions_reserved_size_;
}
uint64_t SstFileManagerImpl::GetTotalSize() {
MutexLock l(&mu_);
return total_files_size_;
}
std::unordered_map<std::string, uint64_t>
SstFileManagerImpl::GetTrackedFiles() {
MutexLock l(&mu_);
return tracked_files_;
}
int64_t SstFileManagerImpl::GetDeleteRateBytesPerSecond() {
return delete_scheduler_.GetRateBytesPerSecond();
}
void SstFileManagerImpl::SetDeleteRateBytesPerSecond(int64_t delete_rate) {
return delete_scheduler_.SetRateBytesPerSecond(delete_rate);
}
double SstFileManagerImpl::GetMaxTrashDBRatio() {
return delete_scheduler_.GetMaxTrashDBRatio();
}
void SstFileManagerImpl::SetMaxTrashDBRatio(double r) {
return delete_scheduler_.SetMaxTrashDBRatio(r);
}
uint64_t SstFileManagerImpl::GetTotalTrashSize() {
return delete_scheduler_.GetTotalTrashSize();
}
Status SstFileManagerImpl::ScheduleFileDeletion(
const std::string& file_path, const std::string& path_to_sync) {
return delete_scheduler_.DeleteFile(file_path, path_to_sync);
}
void SstFileManagerImpl::WaitForEmptyTrash() {
delete_scheduler_.WaitForEmptyTrash();
}
void SstFileManagerImpl::OnAddFileImpl(const std::string& file_path,
uint64_t file_size) {
auto tracked_file = tracked_files_.find(file_path);
if (tracked_file != tracked_files_.end()) {
// File was added before, we will just update the size
total_files_size_ -= tracked_file->second;
total_files_size_ += file_size;
} else {
total_files_size_ += file_size;
}
tracked_files_[file_path] = file_size;
}
void SstFileManagerImpl::OnDeleteFileImpl(const std::string& file_path) {
auto tracked_file = tracked_files_.find(file_path);
if (tracked_file == tracked_files_.end()) {
// File is not tracked
return;
}
total_files_size_ -= tracked_file->second;
tracked_files_.erase(tracked_file);
}
SstFileManager* NewSstFileManager(Env* env, std::shared_ptr<Logger> info_log,
std::string trash_dir,
int64_t rate_bytes_per_sec,
bool delete_existing_trash, Status* status,
double max_trash_db_ratio,
uint64_t bytes_max_delete_chunk) {
SstFileManagerImpl* res =
new SstFileManagerImpl(env, info_log, rate_bytes_per_sec,
max_trash_db_ratio, bytes_max_delete_chunk);
// trash_dir is deprecated and no longer needed, but if the user passed it
// we will still remove files in it.
Status s;
if (delete_existing_trash && trash_dir != "") {
std::vector<std::string> files_in_trash;
s = env->GetChildren(trash_dir, &files_in_trash);
if (s.ok()) {
for (const std::string& trash_file : files_in_trash) {
if (trash_file == "." || trash_file == "..") {
continue;
}
std::string path_in_trash = trash_dir + "/" + trash_file;
res->OnAddFile(path_in_trash);
Status file_delete =
res->ScheduleFileDeletion(path_in_trash, trash_dir);
if (s.ok() && !file_delete.ok()) {
s = file_delete;
}
}
}
}
if (status) {
*status = s;
}
return res;
}
#else
SstFileManager* NewSstFileManager(Env* /*env*/,
std::shared_ptr<Logger> /*info_log*/,
std::string /*trash_dir*/,
int64_t /*rate_bytes_per_sec*/,
bool /*delete_existing_trash*/,
Status* status, double /*max_trash_db_ratio*/,
uint64_t /*bytes_max_delete_chunk*/) {
if (status) {
*status =
Status::NotSupported("SstFileManager is not supported in ROCKSDB_LITE");
}
return nullptr;
}
#endif // ROCKSDB_LITE
} // namespace rocksdb
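
A hedged sketch of how the factory above is typically wired into `Options` (the helper function and the numbers are illustrative, not recommendations):

```cpp
#include <rocksdb/env.h>
#include <rocksdb/options.h>
#include <rocksdb/sst_file_manager.h>

rocksdb::Options MakeRateLimitedDeleteOptions() {
  rocksdb::Options options;
  // Rate-limit SST deletions to 64 MB/s so dropping many files at once
  // doesn't saturate the disk; the remaining parameters keep their
  // defaults (empty trash_dir, default trash ratio and chunk size).
  options.sst_file_manager.reset(rocksdb::NewSstFileManager(
      rocksdb::Env::Default(), /*info_log=*/nullptr, /*trash_dir=*/"",
      /*rate_bytes_per_sec=*/64 << 20));
  return options;
}
```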


@@ -1,479 +0,0 @@
/*
xxHash - Fast Hash algorithm
Copyright (C) 2012-2014, Yann Collet.
BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
You can contact the author at :
- xxHash source repository : http://code.google.com/p/xxhash/
*/
//**************************************
// Tuning parameters
//**************************************
// Unaligned memory access is automatically enabled for "common" CPUs, such as x86.
// For other CPUs, the compiler will be more cautious and insert extra code to ensure aligned access is respected.
// If you know your target CPU supports unaligned memory access, you may want to force this option manually to improve performance.
// You can also enable this parameter if you know your input data will always be aligned (boundaries of 4, for U32).
#if defined(__ARM_FEATURE_UNALIGNED) || defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || defined(_M_X64)
# define XXH_USE_UNALIGNED_ACCESS 1
#endif
// XXH_ACCEPT_NULL_INPUT_POINTER :
// If the input pointer is a null pointer, xxHash default behavior is to trigger a memory access error, since it is a bad pointer.
// When this option is enabled, xxHash output for null input pointers will be the same as a null-length input.
// This option has a very small performance cost (only measurable on small inputs).
// By default, this option is disabled. To enable it, uncomment the define below:
//#define XXH_ACCEPT_NULL_INPUT_POINTER 1
// XXH_FORCE_NATIVE_FORMAT :
// By default, xxHash library provides endian-independent Hash values, based on little-endian convention.
// Results are therefore identical for little-endian and big-endian CPU.
// This comes at a performance cost for big-endian CPU, since some swapping is required to emulate little-endian format.
// Should endian-independence be of no importance for your application, you may set the #define below to 1.
// It will improve speed for Big-endian CPU.
// This option has no impact on little-endian CPUs.
#define XXH_FORCE_NATIVE_FORMAT 0
//**************************************
// Compiler Specific Options
//**************************************
// Disable some Visual warning messages
#ifdef _MSC_VER // Visual Studio
# pragma warning(disable : 4127) // disable: C4127: conditional expression is constant
# pragma warning(disable : 4804) // disable: C4804: 'operation' : unsafe use of type 'bool' in operation (static assert line 313)
#endif
#ifdef _MSC_VER // Visual Studio
# define FORCE_INLINE static __forceinline
#else
# ifdef __GNUC__
# define FORCE_INLINE static inline __attribute__((always_inline))
# else
# define FORCE_INLINE static inline
# endif
#endif
//**************************************
// Includes & Memory related functions
//**************************************
#include "xxhash.h"
// Modify the local functions below should you wish to use some other memory related routines
// for malloc(), free()
#include <stdlib.h>
FORCE_INLINE void* XXH_malloc(size_t s) { return malloc(s); }
FORCE_INLINE void XXH_free (void* p) { free(p); }
// for memcpy()
#include <string.h>
FORCE_INLINE void* XXH_memcpy(void* dest, const void* src, size_t size) { return memcpy(dest,src,size); }
namespace rocksdb {
//**************************************
// Basic Types
//**************************************
#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L // C99
# include <stdint.h>
typedef uint8_t BYTE;
typedef uint16_t U16;
typedef uint32_t U32;
typedef int32_t S32;
typedef uint64_t U64;
#else
typedef unsigned char BYTE;
typedef unsigned short U16;
typedef unsigned int U32;
typedef signed int S32;
typedef unsigned long long U64;
#endif
#if defined(__GNUC__) && !defined(XXH_USE_UNALIGNED_ACCESS)
# define _PACKED __attribute__ ((packed))
#else
# define _PACKED
#endif
#if !defined(XXH_USE_UNALIGNED_ACCESS) && !defined(__GNUC__)
# ifdef __IBMC__
# pragma pack(1)
# else
# pragma pack(push, 1)
# endif
#endif
typedef struct _U32_S { U32 v; } _PACKED U32_S;
#if !defined(XXH_USE_UNALIGNED_ACCESS) && !defined(__GNUC__)
# pragma pack(pop)
#endif
#define A32(x) (((U32_S *)(x))->v)
//***************************************
// Compiler-specific Functions and Macros
//***************************************
#define GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
// Note : although _rotl exists for minGW (GCC under windows), performance seems poor
#if defined(_MSC_VER)
# define XXH_rotl32(x,r) _rotl(x,r)
#else
# define XXH_rotl32(x,r) ((x << r) | (x >> (32 - r)))
#endif
#if defined(_MSC_VER) // Visual Studio
# define XXH_swap32 _byteswap_ulong
#elif GCC_VERSION >= 403
# define XXH_swap32 __builtin_bswap32
#else
static inline U32 XXH_swap32 (U32 x) {
return ((x << 24) & 0xff000000 ) |
((x << 8) & 0x00ff0000 ) |
((x >> 8) & 0x0000ff00 ) |
((x >> 24) & 0x000000ff );}
#endif
//**************************************
// Constants
//**************************************
#define PRIME32_1 2654435761U
#define PRIME32_2 2246822519U
#define PRIME32_3 3266489917U
#define PRIME32_4 668265263U
#define PRIME32_5 374761393U
//**************************************
// Architecture Macros
//**************************************
typedef enum { XXH_bigEndian=0, XXH_littleEndian=1 } XXH_endianess;
#ifndef XXH_CPU_LITTLE_ENDIAN // It is possible to define XXH_CPU_LITTLE_ENDIAN externally, for example using a compiler switch
static const int one = 1;
# define XXH_CPU_LITTLE_ENDIAN (*(char*)(&one))
#endif
//**************************************
// Macros
//**************************************
#define XXH_STATIC_ASSERT(c) { enum { XXH_static_assert = 1/(!!(c)) }; } // use only *after* variable declarations
//****************************
// Memory reads
//****************************
typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
FORCE_INLINE U32 XXH_readLE32_align(const U32* ptr, XXH_endianess endian, XXH_alignment align)
{
if (align==XXH_unaligned)
return endian==XXH_littleEndian ? A32(ptr) : XXH_swap32(A32(ptr));
else
return endian==XXH_littleEndian ? *ptr : XXH_swap32(*ptr);
}
FORCE_INLINE U32 XXH_readLE32(const U32* ptr, XXH_endianess endian) { return XXH_readLE32_align(ptr, endian, XXH_unaligned); }
//****************************
// Simple Hash Functions
//****************************
FORCE_INLINE U32 XXH32_endian_align(const void* input, int len, U32 seed, XXH_endianess endian, XXH_alignment align)
{
const BYTE* p = (const BYTE*)input;
const BYTE* bEnd = p + len;  // not const: reassigned below when null input is accepted
U32 h32;
#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
if (p==NULL) { len=0; bEnd=p=(const BYTE*)(size_t)16; } // treat a null pointer as a zero-length input
#endif
if (len>=16)
{
const BYTE* const limit = bEnd - 16;
U32 v1 = seed + PRIME32_1 + PRIME32_2;
U32 v2 = seed + PRIME32_2;
U32 v3 = seed + 0;
U32 v4 = seed - PRIME32_1;
do
{
v1 += XXH_readLE32_align((const U32*)p, endian, align) * PRIME32_2; v1 = XXH_rotl32(v1, 13); v1 *= PRIME32_1; p+=4;
v2 += XXH_readLE32_align((const U32*)p, endian, align) * PRIME32_2; v2 = XXH_rotl32(v2, 13); v2 *= PRIME32_1; p+=4;
v3 += XXH_readLE32_align((const U32*)p, endian, align) * PRIME32_2; v3 = XXH_rotl32(v3, 13); v3 *= PRIME32_1; p+=4;
v4 += XXH_readLE32_align((const U32*)p, endian, align) * PRIME32_2; v4 = XXH_rotl32(v4, 13); v4 *= PRIME32_1; p+=4;
} while (p<=limit);
h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) + XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
}
else
{
h32 = seed + PRIME32_5;
}
h32 += (U32) len;
while (p<=bEnd-4)
{
h32 += XXH_readLE32_align((const U32*)p, endian, align) * PRIME32_3;
h32 = XXH_rotl32(h32, 17) * PRIME32_4 ;
p+=4;
}
while (p<bEnd)
{
h32 += (*p) * PRIME32_5;
h32 = XXH_rotl32(h32, 11) * PRIME32_1 ;
p++;
}
h32 ^= h32 >> 15;
h32 *= PRIME32_2;
h32 ^= h32 >> 13;
h32 *= PRIME32_3;
h32 ^= h32 >> 16;
return h32;
}
U32 XXH32(const void* input, int len, U32 seed)
{
#if 0
// Simple version, good for code maintenance, but unfortunately slow for small inputs
void* state = XXH32_init(seed);
XXH32_update(state, input, len);
return XXH32_digest(state);
#else
XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
# if !defined(XXH_USE_UNALIGNED_ACCESS)
if ((((size_t)input) & 3) == 0) // Input is aligned, let's leverage the speed advantage
{
if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_aligned);
else
return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_aligned);
}
# endif
if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
return XXH32_endian_align(input, len, seed, XXH_littleEndian, XXH_unaligned);
else
return XXH32_endian_align(input, len, seed, XXH_bigEndian, XXH_unaligned);
#endif
}
//****************************
// Advanced Hash Functions
//****************************
struct XXH_state32_t
{
U64 total_len;
U32 seed;
U32 v1;
U32 v2;
U32 v3;
U32 v4;
int memsize;
char memory[16];
};
int XXH32_sizeofState()
{
XXH_STATIC_ASSERT(XXH32_SIZEOFSTATE >= sizeof(struct XXH_state32_t)); // A compilation error here means XXH32_SIZEOFSTATE is not large enough
return sizeof(struct XXH_state32_t);
}
XXH_errorcode XXH32_resetState(void* state_in, U32 seed)
{
struct XXH_state32_t * state = (struct XXH_state32_t *) state_in;
state->seed = seed;
state->v1 = seed + PRIME32_1 + PRIME32_2;
state->v2 = seed + PRIME32_2;
state->v3 = seed + 0;
state->v4 = seed - PRIME32_1;
state->total_len = 0;
state->memsize = 0;
return XXH_OK;
}
void* XXH32_init (U32 seed)
{
void* state = XXH_malloc (sizeof(struct XXH_state32_t));
XXH32_resetState(state, seed);
return state;
}
FORCE_INLINE XXH_errorcode XXH32_update_endian (void* state_in, const void* input, int len, XXH_endianess endian)
{
struct XXH_state32_t * state = (struct XXH_state32_t *) state_in;
const BYTE* p = (const BYTE*)input;
const BYTE* const bEnd = p + len;
#ifdef XXH_ACCEPT_NULL_INPUT_POINTER
if (input==NULL) return XXH_ERROR;
#endif
state->total_len += len;
if (state->memsize + len < 16) // fill in tmp buffer
{
XXH_memcpy(state->memory + state->memsize, input, len);
state->memsize += len;
return XXH_OK;
}
if (state->memsize) // some data left from previous update
{
XXH_memcpy(state->memory + state->memsize, input, 16-state->memsize);
{
const U32* p32 = (const U32*)state->memory;
state->v1 += XXH_readLE32(p32, endian) * PRIME32_2; state->v1 = XXH_rotl32(state->v1, 13); state->v1 *= PRIME32_1; p32++;
state->v2 += XXH_readLE32(p32, endian) * PRIME32_2; state->v2 = XXH_rotl32(state->v2, 13); state->v2 *= PRIME32_1; p32++;
state->v3 += XXH_readLE32(p32, endian) * PRIME32_2; state->v3 = XXH_rotl32(state->v3, 13); state->v3 *= PRIME32_1; p32++;
state->v4 += XXH_readLE32(p32, endian) * PRIME32_2; state->v4 = XXH_rotl32(state->v4, 13); state->v4 *= PRIME32_1; p32++;
}
p += 16-state->memsize;
state->memsize = 0;
}
if (p <= bEnd-16)
{
const BYTE* const limit = bEnd - 16;
U32 v1 = state->v1;
U32 v2 = state->v2;
U32 v3 = state->v3;
U32 v4 = state->v4;
do
{
v1 += XXH_readLE32((const U32*)p, endian) * PRIME32_2; v1 = XXH_rotl32(v1, 13); v1 *= PRIME32_1; p+=4;
v2 += XXH_readLE32((const U32*)p, endian) * PRIME32_2; v2 = XXH_rotl32(v2, 13); v2 *= PRIME32_1; p+=4;
v3 += XXH_readLE32((const U32*)p, endian) * PRIME32_2; v3 = XXH_rotl32(v3, 13); v3 *= PRIME32_1; p+=4;
v4 += XXH_readLE32((const U32*)p, endian) * PRIME32_2; v4 = XXH_rotl32(v4, 13); v4 *= PRIME32_1; p+=4;
} while (p<=limit);
state->v1 = v1;
state->v2 = v2;
state->v3 = v3;
state->v4 = v4;
}
if (p < bEnd)
{
XXH_memcpy(state->memory, p, bEnd-p);
state->memsize = (int)(bEnd-p);
}
return XXH_OK;
}
XXH_errorcode XXH32_update (void* state_in, const void* input, int len)
{
XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
return XXH32_update_endian(state_in, input, len, XXH_littleEndian);
else
return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
}
FORCE_INLINE U32 XXH32_intermediateDigest_endian (void* state_in, XXH_endianess endian)
{
struct XXH_state32_t * state = (struct XXH_state32_t *) state_in;
const BYTE * p = (const BYTE*)state->memory;
BYTE* bEnd = (BYTE*)state->memory + state->memsize;
U32 h32;
if (state->total_len >= 16)
{
h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) + XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
}
else
{
h32 = state->seed + PRIME32_5;
}
h32 += (U32) state->total_len;
while (p<=bEnd-4)
{
h32 += XXH_readLE32((const U32*)p, endian) * PRIME32_3;
h32 = XXH_rotl32(h32, 17) * PRIME32_4;
p+=4;
}
while (p<bEnd)
{
h32 += (*p) * PRIME32_5;
h32 = XXH_rotl32(h32, 11) * PRIME32_1;
p++;
}
h32 ^= h32 >> 15;
h32 *= PRIME32_2;
h32 ^= h32 >> 13;
h32 *= PRIME32_3;
h32 ^= h32 >> 16;
return h32;
}
U32 XXH32_intermediateDigest (void* state_in)
{
XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
if ((endian_detected==XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
return XXH32_intermediateDigest_endian(state_in, XXH_littleEndian);
else
return XXH32_intermediateDigest_endian(state_in, XXH_bigEndian);
}
U32 XXH32_digest (void* state_in)
{
U32 h32 = XXH32_intermediateDigest(state_in);
XXH_free(state_in);
return h32;
}
} // namespace rocksdb
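For reference, a minimal usage sketch of the two entry points implemented above: the one-shot XXH32() and the streaming XXH32_init()/XXH32_update()/XXH32_digest() triple (the digest call frees the state). The include path "util/xxhash.h" is an assumption about where the matching declarations live.

#include <cstring>
#include "util/xxhash.h"  // assumed location of the declarations for this file

int main() {
  const char* data = "hello world";
  const int len = (int)strlen(data);

  // One-shot hashing of the whole buffer.
  unsigned int h1 = rocksdb::XXH32(data, len, 0 /* seed */);

  // Streaming: feed the same input in two chunks; XXH32_digest frees the state.
  void* state = rocksdb::XXH32_init(0 /* seed */);
  rocksdb::XXH32_update(state, data, 5);
  rocksdb::XXH32_update(state, data + 5, len - 5);
  unsigned int h2 = rocksdb::XXH32_digest(state);

  return h1 == h2 ? 0 : 1;  // both paths yield the same 32-bit hash
}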

View File

@ -341,12 +341,18 @@ endif()
# Used to run CI build and tests so we can run faster
option(OPTDBG "Build optimized debug build with MSVC" OFF)
option(WITH_RUNTIME_DEBUG "build with debug version of runtime library" ON)
if(MSVC)
if(OPTDBG)
message(STATUS "Debug optimization is enabled")
set(CMAKE_CXX_FLAGS_DEBUG "/Oxt /${RUNTIME_LIBRARY}d")
set(CMAKE_CXX_FLAGS_DEBUG "/Oxt")
else()
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /Od /RTC1 /Gm /${RUNTIME_LIBRARY}d")
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /Od /RTC1 /Gm")
endif()
if(WITH_RUNTIME_DEBUG)
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /${RUNTIME_LIBRARY}d")
else()
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /${RUNTIME_LIBRARY}")
endif()
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /Oxt /Zp8 /Gm- /Gy /${RUNTIME_LIBRARY}")
@ -361,7 +367,7 @@ endif()
option(ROCKSDB_LITE "Build RocksDBLite version" OFF)
if(ROCKSDB_LITE)
add_definitions(-DROCKSDB_LITE)
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-exceptions")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fno-exceptions -Os")
endif()
if(CMAKE_SYSTEM_NAME MATCHES "Cygwin")
@ -466,6 +472,7 @@ set(SOURCES
db/compaction_iterator.cc
db/compaction_job.cc
db/compaction_picker.cc
db/compaction_picker_fifo.cc
db/compaction_picker_universal.cc
db/convenience.cc
db/db_filesnapshot.cc
@ -498,6 +505,8 @@ set(SOURCES
db/merge_helper.cc
db/merge_operator.cc
db/range_del_aggregator.cc
db/range_del_aggregator_v2.cc
db/range_tombstone_fragmenter.cc
db/repair.cc
db/snapshot_impl.cc
db/table_cache.cc
@ -571,6 +580,7 @@ set(SOURCES
table/plain_table_index.cc
table/plain_table_key_coding.cc
table/plain_table_reader.cc
table/sst_file_reader.cc
table/sst_file_writer.cc
table/table_properties.cc
table/two_level_iterator.cc
@ -597,6 +607,7 @@ set(SOURCES
util/filename.cc
util/filter_policy.cc
util/hash.cc
util/jemalloc_nodump_allocator.cc
util/log_buffer.cc
util/murmurhash.cc
util/random.cc
@ -617,6 +628,7 @@ set(SOURCES
utilities/blob_db/blob_compaction_filter.cc
utilities/blob_db/blob_db.cc
utilities/blob_db/blob_db_impl.cc
utilities/blob_db/blob_db_impl_filesnapshot.cc
utilities/blob_db/blob_dump_tool.cc
utilities/blob_db/blob_file.cc
utilities/blob_db/blob_log_reader.cc
@ -677,12 +689,10 @@ set(SOURCES
utilities/write_batch_with_index/write_batch_with_index_internal.cc
$<TARGET_OBJECTS:build_version>)
if(HAVE_SSE42 AND NOT FORCE_SSE42)
if(NOT MSVC)
set_source_files_properties(
util/crc32c.cc
PROPERTIES COMPILE_FLAGS "-msse4.2 -mpclmul")
endif()
if(HAVE_SSE42 AND NOT MSVC)
set_source_files_properties(
util/crc32c.cc
PROPERTIES COMPILE_FLAGS "-msse4.2 -mpclmul")
endif()
if(HAVE_POWER8)
@ -897,6 +907,9 @@ if(WITH_TESTS)
db/perf_context_test.cc
db/plain_table_db_test.cc
db/prefix_test.cc
db/range_del_aggregator_test.cc
db/range_del_aggregator_v2_test.cc
db/range_tombstone_fragmenter_test.cc
db/repair_test.cc
db/table_properties_collector_test.cc
db/version_builder_test.cc
@ -925,6 +938,7 @@ if(WITH_TESTS)
table/data_block_hash_index_test.cc
table/full_filter_block_test.cc
table/merger_test.cc
table/sst_file_reader_test.cc
table/table_test.cc
tools/ldb_cmd_test.cc
tools/reduce_levels_test.cc
@ -944,6 +958,7 @@ if(WITH_TESTS)
util/hash_test.cc
util/heap_test.cc
util/rate_limiter_test.cc
util/repeatable_thread_test.cc
util/slice_transform_test.cc
util/timer_queue_test.cc
util/thread_list_test.cc
@ -986,6 +1001,7 @@ if(WITH_TESTS)
set(BENCHMARKS
cache/cache_bench.cc
memtable/memtablerep_bench.cc
db/range_del_aggregator_bench.cc
tools/db_bench.cc
table/table_reader_bench.cc
utilities/column_aware_encoding_exp.cc

View File

@ -1,13 +1,73 @@
# Rocksdb Change Log
## Unreleased
### Public API Change
### New Features
### Public API Change
### Bug Fixes
* Fix a deadlock caused by compaction and file ingestion waiting for each other in the event of write stalls.
## 5.18.0 (11/30/2018)
### New Features
* Introduced `JemallocNodumpAllocator` memory allocator. When it is in use, the block cache will be excluded from core dumps.
* Introduced `PerfContextByLevel` as part of `PerfContext` which allows storing perf context at each level. Also replaced `__thread` with `thread_local` keyword for perf_context. Added per-level perf context for bloom filter and `Get` query.
* With level_compaction_dynamic_level_bytes = true, the level multiplier may be adjusted automatically when Level 0 to 1 compaction lags behind.
* Introduced DB option `atomic_flush`. If true, RocksDB supports flushing multiple column families and atomically committing the result to MANIFEST. Useful when WAL is disabled.
* Added `num_deletions` and `num_merge_operands` members to `TableProperties`.
* Added "rocksdb.min-obsolete-sst-number-to-keep" DB property that reports the lower bound on SST file numbers that are being kept from deletion, even if the SSTs are obsolete.
* Add xxhash64 checksum support
* Introduced `MemoryAllocator`, which lets the user specify custom memory allocator for block based table.
* Improved `DeleteRange` to prevent read performance degradation. The feature is no longer marked as experimental; a short sketch of it and the other new APIs above follows this list.
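A minimal sketch touring the features above (atomic_flush, DeleteRange, and the new DB property), assuming a v5.18 build; error handling is elided and the database path is arbitrary:

#include <cassert>
#include <cstdint>
#include "rocksdb/db.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  options.atomic_flush = true;  // flush multiple column families atomically

  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/rocksdb_518_demo", &db);
  assert(s.ok());

  // DeleteRange: one range tombstone covering all keys in ["a", "m").
  s = db->DeleteRange(rocksdb::WriteOptions(), db->DefaultColumnFamily(),
                      "a", "m");
  assert(s.ok());

  // New DB property: lower bound on SST file numbers kept from deletion.
  uint64_t min_keep = 0;
  db->GetIntProperty("rocksdb.min-obsolete-sst-number-to-keep", &min_keep);

  delete db;
  return 0;
}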
### Public API Change
* `DBOptions::use_direct_reads` now affects reads issued by `BackupEngine` on the database's SSTs.
* `NO_ITERATORS` is divided into two counters `NO_ITERATOR_CREATED` and `NO_ITERATOR_DELETE`. Both of them only increase now, just like the other counters; see the sketch after this list.
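A small sketch of reading the split counters through the statistics object (ticker names as in the entry above):

#include <iostream>
#include "rocksdb/db.h"
#include "rocksdb/statistics.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  options.statistics = rocksdb::CreateDBStatistics();

  rocksdb::DB* db = nullptr;
  rocksdb::DB::Open(options, "/tmp/rocksdb_stats_demo", &db);

  delete db->NewIterator(rocksdb::ReadOptions());  // create and delete one iterator

  // Only-increasing ticker; the companion deletion counter reads the same way.
  std::cout << options.statistics->getTickerCount(rocksdb::NO_ITERATOR_CREATED)
            << std::endl;

  delete db;
  return 0;
}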
### Bug Fixes
* Fix corner case where a write group leader blocked due to write stall blocks other writers in queue with WriteOptions::no_slowdown set.
* Fix in-memory range tombstone truncation to avoid erroneously covering newer keys at a lower level, and include range tombstones in compacted files whose largest key is the range tombstone's start key.
* Properly set the stop key for a truncated manual CompactRange
* Fix slow flush/compaction when DB contains many snapshots. The problem became noticeable to us in DBs with 100,000+ snapshots, though it will affect others at different thresholds.
* Fix the bug that WriteBatchWithIndex's SeekForPrev() doesn't see the entries with the same key.
* Fix the bug where the user comparator was sometimes fed an InternalKey instead of the user key. The bug manifests during GenerateBottommostFiles.
* Fix a bug in WritePrepared txns where if the number of old snapshots goes beyond the snapshot cache size (128 default) the rest will not be checked when evicting a commit entry from the commit cache.
* Fixed Get correctness bug in the presence of range tombstones where merge operands covered by a range tombstone always result in NotFound.
* Start populating `NO_FILE_CLOSES` ticker statistic, which was always zero previously.
* The default value of NewBloomFilterPolicy()'s argument use_block_based_builder is changed to false. Note that this new default may cause high temporary memory usage when building very large SST files; see the sketch below.
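The sketch below pins the builder choice explicitly rather than relying on the changed default; the second argument of NewBloomFilterPolicy() is the use_block_based_builder flag discussed above:

#include "rocksdb/filter_policy.h"
#include "rocksdb/options.h"
#include "rocksdb/table.h"

void ConfigureBloomFilter(rocksdb::Options* options) {
  rocksdb::BlockBasedTableOptions table_options;
  // false selects the full-filter builder (the new default); pass true to keep
  // the old block-based builder if temporary memory usage during large SST
  // builds is a concern.
  table_options.filter_policy.reset(
      rocksdb::NewBloomFilterPolicy(10 /* bits_per_key */, false));
  options->table_factory.reset(
      rocksdb::NewBlockBasedTableFactory(table_options));
}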
## 5.17.0 (10/05/2018)
### Public API Change
* `OnTableFileCreated` will now be called for empty files generated during compaction. In that case, `TableFileCreationInfo::file_path` will be "(nil)" and `TableFileCreationInfo::file_size` will be zero.
* Add `FlushOptions::allow_write_stall`, which controls whether Flush calls start working immediately, even if it causes user writes to stall, or will wait until flush can be performed without causing a write stall (similar to `CompactRangeOptions::allow_write_stall`). Note that the default value is false, meaning we add delay to Flush calls until stalling can be avoided when possible. This is a behavior change compared to previous RocksDB versions, where Flush calls didn't check whether they might cause a stall; a short sketch follows this list.
* Application using PessimisticTransactionDB is expected to rollback/commit recovered transactions before starting new ones. This assumption is used to skip concurrency control during recovery.
* Expose column family id to `OnCompactionCompleted`.
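A short sketch of opting back into the pre-5.17 behavior, where Flush starts immediately even at the cost of stalling user writes:

#include "rocksdb/db.h"

rocksdb::Status FlushImmediately(rocksdb::DB* db) {
  rocksdb::FlushOptions flush_options;
  flush_options.allow_write_stall = true;  // default is false as of 5.17
  return db->Flush(flush_options);
}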
### New Features
* TransactionOptions::skip_concurrency_control allows pessimistic transactions to skip the overhead of concurrency control. Could be used for optimizing certain transactions or during recovery.
### Bug Fixes
* Avoid creating empty SSTs and subsequently deleting them in certain cases during compaction.
* Sync CURRENT file contents during checkpoint.
## 5.16.3 (10/1/2018)
### Bug Fixes
* Fix crash caused when `CompactFiles` runs with `CompactionOptions::compression == CompressionType::kDisableCompressionOption`. Now, that setting causes the compression type to be chosen according to the column-family-wide compression options.
## 5.16.2 (9/21/2018)
### Bug Fixes
* Fix bug in partition filters with format_version=4.
## 5.16.1 (9/17/2018)
### Bug Fixes
* Remove trace_analyzer_tool from rocksdb_lib target in TARGETS file.
* Fix RocksDB Java build and tests.
* Remove sync point in Block destructor.
## 5.16.0 (8/21/2018)
### Public API Change
* The merge operands are passed to `MergeOperator::ShouldMerge` in the reversed order relative to how they were merged (passed to FullMerge or FullMergeV2) for performance reasons
* GetAllKeyVersions() to take an extra argument of `max_num_ikeys`.
* Using ZSTD dictionary trainer (i.e., setting `CompressionOptions::zstd_max_train_bytes` to a nonzero value) now requires ZSTD version 1.1.3 or later.
### New Features
* Changes the format of index blocks by delta encoding the index values, which are the block handles. This saves the encoding of BlockHandle::offset of the non-head index entries in each restart interval. The feature is backward compatible but not forward compatible. It is disabled by default unless format_version 4 or above is used.
@ -151,7 +211,8 @@
* `BackupableDBOptions::max_valid_backups_to_open == 0` now means no backups will be opened during BackupEngine initialization. Previously this condition disabled limiting backups opened.
* `DBOptions::preserve_deletes` is a new option that allows one to specify that DB should not drop tombstones for regular deletes if they have sequence number larger than what was set by the new API call `DB::SetPreserveDeletesSequenceNumber(SequenceNumber seqnum)`. Disabled by default.
* API call `DB::SetPreserveDeletesSequenceNumber(SequenceNumber seqnum)` was added, users who wish to preserve deletes are expected to periodically call this function to advance the cutoff seqnum (all deletes made before this seqnum can be dropped by DB). It's user responsibility to figure out how to advance the seqnum in the way so the tombstones are kept for the desired period of time, yet are eventually processed in time and don't eat up too much space.
* `ReadOptions::iter_start_seqnum` was added; if set to something > 0 user will see 2 changes in iterators behavior 1) only keys written with sequence larger than this parameter would be returned and 2) the `Slice` returned by iter->key() now points to the memory that keep User-oriented representation of the internal key, rather than user key. New struct `FullKey` was added to represent internal keys, along with a new helper function `ParseFullKey(const Slice& internal_key, FullKey* result);`.
* `ReadOptions::iter_start_seqnum` was added;
if set to something > 0 user will see 2 changes in iterators behavior 1) only keys written with sequence larger than this parameter would be returned and 2) the `Slice` returned by iter->key() now points to the memory that keep User-oriented representation of the internal key, rather than user key. New struct `FullKey` was added to represent internal keys, along with a new helper function `ParseFullKey(const Slice& internal_key, FullKey* result);`.
* Deprecate the trash_dir param in NewSstFileManager; deleted files are now renamed to <name>.trash instead of being moved to a trash directory
* Allow setting a custom trash/DB size ratio limit in the SstFileManager, after which files that are to be scheduled for deletion are deleted immediately, regardless of any delete ratelimit.
* Return an error on write if write_options.sync = true and write_options.disableWAL = true to warn user of inconsistent options. Previously we will not write to WAL and not respecting the sync options in this case.

View File

@ -93,19 +93,38 @@ ifeq ($(MAKECMDGOALS),rocksdbjavastaticpublish)
DEBUG_LEVEL=0
endif
# Lite build flag.
LITE ?= 0
ifeq ($(LITE), 0)
ifneq ($(filter -DROCKSDB_LITE,$(OPT)),)
# Be backward compatible and support older format where OPT=-DROCKSDB_LITE is
# specified instead of LITE=1 on the command line.
LITE=1
endif
else ifeq ($(LITE), 1)
ifeq ($(filter -DROCKSDB_LITE,$(OPT)),)
OPT += -DROCKSDB_LITE
endif
endif
# Figure out optimize level.
ifneq ($(DEBUG_LEVEL), 2)
ifeq ($(LITE), 0)
OPT += -O2
else
OPT += -Os
endif
endif
# compile with -O2 if debug level is not 2
ifneq ($(DEBUG_LEVEL), 2)
OPT += -O2 -fno-omit-frame-pointer
OPT += -fno-omit-frame-pointer
# Skip for archs that don't support -momit-leaf-frame-pointer
ifeq (,$(shell $(CXX) -fsyntax-only -momit-leaf-frame-pointer -xc /dev/null 2>&1))
OPT += -momit-leaf-frame-pointer
endif
endif
ifeq (,$(shell $(CXX) -fsyntax-only -faligned-new -xc++ /dev/null 2>&1))
CXXFLAGS += -faligned-new -DHAVE_ALIGNED_NEW
endif
ifeq (,$(shell $(CXX) -fsyntax-only -maltivec -xc /dev/null 2>&1))
CXXFLAGS += -DHAS_ALTIVEC
CFLAGS += -DHAS_ALTIVEC
@ -322,7 +341,7 @@ endif
ifeq ("$(wildcard $(LUA_LIB))", "") # LUA_LIB does not exist
$(error $(LUA_LIB) does not exist. Try to specify both LUA_PATH and LUA_LIB manually)
endif
LDFLAGS += $(LUA_LIB)
EXEC_LDFLAGS += $(LUA_LIB)
endif
@ -533,6 +552,10 @@ TESTS = \
write_unprepared_transaction_test \
db_universal_compaction_test \
trace_analyzer_test \
repeatable_thread_test \
range_tombstone_fragmenter_test \
range_del_aggregator_v2_test \
sst_file_reader_test \
PARALLEL_TEST = \
backupable_db_test \
@ -582,7 +605,7 @@ TEST_LIBS = \
librocksdb_env_basic_test.a
# TODO: add back forward_iterator_bench, after making it build in all environments.
BENCHMARKS = db_bench table_reader_bench cache_bench memtablerep_bench column_aware_encoding_exp persistent_cache_bench
BENCHMARKS = db_bench table_reader_bench cache_bench memtablerep_bench column_aware_encoding_exp persistent_cache_bench range_del_aggregator_bench
# if user didn't config LIBNAME, set the default
ifeq ($(LIBNAME),)
@ -883,6 +906,7 @@ crash_test: whitebox_crash_test blackbox_crash_test
blackbox_crash_test: db_stress
python -u tools/db_crashtest.py --simple blackbox $(CRASH_TEST_EXT_ARGS)
python -u tools/db_crashtest.py --enable_atomic_flush blackbox $(CRASH_TEST_EXT_ARGS)
python -u tools/db_crashtest.py blackbox $(CRASH_TEST_EXT_ARGS)
ifeq ($(CRASH_TEST_KILL_ODD),)
@ -891,6 +915,8 @@ endif
whitebox_crash_test: db_stress
python -u tools/db_crashtest.py --simple whitebox --random_kill_odd \
$(CRASH_TEST_KILL_ODD) $(CRASH_TEST_EXT_ARGS)
python -u tools/db_crashtest.py --enable_atomic_flush whitebox --random_kill_odd \
$(CRASH_TEST_KILL_ODD) $(CRASH_TEST_EXT_ARGS)
python -u tools/db_crashtest.py whitebox --random_kill_odd \
$(CRASH_TEST_KILL_ODD) $(CRASH_TEST_EXT_ARGS)
@ -1002,8 +1028,10 @@ unity.a: unity.o
$(AM_V_AR)rm -f $@
$(AM_V_at)$(AR) $(ARFLAGS) $@ unity.o
TOOLLIBOBJECTS = $(TOOL_LIB_SOURCES:.cc=.o)
# try compiling db_test with unity
unity_test: db/db_test.o db/db_test_util.o $(TESTHARNESS) unity.a
unity_test: db/db_test.o db/db_test_util.o $(TESTHARNESS) $(TOOLLIBOBJECTS) unity.a
$(AM_LINK)
./unity_test
@ -1548,9 +1576,24 @@ lua_test: utilities/lua/rocks_lua_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTH
range_del_aggregator_test: db/range_del_aggregator_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
$(AM_LINK)
range_del_aggregator_bench: db/range_del_aggregator_bench.o $(LIBOBJECTS) $(TESTUTIL)
$(AM_LINK)
blob_db_test: utilities/blob_db/blob_db_test.o $(LIBOBJECTS) $(TESTHARNESS)
$(AM_LINK)
repeatable_thread_test: util/repeatable_thread_test.o $(LIBOBJECTS) $(TESTHARNESS)
$(AM_LINK)
range_tombstone_fragmenter_test: db/range_tombstone_fragmenter_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
$(AM_LINK)
range_del_aggregator_v2_test: db/range_del_aggregator_v2_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS)
$(AM_LINK)
sst_file_reader_test: table/sst_file_reader_test.o $(LIBOBJECTS) $(TESTHARNESS)
$(AM_LINK)
#-------------------------------------------------
# make install related stuff
INSTALL_PATH ?= /usr/local
@ -1621,7 +1664,7 @@ ZLIB_SHA256 ?= c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1
ZLIB_DOWNLOAD_BASE ?= http://zlib.net
BZIP2_VER ?= 1.0.6
BZIP2_SHA256 ?= a2848f34fcd5d6cf47def00461fcb528a0484d8edef8208d6d2e2909dc61d9cd
BZIP2_DOWNLOAD_BASE ?= http://www.bzip.org
BZIP2_DOWNLOAD_BASE ?= https://web.archive.org/web/20180624184835/http://www.bzip.org
SNAPPY_VER ?= 1.1.4
SNAPPY_SHA256 ?= 134bfe122fd25599bb807bb8130e7ba6d9bdb851e0b16efcb83ac4f5d0b70057
SNAPPY_DOWNLOAD_BASE ?= https://github.com/google/snappy/releases/download
@ -1856,7 +1899,7 @@ $(java_libobjects): jl/%.o: %.cc
rocksdbjava: $(java_all_libobjects)
$(AM_V_GEN)cd java;$(MAKE) javalib;
$(AM_V_at)rm -f ./java/target/$(ROCKSDBJNILIB)
$(AM_V_at)$(CXX) $(CXXFLAGS) -I./java/. $(JAVA_INCLUDE) -shared -fPIC -o ./java/target/$(ROCKSDBJNILIB) $(JNI_NATIVE_SOURCES) $(java_libobjects) $(JAVA_LDFLAGS) $(COVERAGEFLAGS)
$(AM_V_at)$(CXX) $(CXXFLAGS) -I./java/. $(JAVA_INCLUDE) -shared -fPIC -o ./java/target/$(ROCKSDBJNILIB) $(JNI_NATIVE_SOURCES) $(java_all_libobjects) $(JAVA_LDFLAGS) $(COVERAGEFLAGS)
$(AM_V_at)cd java;jar -cf target/$(ROCKSDB_JAR) HISTORY*.md
$(AM_V_at)cd java/target;jar -uf $(ROCKSDB_JAR) $(ROCKSDBJNILIB)
$(AM_V_at)cd java/target/classes;jar -uf ../$(ROCKSDB_JAR) org/rocksdb/*.class org/rocksdb/util/*.class

View File

@ -1,3 +1,5 @@
load("@fbcode_macros//build_defs:auto_headers.bzl", "AutoHeaders")
REPO_PATH = package_name() + "/"
BUCK_BINS = "buck-out/gen/" + REPO_PATH
@ -65,6 +67,16 @@ is_opt_mode = build_mode.startswith("opt")
if is_opt_mode:
rocksdb_compiler_flags.append("-DNDEBUG")
default_allocator = read_config("fbcode", "default_allocator")
sanitizer = read_config("fbcode", "sanitizer")
# Let RocksDB aware of jemalloc existence.
# Do not enable it if sanitizer presents.
if is_opt_mode and default_allocator.startswith("jemalloc") and sanitizer == "":
rocksdb_compiler_flags.append("-DROCKSDB_JEMALLOC")
rocksdb_external_deps.append(("jemalloc", None, "headers"))
cpp_library(
name = "rocksdb_lib",
srcs = [
@ -79,6 +91,7 @@ cpp_library(
"db/compaction_iterator.cc",
"db/compaction_job.cc",
"db/compaction_picker.cc",
"db/compaction_picker_fifo.cc",
"db/compaction_picker_universal.cc",
"db/convenience.cc",
"db/db_filesnapshot.cc",
@ -111,6 +124,8 @@ cpp_library(
"db/merge_helper.cc",
"db/merge_operator.cc",
"db/range_del_aggregator.cc",
"db/range_del_aggregator_v2.cc",
"db/range_tombstone_fragmenter.cc",
"db/repair.cc",
"db/snapshot_impl.cc",
"db/table_cache.cc",
@ -188,6 +203,7 @@ cpp_library(
"table/plain_table_index.cc",
"table/plain_table_key_coding.cc",
"table/plain_table_reader.cc",
"table/sst_file_reader.cc",
"table/sst_file_writer.cc",
"table/table_properties.cc",
"table/two_level_iterator.cc",
@ -195,7 +211,6 @@ cpp_library(
"tools/ldb_cmd.cc",
"tools/ldb_tool.cc",
"tools/sst_dump_tool.cc",
"tools/trace_analyzer_tool.cc",
"util/arena.cc",
"util/auto_roll_logger.cc",
"util/bloom.cc",
@ -214,6 +229,7 @@ cpp_library(
"util/filename.cc",
"util/filter_policy.cc",
"util/hash.cc",
"util/jemalloc_nodump_allocator.cc",
"util/log_buffer.cc",
"util/murmurhash.cc",
"util/random.cc",
@ -233,6 +249,7 @@ cpp_library(
"utilities/blob_db/blob_compaction_filter.cc",
"utilities/blob_db/blob_db.cc",
"utilities/blob_db/blob_db_impl.cc",
"utilities/blob_db/blob_db_impl_filesnapshot.cc",
"utilities/blob_db/blob_dump_tool.cc",
"utilities/blob_db/blob_file.cc",
"utilities/blob_db/blob_log_format.cc",
@ -290,7 +307,7 @@ cpp_library(
"utilities/write_batch_with_index/write_batch_with_index.cc",
"utilities/write_batch_with_index/write_batch_with_index_internal.cc",
],
headers = AutoHeaders.RECURSIVE_GLOB,
auto_headers = AutoHeaders.RECURSIVE_GLOB,
arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,
compiler_flags = rocksdb_compiler_flags,
preprocessor_flags = rocksdb_preprocessor_flags,
@ -303,6 +320,7 @@ cpp_library(
srcs = [
"db/db_test_util.cc",
"table/mock_table.cc",
"tools/trace_analyzer_tool.cc",
"util/fault_injection_test_env.cc",
"util/testharness.cc",
"util/testutil.cc",
@ -311,7 +329,7 @@ cpp_library(
"utilities/col_buf_encoder.cc",
"utilities/column_aware_encoding_util.cc",
],
headers = AutoHeaders.RECURSIVE_GLOB,
auto_headers = AutoHeaders.RECURSIVE_GLOB,
arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,
compiler_flags = rocksdb_compiler_flags,
preprocessor_flags = rocksdb_preprocessor_flags,
@ -323,9 +341,10 @@ cpp_library(
name = "rocksdb_tools_lib",
srcs = [
"tools/db_bench_tool.cc",
"tools/trace_analyzer_tool.cc",
"util/testutil.cc",
],
headers = AutoHeaders.RECURSIVE_GLOB,
auto_headers = AutoHeaders.RECURSIVE_GLOB,
arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,
compiler_flags = rocksdb_compiler_flags,
preprocessor_flags = rocksdb_preprocessor_flags,
@ -336,7 +355,7 @@ cpp_library(
cpp_library(
name = "env_basic_test_lib",
srcs = ["env/env_basic_test.cc"],
headers = AutoHeaders.RECURSIVE_GLOB,
auto_headers = AutoHeaders.RECURSIVE_GLOB,
arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,
compiler_flags = rocksdb_compiler_flags,
preprocessor_flags = rocksdb_preprocessor_flags,
@ -376,11 +395,6 @@ ROCKS_TESTS = [
"table/block_based_filter_block_test.cc",
"serial",
],
[
"data_block_hash_index_test",
"table/data_block_hash_index_test.cc",
"serial",
],
[
"block_test",
"table/block_test.cc",
@ -506,6 +520,11 @@ ROCKS_TESTS = [
"table/cuckoo_table_reader_test.cc",
"serial",
],
[
"data_block_hash_index_test",
"table/data_block_hash_index_test.cc",
"serial",
],
[
"date_tiered_test",
"utilities/date_tiered/date_tiered_test.cc",
@ -916,6 +935,16 @@ ROCKS_TESTS = [
"db/range_del_aggregator_test.cc",
"serial",
],
[
"range_del_aggregator_v2_test",
"db/range_del_aggregator_v2_test.cc",
"serial",
],
[
"range_tombstone_fragmenter_test",
"db/range_tombstone_fragmenter_test.cc",
"serial",
],
[
"rate_limiter_test",
"util/rate_limiter_test.cc",
@ -931,6 +960,11 @@ ROCKS_TESTS = [
"db/repair_test.cc",
"serial",
],
[
"repeatable_thread_test",
"util/repeatable_thread_test.cc",
"serial",
],
[
"sim_cache_test",
"utilities/simulator_cache/sim_cache_test.cc",
@ -957,8 +991,8 @@ ROCKS_TESTS = [
"serial",
],
[
"trace_analyzer_test",
"tools/trace_analyzer_test.cc",
"sst_file_reader_test",
"table/sst_file_reader_test.cc",
"serial",
],
[
@ -996,6 +1030,11 @@ ROCKS_TESTS = [
"util/timer_queue_test.cc",
"serial",
],
[
"trace_analyzer_test",
"tools/trace_analyzer_test.cc",
"serial",
],
[
"transaction_test",
"utilities/transactions/transaction_test.cc",
@ -1078,19 +1117,19 @@ if not is_opt_mode:
ttype = "gtest" if test_cfg[2] == "parallel" else "simple"
test_bin = test_name + "_bin"
cpp_binary (
name = test_bin,
srcs = [test_cc],
deps = [":rocksdb_test_lib"],
preprocessor_flags = rocksdb_preprocessor_flags,
arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,
compiler_flags = rocksdb_compiler_flags,
external_deps = rocksdb_external_deps,
cpp_binary(
name = test_bin,
srcs = [test_cc],
arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,
compiler_flags = rocksdb_compiler_flags,
preprocessor_flags = rocksdb_preprocessor_flags,
deps = [":rocksdb_test_lib"],
external_deps = rocksdb_external_deps,
)
custom_unittest(
name = test_name,
type = ttype,
deps = [":" + test_bin],
command = [TEST_RUNNER, BUCK_BINS + test_bin]
name = test_name,
command = [TEST_RUNNER, BUCK_BINS + test_bin],
type = ttype,
deps = [":" + test_bin],
)

View File

@ -12,6 +12,7 @@ At Facebook, we use RocksDB as storage engines in multiple data management servi
6. LogDevice -- a distributed data store for logs [2]
[1] https://research.facebook.com/publications/realtime-data-processing-at-facebook/
[2] https://code.facebook.com/posts/357056558062811/logdevice-a-distributed-data-store-for-logs/
## LinkedIn
@ -88,3 +89,6 @@ LzLabs is using RocksDB as a storage engine in their multi-database distributed
## ProfaneDB
[ProfaneDB](https://profanedb.gitlab.io/) is a database for Protocol Buffers, and uses RocksDB for storage. It is accessible via gRPC, and the schema is defined directly using `.proto` files.
## IOTA Foundation
[IOTA Foundation](https://www.iota.org/) is using RocksDB in the [IOTA Reference Implementation (IRI)](https://github.com/iotaledger/iri) to store the local state of the Tangle. The Tangle is the first open-source distributed ledger powering the future of the Internet of Things.

View File

@ -109,12 +109,14 @@ def generate_targets(repo_path):
"rocksdb_test_lib",
src_mk.get("MOCK_LIB_SOURCES", []) +
src_mk.get("TEST_LIB_SOURCES", []) +
src_mk.get("EXP_LIB_SOURCES", []),
src_mk.get("EXP_LIB_SOURCES", []) +
src_mk.get("ANALYZER_LIB_SOURCES", []),
[":rocksdb_lib"])
# rocksdb_tools_lib
TARGETS.add_library(
"rocksdb_tools_lib",
src_mk.get("BENCH_LIB_SOURCES", []) +
src_mk.get("ANALYZER_LIB_SOURCES", []) +
["util/testutil.cc"],
[":rocksdb_lib"])

View File

@ -10,7 +10,7 @@ def pretty_list(lst, indent=8):
if len(lst) == 1:
return "\"%s\"" % lst[0]
separator = "\",\n%s\"" % (" " * indent)
res = separator.join(sorted(lst))
res = "\n" + (" " * indent) + "\"" + res + "\",\n" + (" " * (indent - 4))
@ -31,13 +31,16 @@ class TARGETSBuilder:
self.targets_file.close()
def add_library(self, name, srcs, deps=None, headers=None):
headers_attr_prefix = ""
if headers is None:
headers_attr_prefix = "auto_"
headers = "AutoHeaders.RECURSIVE_GLOB"
self.targets_file.write(targets_cfg.library_template % (
name,
pretty_list(srcs),
headers,
pretty_list(deps)))
self.targets_file.write(targets_cfg.library_template.format(
name=name,
srcs=pretty_list(srcs),
headers_attr_prefix=headers_attr_prefix,
headers=headers,
deps=pretty_list(deps)))
self.total_lib = self.total_lib + 1
def add_binary(self, name, srcs, deps=None):

View File

@ -2,7 +2,9 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
rocksdb_target_header = """REPO_PATH = package_name() + "/"
rocksdb_target_header = """load("@fbcode_macros//build_defs:auto_headers.bzl", "AutoHeaders")
REPO_PATH = package_name() + "/"
BUCK_BINS = "buck-out/gen/" + REPO_PATH
@ -68,18 +70,28 @@ is_opt_mode = build_mode.startswith("opt")
# doesn't harm and avoid forgetting to add it.
if is_opt_mode:
rocksdb_compiler_flags.append("-DNDEBUG")
default_allocator = read_config("fbcode", "default_allocator")
sanitizer = read_config("fbcode", "sanitizer")
# Let RocksDB aware of jemalloc existence.
# Do not enable it if sanitizer presents.
if is_opt_mode and default_allocator.startswith("jemalloc") and sanitizer == "":
rocksdb_compiler_flags.append("-DROCKSDB_JEMALLOC")
rocksdb_external_deps.append(("jemalloc", None, "headers"))
"""
library_template = """
cpp_library(
name = "%s",
srcs = [%s],
headers = %s,
name = "{name}",
srcs = [{srcs}],
{headers_attr_prefix}headers = {headers},
arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,
compiler_flags = rocksdb_compiler_flags,
preprocessor_flags = rocksdb_preprocessor_flags,
deps = [%s],
deps = [{deps}],
external_deps = rocksdb_external_deps,
)
"""
@ -118,21 +130,20 @@ if not is_opt_mode:
ttype = "gtest" if test_cfg[2] == "parallel" else "simple"
test_bin = test_name + "_bin"
cpp_binary (
name = test_bin,
srcs = [test_cc],
deps = [":rocksdb_test_lib"],
preprocessor_flags = rocksdb_preprocessor_flags,
arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,
compiler_flags = rocksdb_compiler_flags,
external_deps = rocksdb_external_deps,
cpp_binary(
name = test_bin,
srcs = [test_cc],
arch_preprocessor_flags = rocksdb_arch_preprocessor_flags,
compiler_flags = rocksdb_compiler_flags,
preprocessor_flags = rocksdb_preprocessor_flags,
deps = [":rocksdb_test_lib"],
external_deps = rocksdb_external_deps,
)
custom_unittest(
name = test_name,
type = ttype,
deps = [":" + test_bin],
command = [TEST_RUNNER, BUCK_BINS + test_bin]
name = test_name,
command = [TEST_RUNNER, BUCK_BINS + test_bin],
type = ttype,
deps = [":" + test_bin],
)
"""

View File

@ -7,12 +7,12 @@
// Name of the environment variables which need to be set by the entity which
// triggers continuous runs so that code at the end of the file gets executed
// and Sandcastle run starts.
define("ENV_POST_RECEIVE_HOOK", "POST_RECEIVE_HOOK");
define("ENV_HTTPS_APP_VALUE", "HTTPS_APP_VALUE");
define("ENV_HTTPS_TOKEN_VALUE", "HTTPS_TOKEN_VALUE");
const ENV_POST_RECEIVE_HOOK = "POST_RECEIVE_HOOK";
const ENV_HTTPS_APP_VALUE = "HTTPS_APP_VALUE";
const ENV_HTTPS_TOKEN_VALUE = "HTTPS_TOKEN_VALUE";
define("PRIMARY_TOKEN_FILE", '/home/krad/.sandcastle');
define("CONT_RUN_ALIAS", "leveldb");
const PRIMARY_TOKEN_FILE = '/home/krad/.sandcastle';
const CONT_RUN_ALIAS = "leveldb";
//////////////////////////////////////////////////////////////////////
/* Run tests in sandcastle */

View File

@ -44,7 +44,7 @@ if test -z "$OUTPUT"; then
fi
# we depend on C++11
PLATFORM_CXXFLAGS="-std=c++14"
PLATFORM_CXXFLAGS="-std=c++11"
# we currently depend on POSIX platform
COMMON_FLAGS="-DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX"
@ -474,6 +474,17 @@ EOF
COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_SCHED_GETCPU_PRESENT"
fi
fi
if ! test $ROCKSDB_DISABLE_ALIGNED_NEW; then
# Test whether c++17 aligned-new is supported
$CXX $PLATFORM_CXXFLAGS -faligned-new -x c++ - -o /dev/null 2>/dev/null <<EOF
struct alignas(1024) t {int a;};
int main() {}
EOF
if [ "$?" = 0 ]; then
PLATFORM_CXXFLAGS="$PLATFORM_CXXFLAGS -faligned-new -DHAVE_ALIGNED_NEW"
fi
fi
fi
# TODO(tec): Fix -Wshorten-64-to-32 errors on FreeBSD and enable the warning.

View File

@ -5082,8 +5082,8 @@ sub openoutputfiles {
# Set reading FD if using --group (--ungroup does not need)
for my $fdno (1,2) {
# Re-open the file for reading
# so fdw can be closed seperately
# and fdr can be seeked seperately (for --line-buffer)
# so fdw can be closed separately
# and fdr can be seeked separately (for --line-buffer)
open(my $fdr,"<", $self->fh($fdno,'name')) ||
::die_bug("fdr: Cannot open ".$self->fh($fdno,'name'));
$self->set_fh($fdno,'r',$fdr);

View File

@ -85,7 +85,6 @@ NON_SHM="TMPD=/tmp/rocksdb_test_tmp"
GCC_481="ROCKSDB_FBCODE_BUILD_WITH_481=1"
ASAN="COMPILE_WITH_ASAN=1"
CLANG="USE_CLANG=1"
LITE="OPT=\"-DROCKSDB_LITE -g\""
TSAN="COMPILE_WITH_TSAN=1"
UBSAN="COMPILE_WITH_UBSAN=1"
TSAN_CRASH='CRASH_TEST_EXT_ARGS="--compression_type=zstd --log2_keys_per_lock=22"'
@ -345,7 +344,7 @@ LITE_BUILD_COMMANDS="[
$CLEANUP_ENV,
{
'name':'Build RocksDB debug version',
'shell':'$LITE make J=1 all check || $CONTRUN_NAME=lite $TASK_CREATION_TOOL',
'shell':'make J=1 LITE=1 all check || $CONTRUN_NAME=lite $TASK_CREATION_TOOL',
'user':'root',
$PARSER
},
@ -663,12 +662,12 @@ run_regression()
# === lite build ===
make clean
OPT=-DROCKSDB_LITE make -j$(nproc) static_lib
make LITE=1 -j$(nproc) static_lib
send_size_to_ods static_lib_lite $(stat --printf="%s" librocksdb.a)
strip librocksdb.a
send_size_to_ods static_lib_lite_stripped $(stat --printf="%s" librocksdb.a)
OPT=-DROCKSDB_LITE make -j$(nproc) shared_lib
make LITE=1 -j$(nproc) shared_lib
send_size_to_ods shared_lib_lite $(stat --printf="%s" `readlink -f librocksdb.so`)
strip `readlink -f librocksdb.so`
send_size_to_ods shared_lib_lite_stripped $(stat --printf="%s" `readlink -f librocksdb.so`)

View File

@ -64,8 +64,8 @@ class CacheTest : public testing::TestWithParam<std::string> {
std::vector<int> deleted_keys_;
std::vector<int> deleted_values_;
shared_ptr<Cache> cache_;
shared_ptr<Cache> cache2_;
std::shared_ptr<Cache> cache_;
std::shared_ptr<Cache> cache2_;
CacheTest()
: cache_(NewCache(kCacheSize, kNumShardBits, false)),
@ -145,7 +145,7 @@ class CacheTest : public testing::TestWithParam<std::string> {
CacheTest* CacheTest::current_;
TEST_P(CacheTest, UsageTest) {
// cache is shared_ptr and will be automatically cleaned up.
// cache is std::shared_ptr and will be automatically cleaned up.
const uint64_t kCapacity = 100000;
auto cache = NewCache(kCapacity, 8, false);
@ -173,7 +173,7 @@ TEST_P(CacheTest, UsageTest) {
}
TEST_P(CacheTest, PinnedUsageTest) {
// cache is shared_ptr and will be automatically cleaned up.
// cache is std::shared_ptr and will be automatically cleaned up.
const uint64_t kCapacity = 100000;
auto cache = NewCache(kCapacity, 8, false);

View File

@ -461,8 +461,10 @@ std::string LRUCacheShard::GetPrintableOptions() const {
}
LRUCache::LRUCache(size_t capacity, int num_shard_bits,
bool strict_capacity_limit, double high_pri_pool_ratio)
: ShardedCache(capacity, num_shard_bits, strict_capacity_limit) {
bool strict_capacity_limit, double high_pri_pool_ratio,
std::shared_ptr<MemoryAllocator> allocator)
: ShardedCache(capacity, num_shard_bits, strict_capacity_limit,
std::move(allocator)) {
num_shards_ = 1 << num_shard_bits;
shards_ = reinterpret_cast<LRUCacheShard*>(
port::cacheline_aligned_alloc(sizeof(LRUCacheShard) * num_shards_));
@ -537,12 +539,14 @@ double LRUCache::GetHighPriPoolRatio() {
std::shared_ptr<Cache> NewLRUCache(const LRUCacheOptions& cache_opts) {
return NewLRUCache(cache_opts.capacity, cache_opts.num_shard_bits,
cache_opts.strict_capacity_limit,
cache_opts.high_pri_pool_ratio);
cache_opts.high_pri_pool_ratio,
cache_opts.memory_allocator);
}
std::shared_ptr<Cache> NewLRUCache(size_t capacity, int num_shard_bits,
bool strict_capacity_limit,
double high_pri_pool_ratio) {
std::shared_ptr<Cache> NewLRUCache(
size_t capacity, int num_shard_bits, bool strict_capacity_limit,
double high_pri_pool_ratio,
std::shared_ptr<MemoryAllocator> memory_allocator) {
if (num_shard_bits >= 20) {
return nullptr; // the cache cannot be sharded into too many fine pieces
}
@ -554,7 +558,8 @@ std::shared_ptr<Cache> NewLRUCache(size_t capacity, int num_shard_bits,
num_shard_bits = GetDefaultCacheShardBits(capacity);
}
return std::make_shared<LRUCache>(capacity, num_shard_bits,
strict_capacity_limit, high_pri_pool_ratio);
strict_capacity_limit, high_pri_pool_ratio,
std::move(memory_allocator));
}
} // namespace rocksdb
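A hypothetical sketch of wiring a custom allocator through the new LRUCacheOptions::memory_allocator field plumbed in above. Only Name() is exercised in this diff (by GetPrintableOptions()); Allocate()/Deallocate() and the memory_allocator.h header are assumptions about the rest of the rocksdb::MemoryAllocator interface.

#include <memory>
#include "rocksdb/cache.h"
#include "rocksdb/memory_allocator.h"  // assumed header for MemoryAllocator

// Toy allocator that simply forwards to global operator new/delete.
class NewDeleteAllocator : public rocksdb::MemoryAllocator {
 public:
  const char* Name() const override { return "NewDeleteAllocator"; }
  void* Allocate(size_t size) override { return ::operator new(size); }
  void Deallocate(void* p) override { ::operator delete(p); }
};

std::shared_ptr<rocksdb::Cache> MakeCacheWithAllocator() {
  rocksdb::LRUCacheOptions cache_opts;
  cache_opts.capacity = 1 << 20;  // 1 MiB
  cache_opts.memory_allocator = std::make_shared<NewDeleteAllocator>();
  return rocksdb::NewLRUCache(cache_opts);
}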

View File

@ -279,7 +279,8 @@ class ALIGN_AS(CACHE_LINE_SIZE) LRUCacheShard : public CacheShard {
class LRUCache : public ShardedCache {
public:
LRUCache(size_t capacity, int num_shard_bits, bool strict_capacity_limit,
double high_pri_pool_ratio);
double high_pri_pool_ratio,
std::shared_ptr<MemoryAllocator> memory_allocator = nullptr);
virtual ~LRUCache();
virtual const char* Name() const override { return "LRUCache"; }
virtual CacheShard* GetShard(int shard) override;

View File

@ -20,8 +20,10 @@
namespace rocksdb {
ShardedCache::ShardedCache(size_t capacity, int num_shard_bits,
bool strict_capacity_limit)
: num_shard_bits_(num_shard_bits),
bool strict_capacity_limit,
std::shared_ptr<MemoryAllocator> allocator)
: Cache(std::move(allocator)),
num_shard_bits_(num_shard_bits),
capacity_(capacity),
strict_capacity_limit_(strict_capacity_limit),
last_id_(1) {}
@ -142,6 +144,9 @@ std::string ShardedCache::GetPrintableOptions() const {
strict_capacity_limit_);
ret.append(buffer);
}
snprintf(buffer, kBufferSize, " memory_allocator : %s\n",
memory_allocator() ? memory_allocator()->Name() : "None");
ret.append(buffer);
ret.append(GetShard(0)->GetPrintableOptions());
return ret;
}

View File

@ -47,7 +47,8 @@ class CacheShard {
// Keys are sharded by the highest num_shard_bits bits of hash value.
class ShardedCache : public Cache {
public:
ShardedCache(size_t capacity, int num_shard_bits, bool strict_capacity_limit);
ShardedCache(size_t capacity, int num_shard_bits, bool strict_capacity_limit,
std::shared_ptr<MemoryAllocator> memory_allocator = nullptr);
virtual ~ShardedCache() = default;
virtual const char* Name() const override = 0;
virtual CacheShard* GetShard(int shard) = 0;
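The comment above fixes the sharding rule; as a sketch, the hash-to-shard mapping looks like the following (an assumed mirror of ShardedCache's internals, not code copied from it):

#include <cstdint>

// Select a shard from the highest num_shard_bits bits of a 32-bit hash value.
inline uint32_t ShardFor(uint32_t hash, int num_shard_bits) {
  return num_shard_bits > 0 ? (hash >> (32 - num_shard_bits)) : 0;
}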

View File

@ -18,6 +18,7 @@
#include "db/event_helpers.h"
#include "db/internal_stats.h"
#include "db/merge_helper.h"
#include "db/range_del_aggregator_v2.h"
#include "db/table_cache.h"
#include "db/version_edit.h"
#include "monitoring/iostats_context_imp.h"
@ -65,8 +66,9 @@ Status BuildTable(
const std::string& dbname, Env* env, const ImmutableCFOptions& ioptions,
const MutableCFOptions& mutable_cf_options, const EnvOptions& env_options,
TableCache* table_cache, InternalIterator* iter,
std::unique_ptr<InternalIterator> range_del_iter, FileMetaData* meta,
const InternalKeyComparator& internal_comparator,
std::vector<std::unique_ptr<FragmentedRangeTombstoneIterator>>
range_del_iters,
FileMetaData* meta, const InternalKeyComparator& internal_comparator,
const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>*
int_tbl_prop_collector_factories,
uint32_t column_family_id, const std::string& column_family_name,
@ -86,12 +88,10 @@ Status BuildTable(
Status s;
meta->fd.file_size = 0;
iter->SeekToFirst();
std::unique_ptr<RangeDelAggregator> range_del_agg(
new RangeDelAggregator(internal_comparator, snapshots));
s = range_del_agg->AddTombstones(std::move(range_del_iter));
if (!s.ok()) {
// may be non-ok if a range tombstone key is unparsable
return s;
std::unique_ptr<CompactionRangeDelAggregatorV2> range_del_agg(
new CompactionRangeDelAggregatorV2(&internal_comparator, snapshots));
for (auto& range_del_iter : range_del_iters) {
range_del_agg->AddTombstones(std::move(range_del_iter));
}
std::string fname = TableFileName(ioptions.cf_paths, meta->fd.GetNumber(),
@ -104,9 +104,9 @@ Status BuildTable(
if (iter->Valid() || !range_del_agg->IsEmpty()) {
TableBuilder* builder;
unique_ptr<WritableFileWriter> file_writer;
std::unique_ptr<WritableFileWriter> file_writer;
{
unique_ptr<WritableFile> file;
std::unique_ptr<WritableFile> file;
#ifndef NDEBUG
bool use_direct_writes = env_options.use_direct_writes;
TEST_SYNC_POINT_CALLBACK("BuildTable:create_file", &use_direct_writes);
@ -121,8 +121,9 @@ Status BuildTable(
file->SetIOPriority(io_priority);
file->SetWriteLifeTimeHint(write_hint);
file_writer.reset(new WritableFileWriter(std::move(file), env_options,
ioptions.statistics));
file_writer.reset(new WritableFileWriter(std::move(file), fname,
env_options, ioptions.statistics,
ioptions.listeners));
builder = NewTableBuilder(
ioptions, mutable_cf_options, internal_comparator,
int_tbl_prop_collector_factories, column_family_id,
@ -157,8 +158,10 @@ Status BuildTable(
}
}
for (auto it = range_del_agg->NewIterator(); it->Valid(); it->Next()) {
auto tombstone = it->Tombstone();
auto range_del_it = range_del_agg->NewIterator();
for (range_del_it->SeekToFirst(); range_del_it->Valid();
range_del_it->Next()) {
auto tombstone = range_del_it->Tombstone();
auto kv = tombstone.Serialize();
builder->Add(kv.first.Encode(), kv.second);
meta->UpdateBoundariesForRange(kv.first, tombstone.SerializeEndKey(),

View File

@ -9,6 +9,7 @@
#include <string>
#include <utility>
#include <vector>
#include "db/range_tombstone_fragmenter.h"
#include "db/table_properties_collector.h"
#include "options/cf_options.h"
#include "rocksdb/comparator.h"
@ -65,8 +66,9 @@ extern Status BuildTable(
const std::string& dbname, Env* env, const ImmutableCFOptions& options,
const MutableCFOptions& mutable_cf_options, const EnvOptions& env_options,
TableCache* table_cache, InternalIterator* iter,
std::unique_ptr<InternalIterator> range_del_iter, FileMetaData* meta,
const InternalKeyComparator& internal_comparator,
std::vector<std::unique_ptr<FragmentedRangeTombstoneIterator>>
range_del_iters,
FileMetaData* meta, const InternalKeyComparator& internal_comparator,
const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>*
int_tbl_prop_collector_factories,
uint32_t column_family_id, const std::string& column_family_name,

View File

@ -33,6 +33,7 @@
#include "rocksdb/utilities/backupable_db.h"
#include "rocksdb/utilities/checkpoint.h"
#include "rocksdb/utilities/db_ttl.h"
#include "rocksdb/utilities/memory_util.h"
#include "rocksdb/utilities/optimistic_transaction_db.h"
#include "rocksdb/utilities/transaction.h"
#include "rocksdb/utilities/transaction_db.h"
@ -41,6 +42,10 @@
#include "rocksdb/perf_context.h"
#include "utilities/merge_operators.h"
#include <vector>
#include <unordered_set>
#include <map>
using rocksdb::BytewiseComparator;
using rocksdb::Cache;
using rocksdb::ColumnFamilyDescriptor;
@ -108,8 +113,12 @@ using rocksdb::TransactionLogIterator;
using rocksdb::BatchResult;
using rocksdb::PerfLevel;
using rocksdb::PerfContext;
using rocksdb::MemoryUtil;
using std::shared_ptr;
using std::vector;
using std::unordered_set;
using std::map;
extern "C" {
@ -142,14 +151,20 @@ struct rocksdb_writablefile_t { WritableFile* rep; };
struct rocksdb_wal_iterator_t { TransactionLogIterator* rep; };
struct rocksdb_wal_readoptions_t { TransactionLogIterator::ReadOptions rep; };
struct rocksdb_filelock_t { FileLock* rep; };
struct rocksdb_logger_t { shared_ptr<Logger> rep; };
struct rocksdb_cache_t { shared_ptr<Cache> rep; };
struct rocksdb_logger_t {
std::shared_ptr<Logger> rep;
};
struct rocksdb_cache_t {
std::shared_ptr<Cache> rep;
};
struct rocksdb_livefiles_t { std::vector<LiveFileMetaData> rep; };
struct rocksdb_column_family_handle_t { ColumnFamilyHandle* rep; };
struct rocksdb_envoptions_t { EnvOptions rep; };
struct rocksdb_ingestexternalfileoptions_t { IngestExternalFileOptions rep; };
struct rocksdb_sstfilewriter_t { SstFileWriter* rep; };
struct rocksdb_ratelimiter_t { shared_ptr<RateLimiter> rep; };
struct rocksdb_ratelimiter_t {
std::shared_ptr<RateLimiter> rep;
};
struct rocksdb_perfcontext_t { PerfContext* rep; };
struct rocksdb_pinnableslice_t {
PinnableSlice rep;
@ -2402,7 +2417,7 @@ void rocksdb_options_set_bytes_per_sync(
void rocksdb_options_set_writable_file_max_buffer_size(rocksdb_options_t* opt,
uint64_t v) {
opt->rep.writable_file_max_buffer_size = v;
opt->rep.writable_file_max_buffer_size = static_cast<size_t>(v);
}
void rocksdb_options_set_allow_concurrent_memtable_write(rocksdb_options_t* opt,
@ -2433,11 +2448,13 @@ void rocksdb_options_set_max_write_buffer_number_to_maintain(
opt->rep.max_write_buffer_number_to_maintain = n;
}
void rocksdb_options_set_enable_pipelined_write(rocksdb_options_t* opt, unsigned char v) {
void rocksdb_options_set_enable_pipelined_write(rocksdb_options_t* opt,
unsigned char v) {
opt->rep.enable_pipelined_write = v;
}
void rocksdb_options_set_max_subcompactions(rocksdb_options_t* opt, uint32_t n) {
void rocksdb_options_set_max_subcompactions(rocksdb_options_t* opt,
uint32_t n) {
opt->rep.max_subcompactions = n;
}
@ -3533,6 +3550,18 @@ const char* rocksdb_livefiles_largestkey(
return lf->rep[index].largestkey.data();
}
uint64_t rocksdb_livefiles_entries(
const rocksdb_livefiles_t* lf,
int index) {
return lf->rep[index].num_entries;
}
uint64_t rocksdb_livefiles_deletions(
const rocksdb_livefiles_t* lf,
int index) {
return lf->rep[index].num_deletions;
}
extern void rocksdb_livefiles_destroy(
const rocksdb_livefiles_t* lf) {
delete lf;
@ -4099,6 +4128,98 @@ const char* rocksdb_pinnableslice_value(const rocksdb_pinnableslice_t* v,
*vlen = v->rep.size();
return v->rep.data();
}
// container to keep databases and caches in order to use rocksdb::MemoryUtil
struct rocksdb_memory_consumers_t {
std::vector<rocksdb_t*> dbs;
std::unordered_set<rocksdb_cache_t*> caches;
};
// initializes new container of memory consumers
rocksdb_memory_consumers_t* rocksdb_memory_consumers_create() {
return new rocksdb_memory_consumers_t;
}
// adds a database to the container of memory consumers
void rocksdb_memory_consumers_add_db(rocksdb_memory_consumers_t* consumers,
rocksdb_t* db) {
consumers->dbs.push_back(db);
}
// adds cache to the container of memory consumers
void rocksdb_memory_consumers_add_cache(rocksdb_memory_consumers_t* consumers,
rocksdb_cache_t* cache) {
consumers->caches.insert(cache);
}
// deletes container with memory consumers
void rocksdb_memory_consumers_destroy(rocksdb_memory_consumers_t* consumers) {
delete consumers;
}
// contains memory usage statistics provided by rocksdb::MemoryUtil
struct rocksdb_memory_usage_t {
uint64_t mem_table_total;
uint64_t mem_table_unflushed;
uint64_t mem_table_readers_total;
uint64_t cache_total;
};
// estimates amount of memory occupied by consumers (dbs and caches)
rocksdb_memory_usage_t* rocksdb_approximate_memory_usage_create(
rocksdb_memory_consumers_t* consumers, char** errptr) {
vector<DB*> dbs;
for (auto db : consumers->dbs) {
dbs.push_back(db->rep);
}
unordered_set<const Cache*> cache_set;
for (auto cache : consumers->caches) {
cache_set.insert(const_cast<const Cache*>(cache->rep.get()));
}
std::map<rocksdb::MemoryUtil::UsageType, uint64_t> usage_by_type;
auto status = MemoryUtil::GetApproximateMemoryUsageByType(dbs, cache_set,
&usage_by_type);
if (SaveError(errptr, status)) {
return nullptr;
}
auto result = new rocksdb_memory_usage_t;
result->mem_table_total = usage_by_type[MemoryUtil::kMemTableTotal];
result->mem_table_unflushed = usage_by_type[MemoryUtil::kMemTableUnFlushed];
result->mem_table_readers_total = usage_by_type[MemoryUtil::kTableReadersTotal];
result->cache_total = usage_by_type[MemoryUtil::kCacheTotal];
return result;
}
uint64_t rocksdb_approximate_memory_usage_get_mem_table_total(
rocksdb_memory_usage_t* memory_usage) {
return memory_usage->mem_table_total;
}
uint64_t rocksdb_approximate_memory_usage_get_mem_table_unflushed(
rocksdb_memory_usage_t* memory_usage) {
return memory_usage->mem_table_unflushed;
}
uint64_t rocksdb_approximate_memory_usage_get_mem_table_readers_total(
rocksdb_memory_usage_t* memory_usage) {
return memory_usage->mem_table_readers_total;
}
uint64_t rocksdb_approximate_memory_usage_get_cache_total(
rocksdb_memory_usage_t* memory_usage) {
return memory_usage->cache_total;
}
// deletes container with memory usage estimates
void rocksdb_approximate_memory_usage_destroy(rocksdb_memory_usage_t* usage) {
delete usage;
}
} // end extern "C"
#endif // !ROCKSDB_LITE

View File

@ -19,11 +19,8 @@
// Can not use port/port.h macros as this is a c file
#ifdef OS_WIN
#include <windows.h>
#define snprintf _snprintf
// Ok for uniqueness
int geteuid() {
int result = 0;
@ -34,6 +31,11 @@ int geteuid() {
return result;
}
// VS < 2015
#if defined(_MSC_VER) && (_MSC_VER < 1900)
#define snprintf _snprintf
#endif
#endif
const char* phase = "";
@ -47,12 +49,19 @@ static void StartPhase(const char* name) {
fprintf(stderr, "=== Test %s\n", name);
phase = name;
}
#ifdef _MSC_VER
#pragma warning(push)
#pragma warning (disable: 4996) // getenv security warning
#endif
static const char* GetTempDir(void) {
const char* ret = getenv("TEST_TMPDIR");
if (ret == NULL || ret[0] == '\0')
ret = "/tmp";
return ret;
}
#ifdef _MSC_VER
#pragma warning(pop)
#endif
#define CheckNoError(err) \
if ((err) != NULL) { \
@ -643,7 +652,7 @@ int main(int argc, char** argv) {
rocksdb_sstfilewriter_t* writer =
rocksdb_sstfilewriter_create(env_opt, io_options);
unlink(sstfilename);
remove(sstfilename);
rocksdb_sstfilewriter_open(writer, sstfilename, &err);
CheckNoError(err);
rocksdb_sstfilewriter_put(writer, "sstk1", 5, "v1", 2, &err);
@ -664,7 +673,7 @@ int main(int argc, char** argv) {
CheckGet(db, roptions, "sstk2", "v2");
CheckGet(db, roptions, "sstk3", "v3");
unlink(sstfilename);
remove(sstfilename);
rocksdb_sstfilewriter_open(writer, sstfilename, &err);
CheckNoError(err);
rocksdb_sstfilewriter_put(writer, "sstk2", 5, "v4", 2, &err);
@ -1334,6 +1343,47 @@ int main(int argc, char** argv) {
rocksdb_destroy_db(options, dbname, &err);
}
// Check memory usage stats
StartPhase("approximate_memory_usage");
{
// Create database
db = rocksdb_open(options, dbname, &err);
CheckNoError(err);
rocksdb_memory_consumers_t* consumers;
consumers = rocksdb_memory_consumers_create();
rocksdb_memory_consumers_add_db(consumers, db);
rocksdb_memory_consumers_add_cache(consumers, cache);
// take memory usage report before write-read operation
rocksdb_memory_usage_t* mu1;
mu1 = rocksdb_approximate_memory_usage_create(consumers, &err);
CheckNoError(err);
// Put data (this should affect memtables)
rocksdb_put(db, woptions, "memory", 6, "test", 4, &err);
CheckNoError(err);
CheckGet(db, roptions, "memory", "test");
// take memory usage report after write-read operation
rocksdb_memory_usage_t* mu2;
mu2 = rocksdb_approximate_memory_usage_create(consumers, &err);
CheckNoError(err);
// amount of memory used within memtables should grow
CheckCondition(rocksdb_approximate_memory_usage_get_mem_table_total(mu2) >=
rocksdb_approximate_memory_usage_get_mem_table_total(mu1));
CheckCondition(rocksdb_approximate_memory_usage_get_mem_table_unflushed(mu2) >=
rocksdb_approximate_memory_usage_get_mem_table_unflushed(mu1));
rocksdb_memory_consumers_destroy(consumers);
rocksdb_approximate_memory_usage_destroy(mu1);
rocksdb_approximate_memory_usage_destroy(mu2);
rocksdb_close(db);
rocksdb_destroy_db(options, dbname, &err);
CheckNoError(err);
}
StartPhase("cuckoo_options");
{
rocksdb_cuckoo_table_options_t* cuckoo_options;
@ -1675,7 +1725,7 @@ int main(int argc, char** argv) {
db = rocksdb_open(options, dbname, &err);
CheckNoError(err);
}
StartPhase("cleanup");
rocksdb_close(db);
rocksdb_options_destroy(options);

View File

@ -20,10 +20,12 @@
#include <limits>
#include "db/compaction_picker.h"
#include "db/compaction_picker_fifo.h"
#include "db/compaction_picker_universal.h"
#include "db/db_impl.h"
#include "db/internal_stats.h"
#include "db/job_context.h"
#include "db/range_del_aggregator_v2.h"
#include "db/table_properties_collector.h"
#include "db/version_set.h"
#include "db/write_controller.h"
@ -105,9 +107,6 @@ void GetIntTblPropCollectorFactory(
int_tbl_prop_collector_factories->emplace_back(
new UserKeyTablePropertiesCollectorFactory(collector_factories[i]));
}
// Add collector to collect internal key statistics
int_tbl_prop_collector_factories->emplace_back(
new InternalKeyPropertiesCollectorFactory);
}
Status CheckCompressionSupported(const ColumnFamilyOptions& cf_options) {
@ -945,26 +944,16 @@ Status ColumnFamilyData::RangesOverlapWithMemtables(
super_version->imm->AddIterators(read_opts, &merge_iter_builder);
ScopedArenaIterator memtable_iter(merge_iter_builder.Finish());
std::vector<InternalIterator*> memtable_range_del_iters;
auto read_seq = super_version->current->version_set()->LastSequence();
ReadRangeDelAggregatorV2 range_del_agg(&internal_comparator_, read_seq);
auto* active_range_del_iter =
super_version->mem->NewRangeTombstoneIterator(read_opts);
if (active_range_del_iter != nullptr) {
memtable_range_del_iters.push_back(active_range_del_iter);
}
super_version->imm->AddRangeTombstoneIterators(read_opts,
&memtable_range_del_iters);
RangeDelAggregator range_del_agg(internal_comparator_, {} /* snapshots */,
false /* collapse_deletions */);
super_version->mem->NewRangeTombstoneIterator(read_opts, read_seq);
range_del_agg.AddTombstones(
std::unique_ptr<FragmentedRangeTombstoneIterator>(active_range_del_iter));
super_version->imm->AddRangeTombstoneIterators(read_opts, nullptr /* arena */,
&range_del_agg);
Status status;
{
std::unique_ptr<InternalIterator> memtable_range_del_iter(
NewMergingIterator(&internal_comparator_,
memtable_range_del_iters.empty()
? nullptr
: &memtable_range_del_iters[0],
static_cast<int>(memtable_range_del_iters.size())));
status = range_del_agg.AddTombstones(std::move(memtable_range_del_iter));
}
for (size_t i = 0; i < ranges.size() && status.ok() && !*overlap; ++i) {
auto* vstorage = super_version->current->storage_info();
auto* ucmp = vstorage->InternalComparator()->user_comparator();

View File

@ -47,7 +47,7 @@ class EnvCounter : public EnvWrapper {
int GetNumberOfNewWritableFileCalls() {
return num_new_writable_file_;
}
Status NewWritableFile(const std::string& f, unique_ptr<WritableFile>* r,
Status NewWritableFile(const std::string& f, std::unique_ptr<WritableFile>* r,
const EnvOptions& soptions) override {
++num_new_writable_file_;
return EnvWrapper::NewWritableFile(f, r, soptions);
@ -486,9 +486,9 @@ class ColumnFamilyTestBase : public testing::Test {
void CopyFile(const std::string& source, const std::string& destination,
uint64_t size = 0) {
const EnvOptions soptions;
unique_ptr<SequentialFile> srcfile;
std::unique_ptr<SequentialFile> srcfile;
ASSERT_OK(env_->NewSequentialFile(source, &srcfile, soptions));
unique_ptr<WritableFile> destfile;
std::unique_ptr<WritableFile> destfile;
ASSERT_OK(env_->NewWritableFile(destination, &destfile, soptions));
if (size == 0) {
@ -513,6 +513,16 @@ class ColumnFamilyTestBase : public testing::Test {
return static_cast<int>(files.size());
}
void RecalculateWriteStallConditions(ColumnFamilyData* cfd,
const MutableCFOptions& mutable_cf_options) {
// take the DB mutex to avoid a race between
// `RecalculateWriteStallConditions`, which writes to CFStats, and the
// background `DBImpl::DumpStats()` thread, which reads CFStats
dbfull()->TEST_LockMutex();
cfd->RecalculateWriteStallConditions(mutable_cf_options);
dbfull()->TEST_UnlockMutex();
}
std::vector<ColumnFamilyHandle*> handles_;
std::vector<std::string> names_;
std::vector<std::set<std::string>> keys_;
@ -2500,139 +2510,139 @@ TEST_P(ColumnFamilyTest, WriteStallSingleColumnFamily) {
mutable_cf_options.disable_auto_compactions = false;
vstorage->TEST_set_estimated_compaction_needed_bytes(50);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_TRUE(!IsDbWriteStopped());
ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay());
vstorage->TEST_set_estimated_compaction_needed_bytes(201);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_TRUE(!IsDbWriteStopped());
ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
ASSERT_EQ(kBaseRate, GetDbDelayedWriteRate());
ASSERT_EQ(6, dbfull()->TEST_BGCompactionsAllowed());
vstorage->TEST_set_estimated_compaction_needed_bytes(400);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_TRUE(!IsDbWriteStopped());
ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
ASSERT_EQ(kBaseRate / 1.25, GetDbDelayedWriteRate());
ASSERT_EQ(6, dbfull()->TEST_BGCompactionsAllowed());
vstorage->TEST_set_estimated_compaction_needed_bytes(500);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_TRUE(!IsDbWriteStopped());
ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
ASSERT_EQ(kBaseRate / 1.25 / 1.25, GetDbDelayedWriteRate());
vstorage->TEST_set_estimated_compaction_needed_bytes(450);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_TRUE(!IsDbWriteStopped());
ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
ASSERT_EQ(kBaseRate / 1.25, GetDbDelayedWriteRate());
vstorage->TEST_set_estimated_compaction_needed_bytes(205);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_TRUE(!IsDbWriteStopped());
ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
ASSERT_EQ(kBaseRate, GetDbDelayedWriteRate());
vstorage->TEST_set_estimated_compaction_needed_bytes(202);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_TRUE(!IsDbWriteStopped());
ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
ASSERT_EQ(kBaseRate, GetDbDelayedWriteRate());
vstorage->TEST_set_estimated_compaction_needed_bytes(201);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_TRUE(!IsDbWriteStopped());
ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
ASSERT_EQ(kBaseRate, GetDbDelayedWriteRate());
vstorage->TEST_set_estimated_compaction_needed_bytes(198);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_TRUE(!IsDbWriteStopped());
ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay());
vstorage->TEST_set_estimated_compaction_needed_bytes(399);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_TRUE(!IsDbWriteStopped());
ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
ASSERT_EQ(kBaseRate, GetDbDelayedWriteRate());
vstorage->TEST_set_estimated_compaction_needed_bytes(599);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_TRUE(!IsDbWriteStopped());
ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
ASSERT_EQ(kBaseRate / 1.25, GetDbDelayedWriteRate());
vstorage->TEST_set_estimated_compaction_needed_bytes(2001);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_TRUE(IsDbWriteStopped());
ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay());
ASSERT_EQ(6, dbfull()->TEST_BGCompactionsAllowed());
vstorage->TEST_set_estimated_compaction_needed_bytes(3001);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_TRUE(IsDbWriteStopped());
ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay());
vstorage->TEST_set_estimated_compaction_needed_bytes(390);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_TRUE(!IsDbWriteStopped());
ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
ASSERT_EQ(kBaseRate / 1.25, GetDbDelayedWriteRate());
vstorage->TEST_set_estimated_compaction_needed_bytes(100);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_TRUE(!IsDbWriteStopped());
ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay());
vstorage->set_l0_delay_trigger_count(100);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_TRUE(!IsDbWriteStopped());
ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
ASSERT_EQ(kBaseRate, GetDbDelayedWriteRate());
ASSERT_EQ(6, dbfull()->TEST_BGCompactionsAllowed());
vstorage->set_l0_delay_trigger_count(101);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_TRUE(!IsDbWriteStopped());
ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
ASSERT_EQ(kBaseRate / 1.25, GetDbDelayedWriteRate());
vstorage->set_l0_delay_trigger_count(0);
vstorage->TEST_set_estimated_compaction_needed_bytes(300);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_TRUE(!IsDbWriteStopped());
ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
ASSERT_EQ(kBaseRate / 1.25 / 1.25, GetDbDelayedWriteRate());
vstorage->set_l0_delay_trigger_count(101);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_TRUE(!IsDbWriteStopped());
ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
ASSERT_EQ(kBaseRate / 1.25 / 1.25 / 1.25, GetDbDelayedWriteRate());
vstorage->TEST_set_estimated_compaction_needed_bytes(200);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_TRUE(!IsDbWriteStopped());
ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
ASSERT_EQ(kBaseRate / 1.25 / 1.25, GetDbDelayedWriteRate());
vstorage->set_l0_delay_trigger_count(0);
vstorage->TEST_set_estimated_compaction_needed_bytes(0);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_TRUE(!IsDbWriteStopped());
ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay());
mutable_cf_options.disable_auto_compactions = true;
dbfull()->TEST_write_controler().set_delayed_write_rate(kBaseRate);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_TRUE(!IsDbWriteStopped());
ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay());
vstorage->set_l0_delay_trigger_count(50);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_TRUE(!IsDbWriteStopped());
ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay());
ASSERT_EQ(0, GetDbDelayedWriteRate());
@ -2640,7 +2650,7 @@ TEST_P(ColumnFamilyTest, WriteStallSingleColumnFamily) {
vstorage->set_l0_delay_trigger_count(60);
vstorage->TEST_set_estimated_compaction_needed_bytes(300);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_TRUE(!IsDbWriteStopped());
ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay());
ASSERT_EQ(0, GetDbDelayedWriteRate());
@ -2649,14 +2659,14 @@ TEST_P(ColumnFamilyTest, WriteStallSingleColumnFamily) {
mutable_cf_options.disable_auto_compactions = false;
vstorage->set_l0_delay_trigger_count(70);
vstorage->TEST_set_estimated_compaction_needed_bytes(500);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_TRUE(!IsDbWriteStopped());
ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
ASSERT_EQ(kBaseRate, GetDbDelayedWriteRate());
vstorage->set_l0_delay_trigger_count(71);
vstorage->TEST_set_estimated_compaction_needed_bytes(501);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_TRUE(!IsDbWriteStopped());
ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
ASSERT_EQ(kBaseRate / 1.25, GetDbDelayedWriteRate());
@ -2681,31 +2691,31 @@ TEST_P(ColumnFamilyTest, CompactionSpeedupSingleColumnFamily) {
mutable_cf_options.hard_pending_compaction_bytes_limit = 2000;
vstorage->TEST_set_estimated_compaction_needed_bytes(40);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_EQ(1, dbfull()->TEST_BGCompactionsAllowed());
vstorage->TEST_set_estimated_compaction_needed_bytes(50);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_EQ(6, dbfull()->TEST_BGCompactionsAllowed());
vstorage->TEST_set_estimated_compaction_needed_bytes(300);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_EQ(6, dbfull()->TEST_BGCompactionsAllowed());
vstorage->TEST_set_estimated_compaction_needed_bytes(45);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_EQ(1, dbfull()->TEST_BGCompactionsAllowed());
vstorage->set_l0_delay_trigger_count(7);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_EQ(1, dbfull()->TEST_BGCompactionsAllowed());
vstorage->set_l0_delay_trigger_count(9);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_EQ(6, dbfull()->TEST_BGCompactionsAllowed());
vstorage->set_l0_delay_trigger_count(6);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_EQ(1, dbfull()->TEST_BGCompactionsAllowed());
// Speed up threshold = min(4 * 2, 4 + (12 - 4)/4) = 6
@ -2714,15 +2724,15 @@ TEST_P(ColumnFamilyTest, CompactionSpeedupSingleColumnFamily) {
mutable_cf_options.level0_stop_writes_trigger = 30;
vstorage->set_l0_delay_trigger_count(5);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_EQ(1, dbfull()->TEST_BGCompactionsAllowed());
vstorage->set_l0_delay_trigger_count(7);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_EQ(6, dbfull()->TEST_BGCompactionsAllowed());
vstorage->set_l0_delay_trigger_count(3);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_EQ(1, dbfull()->TEST_BGCompactionsAllowed());
}
@ -2749,53 +2759,53 @@ TEST_P(ColumnFamilyTest, WriteStallTwoColumnFamilies) {
mutable_cf_options1.soft_pending_compaction_bytes_limit = 500;
vstorage->TEST_set_estimated_compaction_needed_bytes(50);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_TRUE(!IsDbWriteStopped());
ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay());
vstorage1->TEST_set_estimated_compaction_needed_bytes(201);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_TRUE(!IsDbWriteStopped());
ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay());
vstorage1->TEST_set_estimated_compaction_needed_bytes(600);
cfd1->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd1, mutable_cf_options);
ASSERT_TRUE(!IsDbWriteStopped());
ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
ASSERT_EQ(kBaseRate, GetDbDelayedWriteRate());
vstorage->TEST_set_estimated_compaction_needed_bytes(70);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_TRUE(!IsDbWriteStopped());
ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
ASSERT_EQ(kBaseRate, GetDbDelayedWriteRate());
vstorage1->TEST_set_estimated_compaction_needed_bytes(800);
cfd1->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd1, mutable_cf_options);
ASSERT_TRUE(!IsDbWriteStopped());
ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
ASSERT_EQ(kBaseRate / 1.25, GetDbDelayedWriteRate());
vstorage->TEST_set_estimated_compaction_needed_bytes(300);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_TRUE(!IsDbWriteStopped());
ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
ASSERT_EQ(kBaseRate / 1.25 / 1.25, GetDbDelayedWriteRate());
vstorage1->TEST_set_estimated_compaction_needed_bytes(700);
cfd1->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd1, mutable_cf_options);
ASSERT_TRUE(!IsDbWriteStopped());
ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
ASSERT_EQ(kBaseRate / 1.25, GetDbDelayedWriteRate());
vstorage->TEST_set_estimated_compaction_needed_bytes(500);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_TRUE(!IsDbWriteStopped());
ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
ASSERT_EQ(kBaseRate / 1.25 / 1.25, GetDbDelayedWriteRate());
vstorage1->TEST_set_estimated_compaction_needed_bytes(600);
cfd1->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd1, mutable_cf_options);
ASSERT_TRUE(!IsDbWriteStopped());
ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay());
ASSERT_EQ(kBaseRate / 1.25, GetDbDelayedWriteRate());
@ -2828,41 +2838,41 @@ TEST_P(ColumnFamilyTest, CompactionSpeedupTwoColumnFamilies) {
mutable_cf_options1.level0_slowdown_writes_trigger = 16;
vstorage->TEST_set_estimated_compaction_needed_bytes(40);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_EQ(1, dbfull()->TEST_BGCompactionsAllowed());
vstorage->TEST_set_estimated_compaction_needed_bytes(60);
cfd1->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd1, mutable_cf_options);
ASSERT_EQ(1, dbfull()->TEST_BGCompactionsAllowed());
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_EQ(6, dbfull()->TEST_BGCompactionsAllowed());
vstorage1->TEST_set_estimated_compaction_needed_bytes(30);
cfd1->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd1, mutable_cf_options);
ASSERT_EQ(6, dbfull()->TEST_BGCompactionsAllowed());
vstorage1->TEST_set_estimated_compaction_needed_bytes(70);
cfd1->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd1, mutable_cf_options);
ASSERT_EQ(6, dbfull()->TEST_BGCompactionsAllowed());
vstorage->TEST_set_estimated_compaction_needed_bytes(20);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_EQ(6, dbfull()->TEST_BGCompactionsAllowed());
vstorage1->TEST_set_estimated_compaction_needed_bytes(3);
cfd1->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd1, mutable_cf_options);
ASSERT_EQ(1, dbfull()->TEST_BGCompactionsAllowed());
vstorage->set_l0_delay_trigger_count(9);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_EQ(6, dbfull()->TEST_BGCompactionsAllowed());
vstorage1->set_l0_delay_trigger_count(2);
cfd1->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd1, mutable_cf_options);
ASSERT_EQ(6, dbfull()->TEST_BGCompactionsAllowed());
vstorage->set_l0_delay_trigger_count(0);
cfd->RecalculateWriteStallConditions(mutable_cf_options);
RecalculateWriteStallConditions(cfd, mutable_cf_options);
ASSERT_EQ(1, dbfull()->TEST_BGCompactionsAllowed());
}

View File

@ -308,6 +308,55 @@ TEST_F(CompactFilesTest, CompactionFilterWithGetSv) {
delete db;
}
TEST_F(CompactFilesTest, SentinelCompressionType) {
if (!Zlib_Supported()) {
fprintf(stderr, "zlib compression not supported, skip this test\n");
return;
}
if (!Snappy_Supported()) {
fprintf(stderr, "snappy compression not supported, skip this test\n");
return;
}
// Check that passing `CompressionType::kDisableCompressionOption` to
// `CompactFiles` causes it to use the column family compression options.
for (auto compaction_style :
{CompactionStyle::kCompactionStyleLevel,
CompactionStyle::kCompactionStyleUniversal,
CompactionStyle::kCompactionStyleNone}) {
DestroyDB(db_name_, Options());
Options options;
options.compaction_style = compaction_style;
// L0: Snappy, L1: Zlib, L2: Snappy
options.compression_per_level = {CompressionType::kSnappyCompression,
CompressionType::kZlibCompression,
CompressionType::kSnappyCompression};
options.create_if_missing = true;
FlushedFileCollector* collector = new FlushedFileCollector();
options.listeners.emplace_back(collector);
DB* db = nullptr;
ASSERT_OK(DB::Open(options, db_name_, &db));
db->Put(WriteOptions(), "key", "val");
db->Flush(FlushOptions());
auto l0_files = collector->GetFlushedFiles();
ASSERT_EQ(1, l0_files.size());
// L0->L1 compaction, so output should be Zlib-compressed
CompactionOptions compaction_opts;
compaction_opts.compression = CompressionType::kDisableCompressionOption;
ASSERT_OK(db->CompactFiles(compaction_opts, l0_files, 1));
rocksdb::TablePropertiesCollection all_tables_props;
ASSERT_OK(db->GetPropertiesOfAllTables(&all_tables_props));
for (const auto& name_and_table_props : all_tables_props) {
ASSERT_EQ(CompressionTypeToString(CompressionType::kZlibCompression),
name_and_table_props.second->compression_name);
}
delete db;
}
}
} // namespace rocksdb
int main(int argc, char** argv) {

View File

@ -25,22 +25,12 @@ CompactedDBImpl::~CompactedDBImpl() {
}
size_t CompactedDBImpl::FindFile(const Slice& key) {
size_t left = 0;
size_t right = files_.num_files - 1;
while (left < right) {
size_t mid = (left + right) >> 1;
const FdWithKeyRange& f = files_.files[mid];
if (user_comparator_->Compare(ExtractUserKey(f.largest_key), key) < 0) {
// Key at "mid.largest" is < "target". Therefore all
// files at or before "mid" are uninteresting.
left = mid + 1;
} else {
// Key at "mid.largest" is >= "target". Therefore all files
// after "mid" are uninteresting.
right = mid;
}
}
return right;
auto cmp = [&](const FdWithKeyRange& f, const Slice& k) -> bool {
return user_comparator_->Compare(ExtractUserKey(f.largest_key), k) < 0;
};
return static_cast<size_t>(std::lower_bound(files_.files,
files_.files + right, key, cmp) - files_.files);
}
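
The manual binary search is replaced by std::lower_bound with a comparator over file metadata. The same idiom, reduced to a standalone sketch with plain strings (FindFileSketch is a hypothetical name, not RocksDB code):

#include <algorithm>
#include <string>
#include <vector>

// Find the first file whose largest key is >= the lookup key, i.e. the only
// file that could contain it. Assumes at least one file, matching FindFile's
// use of num_files - 1 as the search bound.
size_t FindFileSketch(const std::vector<std::string>& largest_keys,
                      const std::string& key) {
  auto cmp = [](const std::string& largest, const std::string& k) {
    return largest < k;  // "file ends before key" => keep searching right
  };
  auto it = std::lower_bound(largest_keys.begin(), largest_keys.end() - 1,
                             key, cmp);
  return static_cast<size_t>(it - largest_keys.begin());
}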
Status CompactedDBImpl::Get(const ReadOptions& options, ColumnFamilyHandle*,
@ -157,6 +147,7 @@ Status CompactedDBImpl::Open(const Options& options,
std::unique_ptr<CompactedDBImpl> db(new CompactedDBImpl(db_options, dbname));
Status s = db->Init(options);
if (s.ok()) {
db->StartTimedTasks();
ROCKS_LOG_INFO(db->immutable_db_options_.info_log,
"Opened the db as fully compacted mode");
LogFlush(db->immutable_db_options_.info_log);

View File

@ -23,6 +23,43 @@
namespace rocksdb {
const uint64_t kRangeTombstoneSentinel =
PackSequenceAndType(kMaxSequenceNumber, kTypeRangeDeletion);
int sstableKeyCompare(const Comparator* user_cmp, const InternalKey& a,
const InternalKey& b) {
auto c = user_cmp->Compare(a.user_key(), b.user_key());
if (c != 0) {
return c;
}
auto a_footer = ExtractInternalKeyFooter(a.Encode());
auto b_footer = ExtractInternalKeyFooter(b.Encode());
if (a_footer == kRangeTombstoneSentinel) {
if (b_footer != kRangeTombstoneSentinel) {
return -1;
}
} else if (b_footer == kRangeTombstoneSentinel) {
return 1;
}
return 0;
}
int sstableKeyCompare(const Comparator* user_cmp, const InternalKey* a,
const InternalKey& b) {
if (a == nullptr) {
return -1;
}
return sstableKeyCompare(user_cmp, *a, b);
}
int sstableKeyCompare(const Comparator* user_cmp, const InternalKey& a,
const InternalKey* b) {
if (b == nullptr) {
return -1;
}
return sstableKeyCompare(user_cmp, a, *b);
}
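
For illustration, the ordering these overloads implement can be reduced to a standalone sketch with stand-in types (everything below is hypothetical, not the real InternalKey machinery): keys with the same user key compare equal unless exactly one carries the range tombstone sentinel footer, which sorts first.

#include <cstdint>
#include <string>

constexpr uint64_t kSentinelFooter = UINT64_MAX;  // stands in for the packed
                                                  // (kMaxSequenceNumber, kTypeRangeDeletion)

struct BoundaryKey {
  std::string user_key;
  uint64_t footer;  // packed (sequence, type) in the real InternalKey
};

int CompareSketch(const BoundaryKey& a, const BoundaryKey& b) {
  int c = a.user_key.compare(b.user_key);
  if (c != 0) return c;
  if (a.footer == kSentinelFooter) {
    return b.footer == kSentinelFooter ? 0 : -1;  // sentinel sorts first
  }
  if (b.footer == kSentinelFooter) return 1;
  return 0;  // same user key, ordinary footers: considered equal
}

// CompareSketch({"k", kSentinelFooter}, {"k", 42}) == -1, so a file whose
// largest key is the sentinel at "k" does not overlap a file whose smallest
// key is a real entry at "k".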
uint64_t TotalFileSize(const std::vector<FileMetaData*>& files) {
uint64_t sum = 0;
for (size_t i = 0; i < files.size() && files[i]; i++) {
@ -81,6 +118,49 @@ void Compaction::GetBoundaryKeys(
}
}
std::vector<CompactionInputFiles> Compaction::PopulateWithAtomicBoundaries(
VersionStorageInfo* vstorage, std::vector<CompactionInputFiles> inputs) {
const Comparator* ucmp = vstorage->InternalComparator()->user_comparator();
for (size_t i = 0; i < inputs.size(); i++) {
if (inputs[i].level == 0 || inputs[i].files.empty()) {
continue;
}
inputs[i].atomic_compaction_unit_boundaries.reserve(inputs[i].files.size());
AtomicCompactionUnitBoundary cur_boundary;
size_t first_atomic_idx = 0;
auto add_unit_boundary = [&](size_t to) {
if (first_atomic_idx == to) return;
for (size_t k = first_atomic_idx; k < to; k++) {
inputs[i].atomic_compaction_unit_boundaries.push_back(cur_boundary);
}
first_atomic_idx = to;
};
for (size_t j = 0; j < inputs[i].files.size(); j++) {
const auto* f = inputs[i].files[j];
if (j == 0) {
// First file in a level.
cur_boundary.smallest = &f->smallest;
cur_boundary.largest = &f->largest;
} else if (sstableKeyCompare(ucmp, *cur_boundary.largest, f->smallest) ==
0) {
// SSTs overlap but the end key of the previous file was not
// artificially extended by a range tombstone. Extend the current
// boundary.
cur_boundary.largest = &f->largest;
} else {
// Atomic compaction unit has ended.
add_unit_boundary(j);
cur_boundary.smallest = &f->smallest;
cur_boundary.largest = &f->largest;
}
}
add_unit_boundary(inputs[i].files.size());
assert(inputs[i].files.size() ==
inputs[i].atomic_compaction_unit_boundaries.size());
}
return inputs;
}
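
The grouping rule can be illustrated with a standalone sketch over plain key ranges (GroupAtomicUnits is hypothetical; the real code above keeps one boundary entry per file rather than collapsing them): consecutive files belong to one atomic unit while each file begins exactly where the previous one ends.

#include <string>
#include <utility>
#include <vector>

using KeyRange = std::pair<std::string, std::string>;  // {smallest, largest}

// Group neighbouring files into atomic units: a unit grows while the next
// file starts exactly where the current unit ends (the chain-overlap case
// detected by sstableKeyCompare(...) == 0 above).
std::vector<KeyRange> GroupAtomicUnits(const std::vector<KeyRange>& files) {
  std::vector<KeyRange> units;
  for (const auto& f : files) {
    if (!units.empty() && units.back().second == f.first) {
      units.back().second = f.second;  // extend the current unit
    } else {
      units.push_back(f);  // start a new unit
    }
  }
  return units;
}

// GroupAtomicUnits({{"a","c"}, {"c","e"}, {"g","h"}}) yields two units:
// {"a","e"} and {"g","h"}.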
// helper function to determine if compaction is creating files at the
// bottommost level
bool Compaction::IsBottommostLevel(
@ -155,7 +235,7 @@ Compaction::Compaction(VersionStorageInfo* vstorage,
output_compression_(_compression),
output_compression_opts_(_compression_opts),
deletion_compaction_(_deletion_compaction),
inputs_(std::move(_inputs)),
inputs_(PopulateWithAtomicBoundaries(vstorage, std::move(_inputs))),
grandparents_(std::move(_grandparents)),
score_(_score),
bottommost_level_(IsBottommostLevel(output_level_, vstorage, inputs_)),
@ -331,12 +411,14 @@ const char* Compaction::InputLevelSummary(
if (!is_first) {
len +=
snprintf(scratch->buffer + len, sizeof(scratch->buffer) - len, " + ");
len = std::min(len, static_cast<int>(sizeof(scratch->buffer)));
} else {
is_first = false;
}
len += snprintf(scratch->buffer + len, sizeof(scratch->buffer) - len,
"%" ROCKSDB_PRIszt "@%d", input_level.size(),
input_level.level);
len = std::min(len, static_cast<int>(sizeof(scratch->buffer)));
}
snprintf(scratch->buffer + len, sizeof(scratch->buffer) - len,
" files to L%d", output_level());

View File

@ -15,11 +15,43 @@
namespace rocksdb {
// Utility for comparing sstable boundary keys. Returns -1 if either a or b
// is null, which provides the property that a==null indicates a key that is
// less than any key and b==null indicates a key that is greater than any
// key. Note that the comparison is performed primarily on the user-key
// portion of the key. If the user-keys compare equal, an additional test is
// made to sort range tombstone sentinel keys before other keys with the same
// user-key. The result is that two keys will compare equal if they differ
// purely in their sequence number and value, but the range tombstone
// sentinel for a user-key will compare not equal to other keys with that
// user-key. This is necessary because the range tombstone sentinel key is
// set as the largest key for an sstable even though that key never appears
// in the database. We don't want adjacent sstables to be considered
// overlapping if they are separated only by the range tombstone sentinel.
int sstableKeyCompare(const Comparator* user_cmp, const InternalKey& a,
const InternalKey& b);
int sstableKeyCompare(const Comparator* user_cmp, const InternalKey* a,
const InternalKey& b);
int sstableKeyCompare(const Comparator* user_cmp, const InternalKey& a,
const InternalKey* b);
// An AtomicCompactionUnitBoundary represents a range of keys [smallest,
// largest] that exactly spans one or more neighbouring SSTs on the same
// level. Every pair of neighbouring SSTs in this range "overlaps" (i.e., the
// largest user key of one file is the smallest user key of the next file).
// These boundaries are propagated down to RangeDelAggregator during
// compaction to provide safe truncation boundaries for range tombstones.
struct AtomicCompactionUnitBoundary {
const InternalKey* smallest = nullptr;
const InternalKey* largest = nullptr;
};
// The structure that manages compaction input files associated
// with the same physical level.
struct CompactionInputFiles {
int level;
std::vector<FileMetaData*> files;
std::vector<AtomicCompactionUnitBoundary> atomic_compaction_unit_boundaries;
inline bool empty() const { return files.empty(); }
inline size_t size() const { return files.size(); }
inline void clear() { files.clear(); }
@ -96,6 +128,12 @@ class Compaction {
return inputs_[compaction_input_level][i];
}
const std::vector<AtomicCompactionUnitBoundary>* boundaries(
size_t compaction_input_level) const {
assert(compaction_input_level < inputs_.size());
return &inputs_[compaction_input_level].atomic_compaction_unit_boundaries;
}
// Returns the list of file meta data of the specified compaction
// input level.
// REQUIREMENT: "compaction_input_level" must be >= 0 and
@ -262,6 +300,13 @@ class Compaction {
const std::vector<CompactionInputFiles>& inputs,
Slice* smallest_key, Slice* largest_key);
// Get the atomic file boundaries for all files in the compaction. Necessary
// in order to avoid the scenario described in
// https://github.com/facebook/rocksdb/pull/4432#discussion_r221072219 and plumb
// down appropriate key boundaries to RangeDelAggregator during compaction.
static std::vector<CompactionInputFiles> PopulateWithAtomicBoundaries(
VersionStorageInfo* vstorage, std::vector<CompactionInputFiles> inputs);
// helper function to determine if compaction with inputs and storage is
// bottommost
static bool IsBottommostLevel(

View File

@ -18,8 +18,8 @@ CompactionIterator::CompactionIterator(
SequenceNumber earliest_write_conflict_snapshot,
const SnapshotChecker* snapshot_checker, Env* env,
bool report_detailed_time, bool expect_valid_internal_key,
RangeDelAggregator* range_del_agg,
const Compaction* compaction, const CompactionFilter* compaction_filter,
CompactionRangeDelAggregatorV2* range_del_agg, const Compaction* compaction,
const CompactionFilter* compaction_filter,
const std::atomic<bool>* shutting_down,
const SequenceNumber preserve_deletes_seqnum)
: CompactionIterator(
@ -36,7 +36,7 @@ CompactionIterator::CompactionIterator(
SequenceNumber earliest_write_conflict_snapshot,
const SnapshotChecker* snapshot_checker, Env* env,
bool report_detailed_time, bool expect_valid_internal_key,
RangeDelAggregator* range_del_agg,
CompactionRangeDelAggregatorV2* range_del_agg,
std::unique_ptr<CompactionProxy> compaction,
const CompactionFilter* compaction_filter,
const std::atomic<bool>* shutting_down,
@ -77,6 +77,12 @@ CompactionIterator::CompactionIterator(
earliest_snapshot_ = snapshots_->at(0);
latest_snapshot_ = snapshots_->back();
}
#ifndef NDEBUG
// findEarliestVisibleSnapshot assumes this ordering.
for (size_t i = 1; i < snapshots_->size(); ++i) {
assert(snapshots_->at(i - 1) <= snapshots_->at(i));
}
#endif
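// (Equivalently, assuming <algorithm> is available, the loop above could be
// a single check:
//   assert(std::is_sorted(snapshots_->begin(), snapshots_->end()));
// std::is_sorted verifies the same non-decreasing ordering.)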
if (compaction_filter_ != nullptr) {
if (compaction_filter_->IgnoreSnapshots()) {
ignore_snapshots_ = true;
@ -505,6 +511,31 @@ void CompactionIterator::NextFromInput() {
++iter_stats_.num_optimized_del_drop_obsolete;
}
input_->Next();
} else if ((ikey_.type == kTypeDeletion) && bottommost_level_ &&
ikeyNotNeededForIncrementalSnapshot()) {
// Handle the case where we have a delete key at the bottommost level. We
// can skip outputting the key iff there are no subsequent puts for this
// key.
ParsedInternalKey next_ikey;
input_->Next();
// Skip over all versions of this key that happen to occur in the same snapshot
// range as the delete
while (input_->Valid() &&
ParseInternalKey(input_->key(), &next_ikey) &&
cmp_->Equal(ikey_.user_key, next_ikey.user_key) &&
(prev_snapshot == 0 || next_ikey.sequence > prev_snapshot ||
(snapshot_checker_ != nullptr &&
UNLIKELY(!snapshot_checker_->IsInSnapshot(next_ikey.sequence,
prev_snapshot))))) {
input_->Next();
}
// If we still need to output a row with this key, we need to output the
// delete too.
if (input_->Valid() && ParseInternalKey(input_->key(), &next_ikey) &&
cmp_->Equal(ikey_.user_key, next_ikey.user_key)) {
valid_ = true;
at_next_ = true;
}
} else if (ikey_.type == kTypeMerge) {
if (!merge_helper_->HasOperator()) {
status_ = Status::InvalidArgument(
@ -603,18 +634,23 @@ void CompactionIterator::PrepareOutput() {
inline SequenceNumber CompactionIterator::findEarliestVisibleSnapshot(
SequenceNumber in, SequenceNumber* prev_snapshot) {
assert(snapshots_->size());
SequenceNumber prev = kMaxSequenceNumber;
for (const auto cur : *snapshots_) {
assert(prev == kMaxSequenceNumber || prev <= cur);
if (cur >= in && (snapshot_checker_ == nullptr ||
snapshot_checker_->IsInSnapshot(in, cur))) {
*prev_snapshot = prev == kMaxSequenceNumber ? 0 : prev;
auto snapshots_iter = std::lower_bound(
snapshots_->begin(), snapshots_->end(), in);
if (snapshots_iter == snapshots_->begin()) {
*prev_snapshot = 0;
} else {
*prev_snapshot = *std::prev(snapshots_iter);
assert(*prev_snapshot < in);
}
for (; snapshots_iter != snapshots_->end(); ++snapshots_iter) {
auto cur = *snapshots_iter;
assert(in <= cur);
if (snapshot_checker_ == nullptr ||
snapshot_checker_->IsInSnapshot(in, cur)) {
return cur;
}
prev = cur;
assert(prev < kMaxSequenceNumber);
*prev_snapshot = cur;
}
*prev_snapshot = prev;
return kMaxSequenceNumber;
}
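
The rewritten lookup relies on the sorted-snapshots invariant asserted in the constructor: only snapshots at or above `in` need to be scanned. A simplified standalone sketch of the same search, ignoring the SnapshotChecker path (FindEarliestVisible is a hypothetical name):

#include <algorithm>
#include <cstdint>
#include <vector>

using SequenceNumber = uint64_t;
constexpr SequenceNumber kMaxSeq = UINT64_MAX;

// Given ascending snapshot sequence numbers, return the earliest snapshot
// that can see sequence `in`, and report the latest snapshot strictly below
// `in` through `prev_snapshot` (0 if there is none).
SequenceNumber FindEarliestVisible(const std::vector<SequenceNumber>& snaps,
                                   SequenceNumber in,
                                   SequenceNumber* prev_snapshot) {
  auto it = std::lower_bound(snaps.begin(), snaps.end(), in);
  *prev_snapshot = (it == snaps.begin()) ? 0 : *std::prev(it);
  return (it == snaps.end()) ? kMaxSeq : *it;
}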

View File

@ -13,7 +13,7 @@
#include "db/compaction_iteration_stats.h"
#include "db/merge_helper.h"
#include "db/pinned_iterators_manager.h"
#include "db/range_del_aggregator.h"
#include "db/range_del_aggregator_v2.h"
#include "db/snapshot_checker.h"
#include "options/cf_options.h"
#include "rocksdb/compaction_filter.h"
@ -64,7 +64,7 @@ class CompactionIterator {
SequenceNumber earliest_write_conflict_snapshot,
const SnapshotChecker* snapshot_checker, Env* env,
bool report_detailed_time, bool expect_valid_internal_key,
RangeDelAggregator* range_del_agg,
CompactionRangeDelAggregatorV2* range_del_agg,
const Compaction* compaction = nullptr,
const CompactionFilter* compaction_filter = nullptr,
const std::atomic<bool>* shutting_down = nullptr,
@ -77,7 +77,7 @@ class CompactionIterator {
SequenceNumber earliest_write_conflict_snapshot,
const SnapshotChecker* snapshot_checker, Env* env,
bool report_detailed_time, bool expect_valid_internal_key,
RangeDelAggregator* range_del_agg,
CompactionRangeDelAggregatorV2* range_del_agg,
std::unique_ptr<CompactionProxy> compaction,
const CompactionFilter* compaction_filter = nullptr,
const std::atomic<bool>* shutting_down = nullptr,
@ -141,7 +141,7 @@ class CompactionIterator {
Env* env_;
bool report_detailed_time_;
bool expect_valid_internal_key_;
RangeDelAggregator* range_del_agg_;
CompactionRangeDelAggregatorV2* range_del_agg_;
std::unique_ptr<CompactionProxy> compaction_;
const CompactionFilter* compaction_filter_;
const std::atomic<bool>* shutting_down_;

View File

@ -221,10 +221,16 @@ class CompactionIteratorTest : public testing::TestWithParam<bool> {
MergeOperator* merge_op = nullptr, CompactionFilter* filter = nullptr,
bool bottommost_level = false,
SequenceNumber earliest_write_conflict_snapshot = kMaxSequenceNumber) {
std::unique_ptr<InternalIterator> range_del_iter(
std::unique_ptr<InternalIterator> unfragmented_range_del_iter(
new test::VectorIterator(range_del_ks, range_del_vs));
range_del_agg_.reset(new RangeDelAggregator(icmp_, snapshots_));
ASSERT_OK(range_del_agg_->AddTombstones(std::move(range_del_iter)));
auto tombstone_list = std::make_shared<FragmentedRangeTombstoneList>(
std::move(unfragmented_range_del_iter), icmp_);
std::unique_ptr<FragmentedRangeTombstoneIterator> range_del_iter(
new FragmentedRangeTombstoneIterator(tombstone_list, icmp_,
kMaxSequenceNumber));
range_del_agg_.reset(
new CompactionRangeDelAggregatorV2(&icmp_, snapshots_));
range_del_agg_->AddTombstones(std::move(range_del_iter));
std::unique_ptr<CompactionIterator::CompactionProxy> compaction;
if (filter || bottommost_level) {
@ -247,9 +253,8 @@ class CompactionIteratorTest : public testing::TestWithParam<bool> {
c_iter_.reset(new CompactionIterator(
iter_.get(), cmp_, merge_helper_.get(), last_sequence, &snapshots_,
earliest_write_conflict_snapshot, snapshot_checker_.get(),
Env::Default(), false /* report_detailed_time */,
false, range_del_agg_.get(), std::move(compaction), filter,
&shutting_down_));
Env::Default(), false /* report_detailed_time */, false,
range_del_agg_.get(), std::move(compaction), filter, &shutting_down_));
}
void AddSnapshot(SequenceNumber snapshot,
@ -293,7 +298,7 @@ class CompactionIteratorTest : public testing::TestWithParam<bool> {
std::unique_ptr<MergeHelper> merge_helper_;
std::unique_ptr<LoggingForwardVectorIterator> iter_;
std::unique_ptr<CompactionIterator> c_iter_;
std::unique_ptr<RangeDelAggregator> range_del_agg_;
std::unique_ptr<CompactionRangeDelAggregatorV2> range_del_agg_;
std::unique_ptr<SnapshotChecker> snapshot_checker_;
std::atomic<bool> shutting_down_{false};
FakeCompaction* compaction_proxy_;
@ -672,8 +677,12 @@ TEST_P(CompactionIteratorTest, ZeroOutSequenceAtBottomLevel) {
TEST_P(CompactionIteratorTest, RemoveDeletionAtBottomLevel) {
AddSnapshot(1);
RunTest({test::KeyStr("a", 1, kTypeDeletion),
test::KeyStr("b", 2, kTypeDeletion)},
{"", ""}, {test::KeyStr("b", 2, kTypeDeletion)}, {""},
test::KeyStr("b", 3, kTypeDeletion),
test::KeyStr("b", 1, kTypeValue)},
{"", "", ""},
{test::KeyStr("b", 3, kTypeDeletion),
test::KeyStr("b", 0, kTypeValue)},
{"", ""},
kMaxSequenceNumber /*last_commited_seq*/, nullptr /*merge_operator*/,
nullptr /*compaction_filter*/, true /*bottommost_level*/);
}
@ -842,13 +851,26 @@ TEST_F(CompactionIteratorWithSnapshotCheckerTest,
{test::KeyStr("a", 1, kTypeDeletion), test::KeyStr("b", 2, kTypeDeletion),
test::KeyStr("c", 3, kTypeDeletion)},
{"", "", ""},
{test::KeyStr("b", 2, kTypeDeletion),
test::KeyStr("c", 3, kTypeDeletion)},
{},
{"", ""}, kMaxSequenceNumber /*last_commited_seq*/,
nullptr /*merge_operator*/, nullptr /*compaction_filter*/,
true /*bottommost_level*/);
}
TEST_F(CompactionIteratorWithSnapshotCheckerTest,
NotRemoveDeletionIfValuePresentToEarlierSnapshot) {
AddSnapshot(2,1);
RunTest(
{test::KeyStr("a", 4, kTypeDeletion), test::KeyStr("a", 1, kTypeValue),
test::KeyStr("b", 3, kTypeValue)},
{"", "", ""},
{test::KeyStr("a", 4, kTypeDeletion), test::KeyStr("a", 0, kTypeValue),
test::KeyStr("b", 3, kTypeValue)},
{"", "", ""}, kMaxSequenceNumber /*last_commited_seq*/,
nullptr /*merge_operator*/, nullptr /*compaction_filter*/,
true /*bottommost_level*/);
}
TEST_F(CompactionIteratorWithSnapshotCheckerTest,
NotRemoveSingleDeletionIfNotVisibleToEarliestSnapshot) {
AddSnapshot(2, 1);

View File

@ -36,6 +36,7 @@
#include "db/memtable_list.h"
#include "db/merge_context.h"
#include "db/merge_helper.h"
#include "db/range_del_aggregator_v2.h"
#include "db/version_set.h"
#include "monitoring/iostats_context_imp.h"
#include "monitoring/perf_context_imp.h"
@ -200,7 +201,7 @@ struct CompactionJob::SubcompactionState {
return *this;
}
// Because member unique_ptrs do not have these.
// Because member std::unique_ptrs do not have these.
SubcompactionState(const SubcompactionState&) = delete;
SubcompactionState& operator=(const SubcompactionState&) = delete;
@ -511,7 +512,10 @@ void CompactionJob::GenSubcompactionBoundaries() {
// size of data covered by keys in that range
uint64_t sum = 0;
std::vector<RangeWithSize> ranges;
auto* v = cfd->current();
// Get the input version from CompactionState since it's already referenced
// earlier in Compaction::SetInputVersion and will not change when db_mutex_
// is released below
auto* v = compact_->compaction->input_version();
for (auto it = bounds.begin();;) {
const Slice a = *it;
it++;
@ -521,7 +525,13 @@ void CompactionJob::GenSubcompactionBoundaries() {
}
const Slice b = *it;
// ApproximateSize could potentially create a table reader iterator to seek
// to the index block and may incur I/O cost in the process. Unlock the db
// mutex to reduce contention
db_mutex_->Unlock();
uint64_t size = versions_->ApproximateSize(v, a, b, start_lvl, out_lvl + 1);
db_mutex_->Lock();
ranges.emplace_back(a, b, size);
sum += size;
}
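
The unlock/relock around ApproximateSize is the classic pattern of releasing a hot mutex across a potentially slow call whose inputs are pinned beforehand (here, the input version). A generic sketch of the pattern (hypothetical helper, with std::mutex standing in for the DB's InstrumentedMutex):

#include <cstdint>
#include <mutex>

// Precondition: db_mutex is held by the caller. Releases it across the slow
// call, then re-acquires it before returning to code that touches shared
// state. Safe only because the call's inputs are pinned while unlocked.
uint64_t SizeWithMutexReleased(std::mutex& db_mutex,
                               uint64_t (*approximate_size)()) {
  db_mutex.unlock();
  uint64_t size = approximate_size();  // may read index blocks: I/O
  db_mutex.lock();
  return size;
}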
@ -795,10 +805,13 @@ Status CompactionJob::Install(const MutableCFOptions& mutable_cf_options) {
void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) {
assert(sub_compact != nullptr);
ColumnFamilyData* cfd = sub_compact->compaction->column_family_data();
std::unique_ptr<RangeDelAggregator> range_del_agg(
new RangeDelAggregator(cfd->internal_comparator(), existing_snapshots_));
CompactionRangeDelAggregatorV2 range_del_agg(&cfd->internal_comparator(),
existing_snapshots_);
// Although the v2 aggregator is what the level iterator(s) know about,
// the AddTombstones calls will be propagated down to the v1 aggregator.
std::unique_ptr<InternalIterator> input(versions_->MakeInputIterator(
sub_compact->compaction, range_del_agg.get(), env_optiosn_for_read_));
sub_compact->compaction, &range_del_agg, env_optiosn_for_read_));
AutoThreadOperationStageUpdater stage_updater(
ThreadStatus::STAGE_COMPACTION_PROCESS_KV);
@ -887,7 +900,7 @@ void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) {
input.get(), cfd->user_comparator(), &merge, versions_->LastSequence(),
&existing_snapshots_, earliest_write_conflict_snapshot_,
snapshot_checker_, env_, ShouldReportDetailedTime(env_, stats_), false,
range_del_agg.get(), sub_compact->compaction, compaction_filter,
&range_del_agg, sub_compact->compaction, compaction_filter,
shutting_down_, preserve_deletes_seqnum_));
auto c_iter = sub_compact->c_iter.get();
c_iter->SeekToFirst();
@ -1025,9 +1038,9 @@ void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) {
next_key = &c_iter->key();
}
CompactionIterationStats range_del_out_stats;
status = FinishCompactionOutputFile(input_status, sub_compact,
range_del_agg.get(),
&range_del_out_stats, next_key);
status =
FinishCompactionOutputFile(input_status, sub_compact, &range_del_agg,
&range_del_out_stats, next_key);
RecordDroppedKeys(range_del_out_stats,
&sub_compact->compaction_job_stats);
if (sub_compact->outputs.size() == 1) {
@ -1077,7 +1090,7 @@ void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) {
}
if (status.ok() && sub_compact->builder == nullptr &&
sub_compact->outputs.size() == 0) {
sub_compact->outputs.size() == 0 && !range_del_agg.IsEmpty()) {
// handle subcompaction containing only range deletions
status = OpenCompactionOutputFile(sub_compact);
}
@ -1086,8 +1099,8 @@ void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) {
// close the output file.
if (sub_compact->builder != nullptr) {
CompactionIterationStats range_del_out_stats;
Status s = FinishCompactionOutputFile(
status, sub_compact, range_del_agg.get(), &range_del_out_stats);
Status s = FinishCompactionOutputFile(status, sub_compact, &range_del_agg,
&range_del_out_stats);
if (status.ok()) {
status = s;
}
@ -1152,7 +1165,7 @@ void CompactionJob::RecordDroppedKeys(
Status CompactionJob::FinishCompactionOutputFile(
const Status& input_status, SubcompactionState* sub_compact,
RangeDelAggregator* range_del_agg,
CompactionRangeDelAggregatorV2* range_del_agg,
CompactionIterationStats* range_del_out_stats,
const Slice* next_table_min_key /* = nullptr */) {
AutoThreadOperationStageUpdater stage_updater(
@ -1204,18 +1217,38 @@ Status CompactionJob::FinishCompactionOutputFile(
if (existing_snapshots_.size() > 0) {
earliest_snapshot = existing_snapshots_[0];
}
auto it = range_del_agg->NewIterator();
bool has_overlapping_endpoints;
if (upper_bound != nullptr && meta->largest.size() > 0) {
has_overlapping_endpoints =
ucmp->Compare(meta->largest.user_key(), *upper_bound) == 0;
} else {
has_overlapping_endpoints = false;
}
auto it = range_del_agg->NewIterator(lower_bound, upper_bound,
has_overlapping_endpoints);
// Position the range tombstone output iterator. There may be tombstone
// fragments that are entirely out of range, so make sure that we do not
// include those.
if (lower_bound != nullptr) {
it->Seek(*lower_bound);
} else {
it->SeekToFirst();
}
for (; it->Valid(); it->Next()) {
auto tombstone = it->Tombstone();
if (upper_bound != nullptr &&
ucmp->Compare(*upper_bound, tombstone.start_key_) <= 0) {
// Tombstones starting at upper_bound or later only need to be included
// in the next table. Break because subsequent tombstones will start
// even later.
break;
if (upper_bound != nullptr) {
int cmp = ucmp->Compare(*upper_bound, tombstone.start_key_);
if ((has_overlapping_endpoints && cmp < 0) ||
(!has_overlapping_endpoints && cmp <= 0)) {
// Tombstones starting after upper_bound only need to be included in
// the next table. If the current SST ends before upper_bound, i.e.,
// `has_overlapping_endpoints == false`, we can also skip over range
// tombstones that start exactly at upper_bound. Such range tombstones
// will be included in the next file and are not relevant to the point
// keys or endpoints of the current file.
break;
}
}
if (bottommost_level_ && tombstone.seq_ <= earliest_snapshot) {
@ -1227,6 +1260,8 @@ Status CompactionJob::FinishCompactionOutputFile(
}
auto kv = tombstone.Serialize();
assert(lower_bound == nullptr ||
ucmp->Compare(*lower_bound, kv.second) < 0);
sub_compact->builder->Add(kv.first.Encode(), kv.second);
InternalKey smallest_candidate = std::move(kv.first);
if (lower_bound != nullptr &&
@ -1308,9 +1343,7 @@ Status CompactionJob::FinishCompactionOutputFile(
// VersionEdit.
assert(!sub_compact->outputs.empty());
sub_compact->outputs.pop_back();
sub_compact->builder.reset();
sub_compact->current_output_file_size = 0;
return s;
meta = nullptr;
}
if (s.ok() && (current_entries > 0 || tp.num_range_deletions > 0)) {
@ -1432,7 +1465,7 @@ Status CompactionJob::OpenCompactionOutputFile(
TableFileCreationReason::kCompaction);
#endif // !ROCKSDB_LITE
// Make the output file
unique_ptr<WritableFile> writable_file;
std::unique_ptr<WritableFile> writable_file;
#ifndef NDEBUG
bool syncpoint_arg = env_options_.use_direct_writes;
TEST_SYNC_POINT_CALLBACK("CompactionJob::OpenCompactionOutputFile",
@ -1464,8 +1497,11 @@ Status CompactionJob::OpenCompactionOutputFile(
writable_file->SetWriteLifeTimeHint(write_hint_);
writable_file->SetPreallocationBlockSize(static_cast<size_t>(
sub_compact->compaction->OutputFilePreallocationSize()));
sub_compact->outfile.reset(new WritableFileWriter(
std::move(writable_file), env_options_, db_options_.statistics.get()));
const auto& listeners =
sub_compact->compaction->immutable_cf_options()->listeners;
sub_compact->outfile.reset(
new WritableFileWriter(std::move(writable_file), fname, env_options_,
db_options_.statistics.get(), listeners));
// If the Column family flag is to only optimize filters for hits,
// we can skip creating filters if this is the bottommost_level where

Some files were not shown because too many files have changed in this diff.