
Merge branch 'bug-fix/add-missing-overrides-and-final' of https://github.com/arangodb/arangodb into devel

jsteemann 2018-05-07 23:02:46 +02:00
commit 7f8a1cc614
44 changed files with 101 additions and 120 deletions
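Editorial note: the commit's theme is adding the `override` and `final` specifiers that were missing from virtual function declarations and from classes not meant to be subclassed further. As a minimal illustrative sketch (the class names below are hypothetical, not taken from the ArangoDB sources): `override` turns an accidental signature mismatch into a compile-time error instead of a silently introduced new function, and `final` documents and enforces that a class is a leaf.

#include <cstdint>

// Hypothetical base class, standing in for interfaces such as Cache or Thread.
class CacheBase {
 public:
  virtual ~CacheBase() = default;
  virtual bool insert(std::uint64_t key) = 0;
};

// 'final' on the class: no further derivation is allowed.
class PlainCacheSketch final : public CacheBase {
 public:
  // 'override': the compiler verifies this matches a base-class virtual.
  bool insert(std::uint64_t key) override { return key != 0; }

  // A typo such as
  //   bool insert(std::uint32_t key) override;   // wrong parameter type
  // would now fail to compile instead of silently declaring a new,
  // non-overriding member function.
};

int main() {
  PlainCacheSketch cache;
  return cache.insert(42) ? 0 : 1;
}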

View File

@ -309,7 +309,7 @@ protected:
/// @brief Get array value
Slice getArray() const;
public:
public:
/// @brief Clear key value store
void clear();

View File

@ -313,7 +313,7 @@ class DistributeBlock : public BlockWithClients {
bool _allowSpecifiedKeys;
};
class RemoteBlock : public ExecutionBlock {
class RemoteBlock final : public ExecutionBlock {
/// @brief constructors/destructors
public:
RemoteBlock(ExecutionEngine* engine, RemoteNode const* en,

View File

@ -39,7 +39,7 @@ class ExecutionPlan;
struct Collection;
/// @brief class RemoteNode
class RemoteNode : public ExecutionNode {
class RemoteNode final : public ExecutionNode {
friend class ExecutionBlock;
friend class RemoteBlock;
@ -308,7 +308,7 @@ class DistributeNode : public ExecutionNode {
};
/// @brief class GatherNode
class GatherNode : public ExecutionNode {
class GatherNode final : public ExecutionNode {
friend class ExecutionBlock;
friend class GatherBlock;
friend class RedundantCalculationsReplacer;

View File

@ -66,7 +66,7 @@ class GraphNode : public ExecutionNode {
virtual ~GraphNode();
void toVelocyPackHelper(arangodb::velocypack::Builder& nodes,
bool verbose) const;
bool verbose) const override;
/// @brief flag, if smart traversal (enterprise edition only!) is done
bool isSmart() const { return _isSmart; }

View File

@ -34,7 +34,7 @@
namespace arangodb {
class CacheRebalancerThread : public Thread {
class CacheRebalancerThread final : public Thread {
public:
CacheRebalancerThread(cache::Manager* manager, uint64_t interval);
~CacheRebalancerThread();

View File

@ -70,7 +70,7 @@ class PlainCache final : public Cache {
/// fashion. The Result contained in the return value should report an error
/// code in this case. Should not block for long.
//////////////////////////////////////////////////////////////////////////////
Finding find(void const* key, uint32_t keySize);
Finding find(void const* key, uint32_t keySize) override;
//////////////////////////////////////////////////////////////////////////////
/// @brief Attempts to insert the given value.
@ -80,7 +80,7 @@ class PlainCache final : public Cache {
/// value if it fails to acquire a lock in a timely fashion. Should not block
/// for long.
//////////////////////////////////////////////////////////////////////////////
Result insert(CachedValue* value);
Result insert(CachedValue* value) override;
//////////////////////////////////////////////////////////////////////////////
/// @brief Attempts to remove the given key.
@ -90,12 +90,12 @@ class PlainCache final : public Cache {
/// acquire a lock in a timely fashion. Makes more attempts to acquire a lock
/// before quitting, so may block for longer than find or insert.
//////////////////////////////////////////////////////////////////////////////
Result remove(void const* key, uint32_t keySize);
Result remove(void const* key, uint32_t keySize) override;
//////////////////////////////////////////////////////////////////////////////
/// @brief Does nothing; convenience method inheritance compliance
//////////////////////////////////////////////////////////////////////////////
Result blacklist(void const* key, uint32_t keySize);
Result blacklist(void const* key, uint32_t keySize) override;
private:
// friend class manager and tasks
@ -109,10 +109,10 @@ class PlainCache final : public Cache {
std::shared_ptr<Table> table,
bool enableWindowedStats);
virtual uint64_t freeMemoryFrom(uint32_t hash);
virtual uint64_t freeMemoryFrom(uint32_t hash) override;
virtual void migrateBucket(void* sourcePtr,
std::unique_ptr<Table::Subtable> targets,
std::shared_ptr<Table> newTable);
std::shared_ptr<Table> newTable) override;
// helpers
std::tuple<Result, PlainBucket*, Table*> getBucket(

View File

@ -78,7 +78,7 @@ class TransactionalCache final : public Cache {
/// fashion. The Result contained in the return value should report an error
/// code in this case. Should not block for long.
//////////////////////////////////////////////////////////////////////////////
Finding find(void const* key, uint32_t keySize);
Finding find(void const* key, uint32_t keySize) override;
//////////////////////////////////////////////////////////////////////////////
/// @brief Attempts to insert the given value.
@ -89,7 +89,7 @@ class TransactionalCache final : public Cache {
/// value if it fails to acquire a lock in a timely fashion. Should not block
/// for long.
//////////////////////////////////////////////////////////////////////////////
Result insert(CachedValue* value);
Result insert(CachedValue* value) override;
//////////////////////////////////////////////////////////////////////////////
/// @brief Attempts to remove the given key.
@ -100,7 +100,7 @@ class TransactionalCache final : public Cache {
/// before quitting, so may block for longer than find or insert. Client may
/// re-try.
//////////////////////////////////////////////////////////////////////////////
Result remove(void const* key, uint32_t keySize);
Result remove(void const* key, uint32_t keySize) override;
//////////////////////////////////////////////////////////////////////////////
/// @brief Attempts to blacklist the given key.
@ -111,7 +111,7 @@ class TransactionalCache final : public Cache {
/// before quitting, so may block for longer than find or insert. Client
/// should re-try.
//////////////////////////////////////////////////////////////////////////////
Result blacklist(void const* key, uint32_t keySize);
Result blacklist(void const* key, uint32_t keySize) override;
private:
// friend class manager and tasks
@ -125,10 +125,10 @@ class TransactionalCache final : public Cache {
std::shared_ptr<Table> table,
bool enableWindowedStats);
virtual uint64_t freeMemoryFrom(uint32_t hash);
virtual uint64_t freeMemoryFrom(uint32_t hash) override;
virtual void migrateBucket(void* sourcePtr,
std::unique_ptr<Table::Subtable> targets,
std::shared_ptr<Table> newTable);
std::shared_ptr<Table> newTable) override;
// helpers
std::tuple<Result, TransactionalBucket*, Table*> getBucket(

View File

@ -39,7 +39,7 @@ class Slice;
}
namespace graph {
struct ShortestPathOptions : public BaseOptions {
struct ShortestPathOptions final : public BaseOptions {
public:
std::string start;
std::string direction;

View File

@ -48,7 +48,7 @@ namespace graph {
struct BaseOptions;
struct SingleServerEdgeDocumentToken;
class SingleServerEdgeCursor : public EdgeCursor {
class SingleServerEdgeCursor final : public EdgeCursor {
private:
BaseOptions* _opts;
transaction::Methods* _trx;

View File

@ -153,7 +153,7 @@ class Traverser {
/// @brief Class to read vertices. Will return each vertex exactly once!
/////////////////////////////////////////////////////////////////////////////
class UniqueVertexGetter : public VertexGetter {
class UniqueVertexGetter final : public VertexGetter {
public:
explicit UniqueVertexGetter(Traverser* traverser)
: VertexGetter(traverser) {}

View File

@ -34,7 +34,7 @@ class Finding;
namespace graph {
class TraverserDocumentCache : public TraverserCache {
class TraverserDocumentCache final : public TraverserCache {
public:
explicit TraverserDocumentCache(aql::Query* query);

View File

@ -96,14 +96,14 @@ struct TraverserOptions : public graph::BaseOptions {
virtual ~TraverserOptions();
/// @brief Build a velocypack for cloning in the plan.
void toVelocyPack(arangodb::velocypack::Builder&) const;
void toVelocyPack(arangodb::velocypack::Builder&) const override;
/// @brief Build a velocypack for indexes
void toVelocyPackIndexes(arangodb::velocypack::Builder&) const;
void toVelocyPackIndexes(arangodb::velocypack::Builder&) const override;
/// @brief Build a velocypack containing all relevant information
/// for DBServer traverser engines.
void buildEngineInfo(arangodb::velocypack::Builder&) const;
void buildEngineInfo(arangodb::velocypack::Builder&) const override;
/// @brief Add a lookup info for specific depth
void addDepthLookupInfo(aql::ExecutionPlan* plan, std::string const& collectionName,
@ -123,7 +123,7 @@ struct TraverserOptions : public graph::BaseOptions {
void linkTraverser(arangodb::traverser::ClusterTraverser*);
double estimateCost(size_t& nrItems) const;
double estimateCost(size_t& nrItems) const override;
private:

View File

@ -54,7 +54,7 @@ class IResearchLinkMMFilesCoordinator final
_sparse = true; // always sparse
}
bool allowExpansion() const noexcept {
bool allowExpansion() const noexcept override {
return true;
}
@ -69,7 +69,7 @@ class IResearchLinkMMFilesCoordinator final
return true;
}
int drop() {
int drop() override {
return IResearchLinkCoordinator::drop().errorNumber();
}
@ -100,7 +100,7 @@ class IResearchLinkMMFilesCoordinator final
return false;
}
void load() noexcept {
void load() noexcept override {
// NOOP
}
@ -142,7 +142,7 @@ class IResearchLinkMMFilesCoordinator final
arangodb::velocypack::Builder& builder,
bool withFigures,
bool /*forPersistence*/
) const {
) const override {
TRI_ASSERT(!builder.isOpenObject());
builder.openObject();
bool const success = IResearchLinkCoordinator::toVelocyPack(builder);
@ -184,7 +184,7 @@ class IResearchLinkRocksDBCoordinator final
_sparse = true; // always sparse
}
bool allowExpansion() const noexcept {
bool allowExpansion() const noexcept override {
// maps to multivalued
return true;
}
@ -201,7 +201,7 @@ class IResearchLinkRocksDBCoordinator final
return true;
}
int drop() {
int drop() override {
return IResearchLinkCoordinator::drop().errorNumber();
}
@ -230,7 +230,7 @@ class IResearchLinkRocksDBCoordinator final
return false;
}
void load() noexcept {
void load() noexcept override {
// NOOP
}
@ -277,7 +277,7 @@ class IResearchLinkRocksDBCoordinator final
arangodb::velocypack::Builder& builder,
bool withFigures,
bool /*forPersistence*/
) const {
) const override {
TRI_ASSERT(!builder.isOpenObject());
builder.openObject();
bool const success = IResearchLinkCoordinator::toVelocyPack(builder);

View File

@ -213,7 +213,7 @@ class IResearchViewScatterNode final : public aql::ExecutionNode {
);
/// @brief return the type of the node
NodeType getType() const noexcept final {
NodeType getType() const noexcept override final {
return SCATTER_IRESEARCH_VIEW;
}
@ -221,21 +221,21 @@ class IResearchViewScatterNode final : public aql::ExecutionNode {
void toVelocyPackHelper(
arangodb::velocypack::Builder&,
bool
) const final;
) const override final;
/// @brief creates corresponding ExecutionBlock
std::unique_ptr<aql::ExecutionBlock> createBlock(
aql::ExecutionEngine& engine,
std::unordered_map<aql::ExecutionNode*, aql::ExecutionBlock*> const&,
std::unordered_set<std::string> const& includedShards
) const final;
) const override final;
/// @brief clone ExecutionNode recursively
aql::ExecutionNode* clone(
aql::ExecutionPlan* plan,
bool withDependencies,
bool withProperties
) const final {
) const override final {
auto node = std::make_unique<IResearchViewScatterNode>(
*plan, _id, *_vocbase, *_view
);
@ -246,7 +246,7 @@ class IResearchViewScatterNode final : public aql::ExecutionNode {
}
/// @brief estimateCost
double estimateCost(size_t&) const final;
double estimateCost(size_t&) const override final;
/// @brief return the database
TRI_vocbase_t& vocbase() const noexcept {

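A note on the `const final` to `const override final` changes above: on a member function, `final` only forbids further overriding; by itself it does not guarantee that the function overrides anything in the base class. Adding `override` as well makes a signature mismatch a hard error. A small hypothetical sketch of the pattern (the names are illustrative, not the real ArangoDB declarations):

#include <cstddef>

struct ExecutionNodeSketch {
  virtual ~ExecutionNodeSketch() = default;
  virtual double estimateCost(std::size_t& nrItems) const { nrItems = 0; return 0.0; }
};

struct ScatterNodeSketch : ExecutionNodeSketch {
  // 'override final': must match a base-class virtual, and nothing may override it again.
  double estimateCost(std::size_t& nrItems) const override final {
    nrItems = 1;
    return 1.0;
  }

  // With only 'final', a mismatched declaration such as
  //   virtual double estimateCost(std::size_t nrItems) const final;  // '&' dropped by mistake
  // still compiles as a brand-new virtual that callers going through the base never reach.
};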
View File

@ -80,7 +80,7 @@ class MMFilesIndexFillerTask : public basics::LocalTask {
std::shared_ptr<std::vector<std::pair<LocalDocumentId, VPackSlice>>> const& documents)
: LocalTask(queue), _trx(trx), _idx(idx), _documents(documents) {}
void run() {
void run() override {
TRI_ASSERT(_idx->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
try {

View File

@ -43,13 +43,13 @@ class MMFilesPersistentIndexKeyComparator final : public rocksdb::Comparator {
return arangodb::velocypack::Slice(slice.data() + MMFilesPersistentIndex::keyPrefixSize());
}
int Compare(rocksdb::Slice const& lhs, rocksdb::Slice const& rhs) const;
int Compare(rocksdb::Slice const& lhs, rocksdb::Slice const& rhs) const override;
char const* Name() const { return "ArangoComparator"; }
char const* Name() const override { return "ArangoComparator"; }
void FindShortestSeparator(std::string*,
rocksdb::Slice const&) const {}
void FindShortSuccessor(std::string*) const {}
rocksdb::Slice const&) const override {}
void FindShortSuccessor(std::string*) const override {}
};
}

View File

@ -30,7 +30,7 @@ namespace arangodb {
/// @brief StorageEngine agnostic wal access interface.
/// TODO: add methods for _admin/wal/ and get rid of engine specific handlers
class MMFilesWalAccess : public WalAccess {
class MMFilesWalAccess final : public WalAccess {
public:
MMFilesWalAccess() {}

View File

@ -46,7 +46,7 @@ LineRank::LineRank(arangodb::velocypack::Slice params)
struct LRMasterContext : MasterContext {
bool _stopNext = false;
bool postGlobalSuperstep() {
bool postGlobalSuperstep() override {
float const* diff = getAggregatedValue<float>(kDiff);
TRI_ASSERT(!_stopNext || *diff == 0);
if (_stopNext) {

View File

@ -49,7 +49,7 @@ template <typename M>
struct SumCombiner : public MessageCombiner<M> {
static_assert(std::is_arithmetic<M>::value, "Message type must be numeric");
SumCombiner() {}
void combine(M& firstValue, M const& secondValue) const {
void combine(M& firstValue, M const& secondValue) const override {
firstValue += secondValue;
}
};

View File

@ -35,26 +35,7 @@ class LogicalCollection;
class DatabaseInitialSyncer;
class ReplicationApplierConfiguration;
/*
arangodb::Result handleSyncKeysMMFiles(DatabaseInitialSyncer& syncer,
arangodb::LogicalCollection* col,
std::string const& keysId,
std::string const& leaderColl,
TRI_voc_tick_t maxTick);
arangodb::Result handleSyncKeysRocksDB(DatabaseInitialSyncer& syncer,
arangodb::LogicalCollection* col,
std::string const& keysId,
std::string const& leaderColl,
TRI_voc_tick_t maxTick);
arangodb::Result syncChunkRocksDB(DatabaseInitialSyncer& syncer, SingleCollectionTransaction* trx,
std::string const& keysId, uint64_t chunkId,
std::string const& lowString,
std::string const& highString,
std::vector<std::pair<std::string, uint64_t>> const& markers);
*/
class DatabaseInitialSyncer : public InitialSyncer {
class DatabaseInitialSyncer final : public InitialSyncer {
friend ::arangodb::Result handleSyncKeysMMFiles(DatabaseInitialSyncer& syncer, arangodb::LogicalCollection* col,
std::string const& keysId);

View File

@ -31,7 +31,7 @@
namespace arangodb {
class DatabaseReplicationApplier;
class DatabaseTailingSyncer : public TailingSyncer {
class DatabaseTailingSyncer final : public TailingSyncer {
public:
DatabaseTailingSyncer(
TRI_vocbase_t& vocbase,
@ -70,4 +70,4 @@ class DatabaseTailingSyncer : public TailingSyncer {
};
}
#endif
#endif

View File

@ -29,7 +29,7 @@
namespace arangodb {
/// Meta Syncer driving multiple initial syncer
class GlobalInitialSyncer : public InitialSyncer {
class GlobalInitialSyncer final : public InitialSyncer {
public:
explicit GlobalInitialSyncer(ReplicationApplierConfiguration const&);

View File

@ -58,7 +58,7 @@ struct ApplierThread : public Thread {
shutdown();
}
void run() {
void run() override {
TRI_ASSERT(_syncer != nullptr);
TRI_ASSERT(_applier != nullptr);

View File

@ -38,7 +38,7 @@ namespace application_features {
class ApplicationServer;
}
class ConsoleThread : public Thread {
class ConsoleThread final : public Thread {
ConsoleThread(const ConsoleThread&) = delete;
ConsoleThread& operator=(const ConsoleThread&) = delete;

View File

@ -42,7 +42,7 @@ namespace aql {
class QueryRegistry;
}
class DatabaseManagerThread : public Thread {
class DatabaseManagerThread final : public Thread {
public:
DatabaseManagerThread(DatabaseManagerThread const&) = delete;
DatabaseManagerThread& operator=(DatabaseManagerThread const&) = delete;

View File

@ -31,7 +31,7 @@ namespace arangodb {
class RocksDBEngine;
class RocksDBBackgroundThread : public Thread {
class RocksDBBackgroundThread final : public Thread {
public:
//////////////////////////////////////////////////////////////////////////////
/// @brief engine pointer

View File

@ -104,7 +104,7 @@ class RocksDBEdgeIndexWarmupTask : public basics::LocalTask {
transaction::Methods* trx,
rocksdb::Slice const& lower,
rocksdb::Slice const& upper);
void run();
void run() override;
};
class RocksDBEdgeIndex final : public RocksDBIndex {

View File

@ -43,9 +43,9 @@ class RocksDBPrefixExtractor final : public rocksdb::SliceTransform {
RocksDBPrefixExtractor() {}
~RocksDBPrefixExtractor() {}
const char* Name() const { return "RocksDBPrefixExtractor"; }
const char* Name() const override { return "RocksDBPrefixExtractor"; }
rocksdb::Slice Transform(rocksdb::Slice const& key) const {
rocksdb::Slice Transform(rocksdb::Slice const& key) const override {
// 8-byte objectID + 0..n-byte string + 1-byte '\0'
// + 8 byte revisionID + 1-byte 0xFF (these are for cut off)
TRI_ASSERT(key.size() >= sizeof(char) + sizeof(uint64_t));
@ -62,18 +62,18 @@ class RocksDBPrefixExtractor final : public rocksdb::SliceTransform {
}
}
bool InDomain(rocksdb::Slice const& key) const {
bool InDomain(rocksdb::Slice const& key) const override {
// 8-byte objectID + n-byte string + 1-byte '\0' + ...
TRI_ASSERT(key.size() >= sizeof(char) + sizeof(uint64_t));
return key.data()[key.size() - 1] != '\0';
}
bool InRange(rocksdb::Slice const& dst) const {
bool InRange(rocksdb::Slice const& dst) const override {
TRI_ASSERT(dst.size() >= sizeof(char) + sizeof(uint64_t));
return dst.data()[dst.size() - 1] != '\0';
}
bool SameResultWhenAppended(rocksdb::Slice const& prefix) const {
bool SameResultWhenAppended(rocksdb::Slice const& prefix) const override {
return prefix.data()[prefix.size() - 1] == '\0';
}

View File

@ -89,7 +89,7 @@ TRI_replication_operation_e rocksutils::convertLogType(RocksDBLogType t) {
}
/// WAL parser
class WALParser : public rocksdb::WriteBatch::Handler {
class WALParser final : public rocksdb::WriteBatch::Handler {
// internal WAL parser states
enum State : char {
INVALID = 0,

View File

@ -30,7 +30,7 @@ namespace arangodb {
/// @brief StorageEngine agnostic wal access interface.
/// TODO: add methods for _admin/wal/ and get rid of engine specific handlers
class RocksDBWalAccess : public WalAccess {
class RocksDBWalAccess final : public WalAccess {
public:
RocksDBWalAccess() {}
virtual ~RocksDBWalAccess() {}

View File

@ -40,13 +40,13 @@ class JobQueueThread final
~JobQueueThread() { shutdown(); }
void beginShutdown() {
void beginShutdown() override {
Thread::beginShutdown();
_jobQueue->wakeup();
}
public:
void run() {
void run() override {
int idleTries = 0;
auto self = shared_from_this();

View File

@ -54,7 +54,7 @@ constexpr double MIN_SECONDS = 30.0;
// -----------------------------------------------------------------------------
namespace {
class SchedulerManagerThread : public Thread {
class SchedulerManagerThread final : public Thread {
public:
SchedulerManagerThread(Scheduler* scheduler, asio::io_context* service)
: Thread("SchedulerManager", true), _scheduler(scheduler), _service(service) {}
@ -62,7 +62,7 @@ class SchedulerManagerThread : public Thread {
~SchedulerManagerThread() { shutdown(); }
public:
void run() {
void run() override {
while (!_scheduler->isStopping()) {
try {
_service->run_one();
@ -92,7 +92,7 @@ class SchedulerThread : public Thread {
~SchedulerThread() { shutdown(); }
public:
void run() {
void run() override {
constexpr size_t EVERY_LOOP = size_t(MIN_SECONDS);
// when we enter this method,

View File

@ -81,7 +81,7 @@ class LogicalView : public LogicalDataSource {
//////////////////////////////////////////////////////////////////////////////
/// @brief drop an existing view
//////////////////////////////////////////////////////////////////////////////
virtual arangodb::Result drop() = 0;
virtual arangodb::Result drop() override = 0;
//////////////////////////////////////////////////////////////////////////////
/// @brief renames an existing view
@ -89,7 +89,7 @@ class LogicalView : public LogicalDataSource {
virtual Result rename(
std::string&& newName,
bool doSync
) = 0;
) override = 0;
//////////////////////////////////////////////////////////////////////////////
/// @brief builds a VelocyPack representation of the node LogicalView

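The LogicalView hunk above shows a less common combination: a pure virtual function that is itself marked `override` (`virtual arangodb::Result drop() override = 0;`). This is valid C++ for an intermediate abstract class that re-declares an inherited virtual as still-pure while letting the compiler check that the signature still matches the base. A hypothetical sketch of the pattern (not the real class hierarchy):

struct DataSourceSketch {
  virtual ~DataSourceSketch() = default;
  virtual int drop() = 0;
};

// Intermediate abstract class: keeps drop() pure, but 'override' verifies the
// signature against DataSourceSketch, so drift is caught here, not in the leaves.
struct ViewSketch : DataSourceSketch {
  int drop() override = 0;
};

struct ConcreteViewSketch final : ViewSketch {
  int drop() override { return 0; }
};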
View File

@ -33,7 +33,7 @@ namespace import {
class ImportHelper;
class AutoTuneThread : public arangodb::Thread {
class AutoTuneThread final : public arangodb::Thread {
private:
AutoTuneThread(AutoTuneThread const&) = delete;
AutoTuneThread& operator=(AutoTuneThread const&) = delete;

View File

@ -40,7 +40,7 @@ class SimpleHttpResult;
namespace import {
struct ImportStatistics;
class SenderThread : public arangodb::Thread {
class SenderThread final : public arangodb::Thread {
private:
SenderThread(SenderThread const&) = delete;
SenderThread& operator=(SenderThread const&) = delete;

View File

@ -75,7 +75,7 @@ struct Entry<Element, IndexType, false> {
};
template <class Element, class IndexType, bool useHashCache>
class MultiInserterTask : public LocalTask {
class MultiInserterTask final : public LocalTask {
private:
typedef Entry<Element, IndexType, useHashCache> EntryType;
typedef arangodb::basics::IndexBucket<EntryType, IndexType, SIZE_MAX> Bucket;
@ -109,7 +109,7 @@ class MultiInserterTask : public LocalTask {
_userData(userData),
_allBuckets(allBuckets) {}
void run() {
void run() override {
// sort first so we have a deterministic insertion order
std::sort((*_allBuckets)[_i].begin(), (*_allBuckets)[_i].end(),
[](DocumentsPerBucket const& lhs,
@ -145,7 +145,7 @@ class MultiInserterTask : public LocalTask {
};
template <class Element, class IndexType, bool useHashCache>
class MultiPartitionerTask : public LocalTask {
class MultiPartitionerTask final : public LocalTask {
private:
typedef MultiInserterTask<Element, IndexType, useHashCache> Inserter;
typedef std::vector<std::pair<Element, uint64_t>> DocumentsPerBucket;
@ -191,7 +191,7 @@ class MultiPartitionerTask : public LocalTask {
_inserters(inserters),
_bucketsMask(_allBuckets->size() - 1) {}
void run() {
void run() override {
try {
std::vector<DocumentsPerBucket> partitions;
partitions.resize(

View File

@ -53,7 +53,7 @@ struct BucketPosition {
};
template <class Element>
class UniqueInserterTask : public LocalTask {
class UniqueInserterTask final : public LocalTask {
private:
typedef arangodb::basics::IndexBucket<Element, uint64_t, SIZE_MAX> Bucket;
typedef std::vector<std::pair<Element, uint64_t>> DocumentsPerBucket;
@ -85,7 +85,7 @@ class UniqueInserterTask : public LocalTask {
_userData(userData),
_allBuckets(allBuckets) {}
void run() {
void run() override {
// actually insert them
try {
Bucket& b = (*_buckets)[static_cast<size_t>(_i)];
@ -117,7 +117,7 @@ class UniqueInserterTask : public LocalTask {
};
template <class Element>
class UniquePartitionerTask : public LocalTask {
class UniquePartitionerTask final : public LocalTask {
private:
typedef UniqueInserterTask<Element> Inserter;
typedef std::vector<std::pair<Element, uint64_t>> DocumentsPerBucket;
@ -163,7 +163,7 @@ class UniquePartitionerTask : public LocalTask {
_inserters(inserters),
_bucketsMask(_allBuckets->size() - 1) {}
void run() {
void run() override {
try {
std::vector<DocumentsPerBucket> partitions;
partitions.resize(

View File

@ -70,7 +70,7 @@ namespace arangodb {
namespace basics {
/// @brief arango exception type
class Exception : public virtual std::exception {
class Exception final : public virtual std::exception {
public:
static std::string FillExceptionString(int, ...);
static std::string FillFormatExceptionString(char const* format, ...);

View File

@ -98,13 +98,13 @@ class LogAppenderStdStream : public LogAppenderStream {
void writeLogMessage(LogLevel, char const*, size_t) override final;
};
class LogAppenderStderr : public LogAppenderStdStream {
class LogAppenderStderr final : public LogAppenderStdStream {
public:
explicit LogAppenderStderr(std::string const& filter)
: LogAppenderStdStream("+", filter, STDERR_FILENO) {}
};
class LogAppenderStdout : public LogAppenderStdStream {
class LogAppenderStdout final : public LogAppenderStdStream {
public:
explicit LogAppenderStdout(std::string const& filter)
: LogAppenderStdStream("-", filter, STDOUT_FILENO) {}

View File

@ -29,7 +29,7 @@
namespace arangodb {
#ifdef ARANGODB_ENABLE_SYSLOG
class LogAppenderSyslog : public LogAppender {
class LogAppenderSyslog final : public LogAppender {
public:
static void close();

View File

@ -56,7 +56,7 @@ class VstResponse : public GeneralResponse {
VPackMessageNoOwnBuffer prepareForNetwork();
void reset(ResponseCode code) final;
void reset(ResponseCode code) override final;
void addPayload(VPackSlice const&,
arangodb::velocypack::Options const* = nullptr,
bool resolveExternals = true) override;

View File

@ -41,18 +41,18 @@ struct ExpressionContextMock final : arangodb::aql::ExpressionContext {
virtual ~ExpressionContextMock();
virtual size_t numRegisters() const{
virtual size_t numRegisters() const override {
TRI_ASSERT(false);
return 0;
}
virtual arangodb::aql::AqlValue const& getRegisterValue(size_t) const {
virtual arangodb::aql::AqlValue const& getRegisterValue(size_t) const override {
TRI_ASSERT(false);
static arangodb::aql::AqlValue EMPTY;
return EMPTY;
}
virtual arangodb::aql::Variable const* getVariable(size_t i) const {
virtual arangodb::aql::Variable const* getVariable(size_t i) const override {
TRI_ASSERT(false);
return nullptr;
}
@ -61,7 +61,7 @@ struct ExpressionContextMock final : arangodb::aql::ExpressionContext {
arangodb::aql::Variable const* variable,
bool doCopy,
bool& mustDestroy
) const;
) const override;
std::unordered_map<std::string, arangodb::aql::AqlValue> vars;
}; // ExpressionContextMock

View File

@ -215,7 +215,7 @@ struct custom_sort: public irs::sort {
DECLARE_FACTORY_DEFAULT();
custom_sort(): sort(custom_sort::type()) {}
virtual prepared::ptr prepare() const {
virtual prepared::ptr prepare() const override {
return custom_sort::prepared::make<custom_sort::prepared>(*this);
}
};

View File

@ -413,18 +413,18 @@ class IndexMock final : public arangodb::Index {
IndexMock()
: arangodb::Index(0, nullptr, std::vector<std::vector<arangodb::basics::AttributeName>>(), false, false) {
}
virtual char const* typeName() const { return "IndexMock"; }
virtual bool allowExpansion() const { return false; }
virtual IndexType type() const { return TRI_IDX_TYPE_UNKNOWN; }
virtual bool canBeDropped() const { return true; }
virtual bool isSorted() const { return true; }
virtual bool hasSelectivityEstimate() const { return false; }
virtual size_t memory() const { return 0; }
virtual char const* typeName() const override { return "IndexMock"; }
virtual bool allowExpansion() const override { return false; }
virtual IndexType type() const override { return TRI_IDX_TYPE_UNKNOWN; }
virtual bool canBeDropped() const override { return true; }
virtual bool isSorted() const override { return true; }
virtual bool hasSelectivityEstimate() const override { return false; }
virtual size_t memory() const override { return 0; }
virtual arangodb::Result insert(
arangodb::transaction::Methods*,
arangodb::LocalDocumentId const&,
arangodb::velocypack::Slice const&,
OperationMode mode) {
OperationMode mode) override {
TRI_ASSERT(false);
return arangodb::Result();
}
@ -432,12 +432,12 @@ class IndexMock final : public arangodb::Index {
arangodb::transaction::Methods*,
arangodb::LocalDocumentId const&,
arangodb::velocypack::Slice const&,
OperationMode mode) {
OperationMode mode) override {
TRI_ASSERT(false);
return arangodb::Result();
}
virtual void load() {}
virtual void unload() {}
virtual void load() override {}
virtual void unload() override {}
} EMPTY_INDEX;
class ReverseAllIteratorMock final : public arangodb::IndexIterator {