
removing unused and unimplemented methods

Simon Grätzer 2017-05-10 18:25:41 +02:00
parent d8053a9bcf
commit 9950fa7245
24 changed files with 60 additions and 397 deletions

View File

@ -31,8 +31,8 @@
#include "Logger/Logger.h"
#include "MMFiles/MMFilesCollection.h"
#include "MMFiles/MMFilesDitch.h"
#include "MMFiles/MMFilesEngine.h"
#include "StorageEngine/EngineSelectorFeature.h"
#include "StorageEngine/StorageEngine.h"
#include "Utils/CursorRepository.h"
#include "VocBase/LogicalCollection.h"
#include "MMFiles/MMFilesLogfileManager.h"
@ -51,7 +51,7 @@ void MMFilesCleanupThread::signal() {
/// @brief cleanup event loop
void MMFilesCleanupThread::run() {
StorageEngine* engine = EngineSelectorFeature::ENGINE;
MMFilesEngine* engine = static_cast<MMFilesEngine*>(EngineSelectorFeature::ENGINE);
uint64_t iterations = 0;
std::vector<arangodb::LogicalCollection*> collections;
@ -226,8 +226,9 @@ void MMFilesCleanupThread::cleanupCollection(arangodb::LogicalCollection* collec
return;
}
}
if (!collection->getPhysical()->isFullyCollected()) {
MMFilesCollection* mmColl = MMFilesCollection::toMMFilesCollection(collection->getPhysical());
if (!mmColl->isFullyCollected()) {
bool isDeleted = false;
// if there is still some garbage collection to perform,
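
The hunk above shows a pattern that recurs throughout this commit: MMFiles-only code stops calling through the generic StorageEngine / PhysicalCollection interfaces and instead casts to the concrete MMFiles types, since the corresponding virtuals are being removed from the base classes. A minimal sketch of that pattern, using simplified stand-in classes rather than the actual ArangoDB headers:

```cpp
#include <cassert>

// Simplified stand-ins; only the shape of the relationship is shown.
struct StorageEngine { virtual ~StorageEngine() = default; };

struct MMFilesEngine : StorageEngine {
  // engine-specific call that is no longer declared on StorageEngine
  void preventCompaction() { /* ... */ }
};

struct EngineSelectorFeature { static StorageEngine* ENGINE; };
StorageEngine* EngineSelectorFeature::ENGINE = nullptr;

void mmfilesOnlyCodePath() {
  // MMFiles-only code may assume the selected engine is the MMFiles engine,
  // so a plain static_cast is sufficient (no dynamic_cast / RTTI check).
  auto* engine = static_cast<MMFilesEngine*>(EngineSelectorFeature::ENGINE);
  assert(engine != nullptr);
  engine->preventCompaction();
}
```

The same shape applies to collections: `MMFilesCollection::toMMFilesCollection(collection->getPhysical())` replaces calls on the generic `PhysicalCollection`.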

View File

@ -43,6 +43,7 @@
#include "MMFiles/MMFilesDatafileHelper.h"
#include "MMFiles/MMFilesDocumentOperation.h"
#include "MMFiles/MMFilesDocumentPosition.h"
#include "MMFiles/MMFilesEngine.h"
#include "MMFiles/MMFilesIndexElement.h"
#include "MMFiles/MMFilesLogfileManager.h"
#include "MMFiles/MMFilesPrimaryIndex.h"
@ -1676,7 +1677,7 @@ int MMFilesCollection::openWorker(bool ignoreErrors) {
try {
// check for journals and datafiles
StorageEngine* engine = EngineSelectorFeature::ENGINE;
MMFilesEngine* engine = static_cast<MMFilesEngine*>(EngineSelectorFeature::ENGINE);
int res = engine->openCollection(vocbase, _logicalCollection, ignoreErrors);
if (res != TRI_ERROR_NO_ERROR) {
@ -2240,9 +2241,8 @@ bool MMFilesCollection::dropIndex(TRI_idx_iid_t iid) {
}
auto cid = _logicalCollection->cid();
StorageEngine* engine = EngineSelectorFeature::ENGINE;
MMFilesEngine* engine = static_cast<MMFilesEngine*>(EngineSelectorFeature::ENGINE);
engine->dropIndex(vocbase, cid, iid);
{
bool const doSync =
application_features::ApplicationServer::getFeature<DatabaseFeature>(

View File

@ -234,9 +234,9 @@ class MMFilesCollection final : public PhysicalCollection {
void open(bool ignoreErrors) override;
/// @brief iterate all markers of a collection on load
int iterateMarkersOnLoad(arangodb::transaction::Methods* trx) override;
int iterateMarkersOnLoad(arangodb::transaction::Methods* trx);
bool isFullyCollected() const override;
bool isFullyCollected() const;
bool doCompact() const { return _doCompact; }
@ -330,7 +330,7 @@ class MMFilesCollection final : public PhysicalCollection {
bool readDocumentConditional(transaction::Methods* trx,
DocumentIdentifierToken const& token,
TRI_voc_tick_t maxTick,
ManagedDocumentResult& result) override;
ManagedDocumentResult& result);
int insert(arangodb::transaction::Methods* trx,
arangodb::velocypack::Slice const newSlice,

View File

@ -25,9 +25,9 @@
#include "Basics/WriteLocker.h"
#include "MMFiles/MMFilesCollection.h"
#include "MMFiles/MMFilesDitch.h"
#include "MMFiles/MMFilesEngine.h"
#include "StorageEngine/EngineSelectorFeature.h"
#include "StorageEngine/PhysicalCollection.h"
#include "StorageEngine/StorageEngine.h"
#include "Utils/CollectionGuard.h"
#include "Utils/SingleCollectionTransaction.h"
#include "Transaction/StandaloneContext.h"
@ -60,7 +60,7 @@ MMFilesCollectionExport::~MMFilesCollectionExport() {
}
void MMFilesCollectionExport::run(uint64_t maxWaitTime, size_t limit) {
StorageEngine* engine = EngineSelectorFeature::ENGINE;
MMFilesEngine* engine = static_cast<MMFilesEngine*>(EngineSelectorFeature::ENGINE);
// try to acquire the exclusive lock on the compaction
engine->preventCompaction(_collection->vocbase(), [this](TRI_vocbase_t* vocbase) {
@ -81,8 +81,9 @@ void MMFilesCollectionExport::run(uint64_t maxWaitTime, size_t limit) {
uint64_t tries = 0;
uint64_t const maxTries = maxWaitTime / SleepTime;
MMFilesCollection* mmColl = MMFilesCollection::toMMFilesCollection(_collection);
while (++tries < maxTries) {
if (_collection->getPhysical()->isFullyCollected()) {
if (mmColl->isFullyCollected()) {
break;
}
usleep(SleepTime);
@ -111,12 +112,13 @@ void MMFilesCollectionExport::run(uint64_t maxWaitTime, size_t limit) {
_vpack.reserve(limit);
MMFilesCollection* mmColl = MMFilesCollection::toMMFilesCollection(_collection);
ManagedDocumentResult mmdr;
trx.invokeOnAllElements(_collection->name(), [this, &limit, &trx, &mmdr](DocumentIdentifierToken const& token) {
trx.invokeOnAllElements(_collection->name(), [this, &limit, &trx, &mmdr, mmColl](DocumentIdentifierToken const& token) {
if (limit == 0) {
return false;
}
if (_collection->readDocumentConditional(&trx, token, 0, mmdr)) {
if (mmColl->readDocumentConditional(&trx, token, 0, mmdr)) {
_vpack.emplace_back(mmdr.vpack());
--limit;
}
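
A side effect of dropping `readDocumentConditional` from the generic interfaces is visible in the lambda above: the MMFiles-specific collection pointer is resolved once before the iteration and captured by value, instead of going through `_collection->getPhysical()` per document. A small sketch of that capture pattern, with placeholder types rather than the real ArangoDB API:

```cpp
#include <cstddef>
#include <functional>
#include <vector>

struct Token { int id; };  // stand-in for DocumentIdentifierToken

struct MMFilesCollectionSketch {
  bool readDocumentConditional(Token const& t) const { return t.id >= 0; }
};

// stand-in for trx.invokeOnAllElements(): calls cb until it returns false
void invokeOnAllElements(std::vector<Token> const& tokens,
                         std::function<bool(Token const&)> const& cb) {
  for (auto const& t : tokens) {
    if (!cb(t)) break;
  }
}

void exportAll(MMFilesCollectionSketch const* mmColl,
               std::vector<Token> const& tokens, std::size_t limit) {
  // mmColl is captured by value (a cheap pointer copy) and stays valid for
  // the duration of the synchronous iteration.
  invokeOnAllElements(tokens, [mmColl, &limit](Token const& t) {
    if (limit == 0) {
      return false;
    }
    if (mmColl->readDocumentConditional(t)) {
      --limit;
    }
    return true;
  });
}
```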

View File

@ -27,8 +27,8 @@
#include "MMFiles/MMFilesCollection.h"
#include "MMFiles/MMFilesDitch.h"
#include "MMFiles/MMFilesLogfileManager.h"
#include "MMFiles/MMFilesEngine.h"
#include "StorageEngine/EngineSelectorFeature.h"
#include "StorageEngine/StorageEngine.h"
#include "Transaction/Helpers.h"
#include "Utils/CollectionGuard.h"
#include "Utils/SingleCollectionTransaction.h"
@ -60,7 +60,7 @@ MMFilesCollectionKeys::MMFilesCollectionKeys(TRI_vocbase_t* vocbase, std::string
MMFilesCollectionKeys::~MMFilesCollectionKeys() {
// remove compaction blocker
StorageEngine* engine = EngineSelectorFeature::ENGINE;
MMFilesEngine* engine = static_cast<MMFilesEngine*>(EngineSelectorFeature::ENGINE);
engine->removeCompactionBlocker(_vocbase, _blockerId);
if (_ditch != nullptr) {
@ -76,7 +76,7 @@ void MMFilesCollectionKeys::create(TRI_voc_tick_t maxTick) {
MMFilesLogfileManager::instance()->waitForCollectorQueue(
_collection->cid(), 30.0);
StorageEngine* engine = EngineSelectorFeature::ENGINE;
MMFilesEngine* engine = static_cast<MMFilesEngine*>(EngineSelectorFeature::ENGINE);
engine->preventCompaction(_collection->vocbase(), [this](TRI_vocbase_t* vocbase) {
// create a ditch under the compaction lock
_ditch = arangodb::MMFilesCollection::toMMFilesCollection(_collection)
@ -104,9 +104,10 @@ void MMFilesCollectionKeys::create(TRI_voc_tick_t maxTick) {
}
ManagedDocumentResult mmdr;
MMFilesCollection *mmColl = MMFilesCollection::toMMFilesCollection(_collection);
trx.invokeOnAllElements(
_collection->name(), [this, &trx, &maxTick, &mmdr](DocumentIdentifierToken const& token) {
if (_collection->readDocumentConditional(&trx, token, maxTick, mmdr)) {
_collection->name(), [this, &trx, &maxTick, &mmdr, &mmColl](DocumentIdentifierToken const& token) {
if (mmColl->readDocumentConditional(&trx, token, maxTick, mmdr)) {
_vpack.emplace_back(mmdr.vpack());
}
return true;

View File

@ -35,10 +35,10 @@
#include "MMFiles/MMFilesDatafileHelper.h"
#include "MMFiles/MMFilesDatafileStatisticsContainer.h"
#include "MMFiles/MMFilesDocumentPosition.h"
#include "MMFiles/MMFilesEngine.h"
#include "MMFiles/MMFilesIndexElement.h"
#include "MMFiles/MMFilesPrimaryIndex.h"
#include "StorageEngine/EngineSelectorFeature.h"
#include "StorageEngine/StorageEngine.h"
#include "Utils/SingleCollectionTransaction.h"
#include "Transaction/StandaloneContext.h"
#include "Transaction/Helpers.h"
@ -856,7 +856,7 @@ void MMFilesCompactorThread::signal() {
}
void MMFilesCompactorThread::run() {
StorageEngine* engine = EngineSelectorFeature::ENGINE;
MMFilesEngine* engine = static_cast<MMFilesEngine*>(EngineSelectorFeature::ENGINE);
std::vector<arangodb::LogicalCollection*> collections;
int numCompacted = 0;

View File

@ -1604,30 +1604,6 @@ void MMFilesEngine::signalCleanup(TRI_vocbase_t* vocbase) {
(*it).second->signal();
}
// iterate all documents of the underlying collection
// this is called when a collection is openend, and all its documents need to be
// added to
// indexes etc.
void MMFilesEngine::iterateDocuments(
TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
std::function<void(arangodb::velocypack::Slice const&)> const& cb) {}
// adds a document to the storage engine
// this will be called by the WAL collector when surviving documents are being
// moved
// into the storage engine's realm
void MMFilesEngine::addDocumentRevision(
TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
arangodb::velocypack::Slice const& document) {}
// removes a document from the storage engine
// this will be called by the WAL collector when non-surviving documents are
// being removed
// from the storage engine's realm
void MMFilesEngine::removeDocumentRevision(
TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
arangodb::velocypack::Slice const& document) {}
/// @brief scans a collection and locates all files
MMFilesEngineCollectionFiles MMFilesEngine::scanCollectionDirectory(
std::string const& path) {

View File

@ -267,11 +267,11 @@ class MMFilesEngine final : public StorageEngine {
// the WAL entry for index deletion will be written *after* the call
// to "dropIndex" returns
void dropIndex(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId,
TRI_idx_iid_t id) override;
TRI_idx_iid_t id);
void dropIndexWalMarker(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId,
arangodb::velocypack::Slice const& data,
bool writeMarker, int&) override;
bool writeMarker, int&);
void unloadCollection(TRI_vocbase_t* vocbase,
arangodb::LogicalCollection* collection) override;
@ -298,67 +298,39 @@ class MMFilesEngine final : public StorageEngine {
void signalCleanup(TRI_vocbase_t* vocbase) override;
// document operations
// -------------------
// iterate all documents of the underlying collection
// this is called when a collection is openend, and all its documents need to
// be added to
// indexes etc.
void iterateDocuments(
TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
std::function<void(arangodb::velocypack::Slice const&)> const& cb)
override;
// adds a document to the storage engine
// this will be called by the WAL collector when surviving documents are being
// moved
// into the storage engine's realm
void addDocumentRevision(
TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
arangodb::velocypack::Slice const& document) override;
// removes a document from the storage engine
// this will be called by the WAL collector when non-surviving documents are
// being removed
// from the storage engine's realm
void removeDocumentRevision(
TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
arangodb::velocypack::Slice const& document) override;
/// @brief scans a collection and locates all files
MMFilesEngineCollectionFiles scanCollectionDirectory(std::string const& path);
/// @brief remove data of expired compaction blockers
bool cleanupCompactionBlockers(TRI_vocbase_t* vocbase) override;
bool cleanupCompactionBlockers(TRI_vocbase_t* vocbase);
/// @brief insert a compaction blocker
int insertCompactionBlocker(TRI_vocbase_t* vocbase, double ttl,
TRI_voc_tick_t& id) override;
TRI_voc_tick_t& id);
/// @brief touch an existing compaction blocker
int extendCompactionBlocker(TRI_vocbase_t* vocbase, TRI_voc_tick_t id,
double ttl) override;
double ttl);
/// @brief remove an existing compaction blocker
int removeCompactionBlocker(TRI_vocbase_t* vocbase,
TRI_voc_tick_t id) override;
TRI_voc_tick_t id);
/// @brief a callback function that is run while it is guaranteed that there
/// is no compaction ongoing
void preventCompaction(
TRI_vocbase_t* vocbase,
std::function<void(TRI_vocbase_t*)> const& callback) override;
std::function<void(TRI_vocbase_t*)> const& callback);
/// @brief a callback function that is run there is no compaction ongoing
bool tryPreventCompaction(TRI_vocbase_t* vocbase,
std::function<void(TRI_vocbase_t*)> const& callback,
bool checkForActiveBlockers) override;
bool checkForActiveBlockers);
int shutdownDatabase(TRI_vocbase_t* vocbase) override;
int openCollection(TRI_vocbase_t* vocbase, LogicalCollection* collection,
bool ignoreErrors) override;
bool ignoreErrors);
/// @brief Add engine-specific AQL functions.
void addAqlFunctions() override;
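
The `override` specifiers removed in this header correspond to pure virtuals that this commit deletes from StorageEngine; the functions remain on MMFilesEngine as plain members. A minimal sketch of that relationship, with simplified, hypothetical signatures:

```cpp
struct StorageEngine {
  virtual ~StorageEngine() = default;
  virtual void signalCleanup() = 0;  // still part of the generic interface
  // virtual int insertCompactionBlocker(double ttl) = 0;  // removed from the interface
};

struct MMFilesEngine final : StorageEngine {
  void signalCleanup() override {}               // still overrides the base
  int insertCompactionBlocker(double /*ttl*/) {  // MMFiles-only now: keeping
    return 0;                                    // `override` here would not compile
  }
};
```

Callers of the engine-specific methods consequently need an `MMFilesEngine*`, which is where the `static_cast<MMFilesEngine*>(EngineSelectorFeature::ENGINE)` changes in the .cpp files come from.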

View File

@ -36,6 +36,7 @@
#include "Indexes/Index.h"
#include "Logger/Logger.h"
#include "MMFiles/MMFilesCollectionKeys.h"
#include "MMFiles/MMFilesEngine.h"
#include "MMFiles/MMFilesLogfileManager.h"
#include "MMFiles/mmfiles-replication-dump.h"
#include "Replication/InitialSyncer.h"
@ -542,7 +543,7 @@ void MMFilesRestReplicationHandler::handleCommandBatch() {
VelocyPackHelper::getNumericValue<double>(input->slice(), "ttl", 0);
TRI_voc_tick_t id;
StorageEngine* engine = EngineSelectorFeature::ENGINE;
MMFilesEngine* engine = static_cast<MMFilesEngine*>(EngineSelectorFeature::ENGINE);
int res = engine->insertCompactionBlocker(_vocbase, expires, id);
if (res != TRI_ERROR_NO_ERROR) {
@ -575,7 +576,7 @@ void MMFilesRestReplicationHandler::handleCommandBatch() {
VelocyPackHelper::getNumericValue<double>(input->slice(), "ttl", 0);
// now extend the blocker
StorageEngine* engine = EngineSelectorFeature::ENGINE;
MMFilesEngine* engine = static_cast<MMFilesEngine*>(EngineSelectorFeature::ENGINE);
int res = engine->extendCompactionBlocker(_vocbase, id, expires);
if (res == TRI_ERROR_NO_ERROR) {
@ -591,7 +592,7 @@ void MMFilesRestReplicationHandler::handleCommandBatch() {
TRI_voc_tick_t id =
static_cast<TRI_voc_tick_t>(StringUtils::uint64(suffixes[1]));
StorageEngine* engine = EngineSelectorFeature::ENGINE;
MMFilesEngine* engine = static_cast<MMFilesEngine*>(EngineSelectorFeature::ENGINE);
int res = engine->removeCompactionBlocker(_vocbase, id);
if (res == TRI_ERROR_NO_ERROR) {
@ -2357,7 +2358,7 @@ void MMFilesRestReplicationHandler::handleCommandCreateKeys() {
TRI_ASSERT(col != nullptr);
// turn off the compaction for the collection
StorageEngine* engine = EngineSelectorFeature::ENGINE;
MMFilesEngine* engine = static_cast<MMFilesEngine*>(EngineSelectorFeature::ENGINE);
TRI_voc_tick_t id;
int res = engine->insertCompactionBlocker(_vocbase, 1200.0, id);

View File

@ -196,18 +196,6 @@ void RocksDBCollection::open(bool ignoreErrors) {
}
}
/// @brief iterate all markers of a collection on load
int RocksDBCollection::iterateMarkersOnLoad(
arangodb::transaction::Methods* trx) {
THROW_ARANGO_NOT_YET_IMPLEMENTED();
return 0;
}
bool RocksDBCollection::isFullyCollected() const {
THROW_ARANGO_NOT_YET_IMPLEMENTED();
return false;
}
void RocksDBCollection::prepareIndexes(
arangodb::velocypack::Slice indexesSlice) {
TRI_ASSERT(indexesSlice.isArray());
@ -839,15 +827,6 @@ bool RocksDBCollection::readDocument(transaction::Methods* trx,
return res.ok();
}
bool RocksDBCollection::readDocumentConditional(
transaction::Methods* trx, DocumentIdentifierToken const& token,
TRI_voc_tick_t maxTick, ManagedDocumentResult& result) {
// should not be called for RocksDB engine. TODO: move this out of general
// API!
THROW_ARANGO_NOT_YET_IMPLEMENTED();
return false;
}
int RocksDBCollection::insert(arangodb::transaction::Methods* trx,
arangodb::velocypack::Slice const slice,
arangodb::ManagedDocumentResult& mdr,

View File

@ -80,11 +80,6 @@ class RocksDBCollection final : public PhysicalCollection {
size_t memory() const override;
void open(bool ignoreErrors) override;
/// @brief iterate all markers of a collection on load
int iterateMarkersOnLoad(arangodb::transaction::Methods* trx) override;
bool isFullyCollected() const override;
////////////////////////////////////
// -- SECTION Indexes --
///////////////////////////////////
@ -133,11 +128,6 @@ class RocksDBCollection final : public PhysicalCollection {
DocumentIdentifierToken const& token,
ManagedDocumentResult& result) override;
bool readDocumentConditional(transaction::Methods* trx,
DocumentIdentifierToken const& token,
TRI_voc_tick_t maxTick,
ManagedDocumentResult& result) override;
int insert(arangodb::transaction::Methods* trx,
arangodb::velocypack::Slice const newSlice,
arangodb::ManagedDocumentResult& result, OperationOptions& options,

View File

@ -52,30 +52,7 @@ RocksDBCollectionExport::RocksDBCollectionExport(
RocksDBCollectionExport::~RocksDBCollectionExport() {}
void RocksDBCollectionExport::run(uint64_t maxWaitTime, size_t limit) {
// none of this should matter on rocksdb
// try to acquire the exclusive lock on the compaction
/*
StorageEngine* engine = EngineSelectorFeature::ENGINE;
engine->preventCompaction(_collection->vocbase(),
[this](TRI_vocbase_t* vocbase) {
// TODO: do something under compaction lock?
});
{
static uint64_t const SleepTime = 10000;
uint64_t tries = 0;
uint64_t const maxTries = maxWaitTime / SleepTime;
while (++tries < maxTries) {
if (_collection->getPhysical()->isFullyCollected()) {
break;
}
usleep(SleepTime);
}
}*/
void RocksDBCollectionExport::run(size_t limit) {
{
SingleCollectionTransaction trx(
@ -110,10 +87,6 @@ void RocksDBCollectionExport::run(uint64_t maxWaitTime, size_t limit) {
_vpack.emplace_back(VPackSlice(mmdr.vpack()));
--limit;
}
/*if (_collection->readDocumentConditional(&trx, token, 0, mmdr)) {
_vpack.emplace_back(VPackSlice(mmdr.vpack()));
--limit;
}*/
return true;
});

View File

@ -61,7 +61,7 @@ class RocksDBCollectionExport {
~RocksDBCollectionExport();
public:
void run(uint64_t, size_t);
void run(size_t);
private:
std::unique_ptr<arangodb::CollectionGuard> _guard;

View File

@ -209,9 +209,10 @@ void RocksDBEdgeIndex::toVelocyPack(VPackBuilder& builder, bool withFigures,
/// @brief return a VelocyPack representation of the index figures
void RocksDBEdgeIndex::toVelocyPackFigures(VPackBuilder& builder) const {
Index::toVelocyPackFigures(builder);
// TODO
THROW_ARANGO_NOT_YET_IMPLEMENTED();
RocksDBIndex::toVelocyPackFigures(builder);
//builder.add(_directionAttr, VPackValue(VPackValueType::Object));
//_edgesFrom->appendToVelocyPack(builder);
//builder.close();
}
int RocksDBEdgeIndex::insert(transaction::Methods* trx,

View File

@ -527,8 +527,7 @@ std::string RocksDBEngine::collectionPath(TRI_vocbase_t const* vocbase,
}
void RocksDBEngine::waitForSync(TRI_voc_tick_t tick) {
// TODO: does anything need to be done here?
// THROW_ARANGO_NOT_YET_IMPLEMENTED();
// intentionally empty, not useful for this type of engine
}
std::shared_ptr<arangodb::velocypack::Builder>
@ -835,20 +834,6 @@ void RocksDBEngine::createIndex(TRI_vocbase_t* vocbase,
TRI_idx_iid_t indexId,
arangodb::velocypack::Slice const& data) {}
void RocksDBEngine::dropIndex(TRI_vocbase_t* vocbase,
TRI_voc_cid_t collectionId, TRI_idx_iid_t iid) {
// probably not required
THROW_ARANGO_NOT_YET_IMPLEMENTED();
}
void RocksDBEngine::dropIndexWalMarker(TRI_vocbase_t* vocbase,
TRI_voc_cid_t collectionId,
arangodb::velocypack::Slice const& data,
bool writeMarker, int&) {
// probably not required
THROW_ARANGO_NOT_YET_IMPLEMENTED();
}
void RocksDBEngine::unloadCollection(TRI_vocbase_t* vocbase,
arangodb::LogicalCollection* collection) {
// TODO: does anything else have to happen?
@ -892,80 +877,10 @@ void RocksDBEngine::signalCleanup(TRI_vocbase_t*) {
// nothing to do here
}
// document operations
// -------------------
void RocksDBEngine::iterateDocuments(
TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
std::function<void(arangodb::velocypack::Slice const&)> const& cb) {
THROW_ARANGO_NOT_YET_IMPLEMENTED();
}
void RocksDBEngine::addDocumentRevision(
TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
arangodb::velocypack::Slice const& document) {
THROW_ARANGO_NOT_YET_IMPLEMENTED();
}
void RocksDBEngine::removeDocumentRevision(
TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
arangodb::velocypack::Slice const& document) {
THROW_ARANGO_NOT_YET_IMPLEMENTED();
}
/// @brief remove data of expired compaction blockers
bool RocksDBEngine::cleanupCompactionBlockers(TRI_vocbase_t* vocbase) {
THROW_ARANGO_NOT_YET_IMPLEMENTED();
return true;
}
/// @brief insert a compaction blocker
int RocksDBEngine::insertCompactionBlocker(TRI_vocbase_t* vocbase, double ttl,
TRI_voc_tick_t& id) {
// THROW_ARANGO_NOT_YET_IMPLEMENTED();
return TRI_ERROR_NO_ERROR;
}
/// @brief touch an existing compaction blocker
int RocksDBEngine::extendCompactionBlocker(TRI_vocbase_t* vocbase,
TRI_voc_tick_t id, double ttl) {
// THROW_ARANGO_NOT_YET_IMPLEMENTED();
return TRI_ERROR_NO_ERROR;
}
/// @brief remove an existing compaction blocker
int RocksDBEngine::removeCompactionBlocker(TRI_vocbase_t* vocbase,
TRI_voc_tick_t id) {
THROW_ARANGO_NOT_YET_IMPLEMENTED();
return true;
}
/// @brief a callback function that is run while it is guaranteed that there
/// is no compaction ongoing
void RocksDBEngine::preventCompaction(
TRI_vocbase_t* vocbase,
std::function<void(TRI_vocbase_t*)> const& callback) {
THROW_ARANGO_NOT_YET_IMPLEMENTED();
}
/// @brief a callback function that is run there is no compaction ongoing
bool RocksDBEngine::tryPreventCompaction(
TRI_vocbase_t* vocbase, std::function<void(TRI_vocbase_t*)> const& callback,
bool checkForActiveBlockers) {
THROW_ARANGO_NOT_YET_IMPLEMENTED();
return true;
}
int RocksDBEngine::shutdownDatabase(TRI_vocbase_t* vocbase) {
return TRI_ERROR_NO_ERROR;
}
int RocksDBEngine::openCollection(TRI_vocbase_t* vocbase,
LogicalCollection* collection,
bool ignoreErrors) {
THROW_ARANGO_NOT_YET_IMPLEMENTED();
return 0;
}
/// @brief Add engine-specific AQL functions.
void RocksDBEngine::addAqlFunctions() {
RocksDBAqlFunctions::registerResources();
@ -1007,16 +922,16 @@ std::pair<TRI_voc_tick_t, TRI_voc_cid_t> RocksDBEngine::mapObjectToCollection(
return it->second;
}
bool RocksDBEngine::syncWal() {
arangodb::Result RocksDBEngine::syncWal() {
#ifdef _WIN32
// SyncWAL always reports "not implemented" on Windows
return true;
return arangodb::Result();
#else
rocksdb::Status status = _db->GetBaseDB()->SyncWAL();
if (!status.ok()) {
return false;
return rocksutils::convertStatus(status);
}
return true;
return arangodb::Result();
#endif
}
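
With `syncWal()` now returning `arangodb::Result` instead of `bool`, the `rocksdb::Status` from `SyncWAL()` can be propagated (via `rocksutils::convertStatus`) rather than collapsed to true/false. A simplified sketch of the new shape, using a stand-in for the real Result type:

```cpp
#include <string>

// Stand-in for arangodb::Result: error number 0 means success.
struct Result {
  int errorNumber = 0;
  std::string errorMessage;
  bool ok() const { return errorNumber == 0; }
};

// Stand-in for the wrapped rocksdb::Status.
struct Status {
  bool good = true;
  std::string msg;
  bool ok() const { return good; }
};

Result syncWalSketch(Status const& status) {
  if (!status.ok()) {
    // previously this path returned false and the caller lost the reason;
    // now an error code and message travel with the result
    return Result{1, status.msg};
  }
  return Result{};  // success
}
```

Callers can then check `ok()` and surface the error, as the `JS_FlushWal` change further down does with `THROW_ARANGO_EXCEPTION_MESSAGE`.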

View File

@ -172,11 +172,6 @@ class RocksDBEngine final : public StorageEngine {
void createIndex(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId,
TRI_idx_iid_t id,
arangodb::velocypack::Slice const& data) override;
void dropIndex(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId,
TRI_idx_iid_t id) override;
void dropIndexWalMarker(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId,
arangodb::velocypack::Slice const& data,
bool writeMarker, int&) override;
void unloadCollection(TRI_vocbase_t* vocbase,
arangodb::LogicalCollection* collection) override;
void createView(TRI_vocbase_t* vocbase, TRI_voc_cid_t id,
@ -192,50 +187,8 @@ class RocksDBEngine final : public StorageEngine {
arangodb::LogicalView const*, bool doSync) override;
void signalCleanup(TRI_vocbase_t* vocbase) override;
// document operations
// -------------------
void iterateDocuments(
TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
std::function<void(arangodb::velocypack::Slice const&)> const& cb)
override;
void addDocumentRevision(
TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
arangodb::velocypack::Slice const& document) override;
void removeDocumentRevision(
TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
arangodb::velocypack::Slice const& document) override;
/// @brief remove data of expired compaction blockers
bool cleanupCompactionBlockers(TRI_vocbase_t* vocbase) override;
/// @brief insert a compaction blocker
int insertCompactionBlocker(TRI_vocbase_t* vocbase, double ttl,
TRI_voc_tick_t& id) override;
/// @brief touch an existing compaction blocker
int extendCompactionBlocker(TRI_vocbase_t* vocbase, TRI_voc_tick_t id,
double ttl) override;
/// @brief remove an existing compaction blocker
int removeCompactionBlocker(TRI_vocbase_t* vocbase,
TRI_voc_tick_t id) override;
/// @brief a callback function that is run while it is guaranteed that there
/// is no compaction ongoing
void preventCompaction(
TRI_vocbase_t* vocbase,
std::function<void(TRI_vocbase_t*)> const& callback) override;
/// @brief a callback function that is run there is no compaction ongoing
bool tryPreventCompaction(TRI_vocbase_t* vocbase,
std::function<void(TRI_vocbase_t*)> const& callback,
bool checkForActiveBlockers) override;
int shutdownDatabase(TRI_vocbase_t* vocbase) override;
int openCollection(TRI_vocbase_t* vocbase, LogicalCollection* collection,
bool ignoreErrors) override;
/// @brief Add engine-specific AQL functions.
void addAqlFunctions() override;
@ -281,7 +234,7 @@ class RocksDBEngine final : public StorageEngine {
static std::string const FeatureName;
RocksDBCounterManager* counterManager() const;
RocksDBReplicationManager* replicationManager() const;
bool syncWal();
arangodb::Result syncWal();
private:
/// single rocksdb database used in this storage engine

View File

@ -223,30 +223,13 @@ void RocksDBRestExportHandler::createCursor() {
}
VPackSlice options = optionsBuilder.slice();
uint64_t waitTime = 0;
bool flush = arangodb::basics::VelocyPackHelper::getBooleanValue(
options, "flush", false);
if (flush) {
static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE)->syncWal();
double flushWait =
arangodb::basics::VelocyPackHelper::getNumericValue<double>(
options, "flushWait", 10.0);
waitTime = static_cast<uint64_t>(
flushWait * 1000 *
1000); // flushWait is specified in s, but we need ns
}
size_t limit = arangodb::basics::VelocyPackHelper::getNumericValue<size_t>(
options, "limit", 0);
// this may throw!
auto collectionExport =
std::make_unique<RocksDBCollectionExport>(_vocbase, name, _restrictions);
collectionExport->run(waitTime, limit);
collectionExport->run(limit);
size_t batchSize =
arangodb::basics::VelocyPackHelper::getNumericValue<size_t>(

View File

@ -366,11 +366,6 @@ void RocksDBRestReplicationHandler::handleCommandBatch() {
// extract ttl
// double expires =
// VelocyPackHelper::getNumericValue<double>(input->slice(), "ttl", 0);
// TRI_voc_tick_t id;
// StorageEngine* engine = EngineSelectorFeature::ENGINE;
// int res = engine->insertCompactionBlocker(_vocbase, expires, id);
RocksDBReplicationContext* ctx = _manager->createContext();
if (ctx == nullptr) {
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unable to create replication context");
@ -1639,9 +1634,6 @@ void RocksDBRestReplicationHandler::handleCommandSync() {
config._verbose = verbose;
config._useCollectionId = useCollectionId;
// wait until all data in current logfile got synced
static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE)->syncWal();
InitialSyncer syncer(_vocbase, &config, restrictCollections, restrictType,
verbose);

View File

@ -46,8 +46,10 @@ static void JS_FlushWal(v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_TRY_CATCH_BEGIN(isolate);
v8::HandleScope scope(isolate);
static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE)->syncWal();
arangodb::Result ret = static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE)->syncWal();
if (!ret.ok()) {
THROW_ARANGO_EXCEPTION_MESSAGE(ret.errorNumber(), ret.errorMessage());
}
TRI_V8_RETURN_TRUE();
TRI_V8_TRY_CATCH_END
}

View File

@ -84,11 +84,6 @@ class PhysicalCollection {
/// @brief opens an existing collection
virtual void open(bool ignoreErrors) = 0;
/// @brief iterate all markers of a collection on load
virtual int iterateMarkersOnLoad(transaction::Methods* trx) = 0;
virtual bool isFullyCollected() const = 0;
void drop();
////////////////////////////////////
@ -144,11 +139,6 @@ class PhysicalCollection {
DocumentIdentifierToken const& token,
ManagedDocumentResult& result) = 0;
virtual bool readDocumentConditional(transaction::Methods* trx,
DocumentIdentifierToken const& token,
TRI_voc_tick_t maxTick,
ManagedDocumentResult& result) = 0;
virtual int insert(arangodb::transaction::Methods* trx,
arangodb::velocypack::Slice const newSlice,
arangodb::ManagedDocumentResult& result,

View File

@ -321,18 +321,6 @@ class StorageEngine : public application_features::ApplicationFeature {
virtual void createIndex(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId,
TRI_idx_iid_t id, arangodb::velocypack::Slice const& data) = 0;
// asks the storage engine to drop the specified index and persist the deletion
// info. Note that physical deletion of the index must not be carried out by this call,
// as there may still be users of the index. It is recommended that this operation
// only sets a deletion flag for the index but let's an async task perform
// the actual deletion.
// the WAL entry for index deletion will be written *after* the call
// to "dropIndex" returns
virtual void dropIndex(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId,
TRI_idx_iid_t id) = 0;
virtual void dropIndexWalMarker(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId,
arangodb::velocypack::Slice const& data, bool useMarker, int&) = 0;
// Returns the StorageEngine-specific implementation
// of the IndexFactory. This is used to validate
// information about indexes.
@ -347,53 +335,8 @@ class StorageEngine : public application_features::ApplicationFeature {
virtual void signalCleanup(TRI_vocbase_t* vocbase) = 0;
// document operations
// -------------------
// iterate all documents of the underlying collection
// this is called when a collection is openend, and all its documents need to be added to
// indexes etc.
virtual void iterateDocuments(TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
std::function<void(arangodb::velocypack::Slice const&)> const& cb) = 0;
// adds a document to the storage engine
// this will be called by the WAL collector when surviving documents are being moved
// into the storage engine's realm
virtual void addDocumentRevision(TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
arangodb::velocypack::Slice const& document) = 0;
// removes a document from the storage engine
// this will be called by the WAL collector when non-surviving documents are being removed
// from the storage engine's realm
virtual void removeDocumentRevision(TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
arangodb::velocypack::Slice const& document) = 0;
/// @brief remove data of expired compaction blockers
virtual bool cleanupCompactionBlockers(TRI_vocbase_t* vocbase) = 0;
/// @brief insert a compaction blocker
virtual int insertCompactionBlocker(TRI_vocbase_t* vocbase, double ttl, TRI_voc_tick_t& id) = 0;
/// @brief touch an existing compaction blocker
virtual int extendCompactionBlocker(TRI_vocbase_t* vocbase, TRI_voc_tick_t id, double ttl) = 0;
/// @brief remove an existing compaction blocker
virtual int removeCompactionBlocker(TRI_vocbase_t* vocbase, TRI_voc_tick_t id) = 0;
/// @brief a callback function that is run while it is guaranteed that there is no compaction ongoing
virtual void preventCompaction(TRI_vocbase_t* vocbase,
std::function<void(TRI_vocbase_t*)> const& callback) = 0;
/// @brief a callback function that is run there is no compaction ongoing
virtual bool tryPreventCompaction(TRI_vocbase_t* vocbase,
std::function<void(TRI_vocbase_t*)> const& callback,
bool checkForActiveBlockers) = 0;
virtual int shutdownDatabase(TRI_vocbase_t* vocbase) = 0;
virtual int openCollection(TRI_vocbase_t* vocbase, LogicalCollection* collection, bool ignoreErrors) = 0;
// AQL functions
// -------------

View File

@ -1412,7 +1412,7 @@ OperationResult transaction::Methods::insertLocal(
res = workForOneDocument(value);
}
// wait for operation(s) to be synced to disk here
// wait for operation(s) to be synced to disk here. On rocksdb maxTick == 0
if (res == TRI_ERROR_NO_ERROR && options.waitForSync && maxTick > 0 &&
isSingleOperationTransaction()) {
EngineSelectorFeature::ENGINE->waitForSync(maxTick);
@ -1723,7 +1723,7 @@ OperationResult transaction::Methods::modifyLocal(
res = workForOneDocument(newValue, false);
}
// wait for operation(s) to be synced to disk here
// wait for operation(s) to be synced to disk here. On rocksdb maxTick == 0
if (res.ok() && options.waitForSync && maxTick > 0 &&
isSingleOperationTransaction()) {
EngineSelectorFeature::ENGINE->waitForSync(maxTick);
@ -1968,7 +1968,7 @@ OperationResult transaction::Methods::removeLocal(
res = workForOneDocument(value, false);
}
// wait for operation(s) to be synced to disk here
// wait for operation(s) to be synced to disk here. On rocksdb maxTick == 0
if (res == TRI_ERROR_NO_ERROR && options.waitForSync && maxTick > 0 &&
isSingleOperationTransaction()) {
EngineSelectorFeature::ENGINE->waitForSync(maxTick);
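
The `maxTick == 0` comments added above encode the guard shown in these hunks: the explicit waitForSync call only happens when the engine reported a non-zero tick for the operation, so RocksDB (which reports 0) skips it. A minimal sketch of that guard, with placeholder names rather than the real transaction::Methods code:

```cpp
#include <cstdint>

struct EngineSketch {
  void waitForSync(std::uint64_t /*tick*/) { /* block until the tick is durable */ }
};

void maybeWaitForSync(EngineSketch& engine, bool opSucceeded, bool waitForSync,
                      std::uint64_t maxTick, bool isSingleOperation) {
  // MMFiles reports the WAL tick of the last marker; RocksDB-based operations
  // report maxTick == 0, so the extra wait is skipped there.
  if (opSucceeded && waitForSync && maxTick > 0 && isSingleOperation) {
    engine.waitForSync(maxTick);
  }
}
```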

View File

@ -1160,12 +1160,6 @@ bool LogicalCollection::readDocument(transaction::Methods* trx,
return getPhysical()->readDocument(trx, token, result);
}
bool LogicalCollection::readDocumentConditional(
transaction::Methods* trx, DocumentIdentifierToken const& token,
TRI_voc_tick_t maxTick, ManagedDocumentResult& result) {
return getPhysical()->readDocumentConditional(trx, token, maxTick, result);
}
/// @brief a method to skip certain documents in AQL write operations,
/// this is only used in the enterprise edition for smart graphs
#ifndef USE_ENTERPRISE

View File

@ -277,11 +277,6 @@ class LogicalCollection {
DocumentIdentifierToken const& token,
ManagedDocumentResult& result);
bool readDocumentConditional(transaction::Methods* trx,
DocumentIdentifierToken const& token,
TRI_voc_tick_t maxTick,
ManagedDocumentResult& result);
/// @brief Persist the connected physical collection.
/// This should be called AFTER the collection is successfully
/// created and only on Sinlge/DBServer