mirror of https://gitee.com/bigwinds/arangodb

commit ab815e6020
Merge branch 'devel' of github.com:arangodb/arangodb into devel
@@ -47,8 +47,7 @@ class ArrayIterator {
if (slice.type() != ValueType::Array) {
throw Exception(Exception::InvalidValueType, "Expecting Array slice");
}

reset();
reset();
}

ArrayIterator(ArrayIterator const& other) noexcept

@@ -58,6 +57,7 @@ class ArrayIterator {
_current(other._current) {}

ArrayIterator& operator=(ArrayIterator const& other) = delete;
ArrayIterator& operator=(ArrayIterator && other) = default;

// prefix ++
ArrayIterator& operator++() {

@@ -167,7 +167,7 @@ class ArrayIterator {

private:
Slice _slice;
ValueLength const _size;
ValueLength _size;
ValueLength _position;
uint8_t const* _current;
};

@@ -210,6 +210,7 @@ class ObjectIterator {
_useSequentialIteration(other._useSequentialIteration) {}

ObjectIterator& operator=(ObjectIterator const& other) = delete;
ObjectIterator& operator=(ObjectIterator && other) = default;

// prefix ++
ObjectIterator& operator++() {

@@ -306,10 +307,10 @@ class ObjectIterator {

private:
Slice _slice;
ValueLength const _size;
ValueLength _size;
ValueLength _position;
uint8_t const* _current;
bool const _useSequentialIteration;
bool _useSequentialIteration;
};

} // namespace arangodb::velocypack
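The iterator hunks above drop const from members such as _size and _useSequentialIteration while adding a defaulted move-assignment operator; a const data member would leave that defaulted operator implicitly deleted. A minimal sketch of the issue with hypothetical structs (not from the patch):

struct WithConstMember {
  size_t const _size;
  // a "WithConstMember& operator=(WithConstMember&&) = default;" here would be
  // defined as deleted, so move-assigning an instance would not compile
};

struct WithoutConstMember {
  size_t _size;  // non-const member ...
  WithoutConstMember& operator=(WithoutConstMember&&) = default;  // ... so this works
};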
@@ -1344,7 +1344,7 @@ AgencyCommResult AgencyComm::sendWithFailover(
return result;
}

std::this_thread::sleep_for(waitUntil-std::chrono::steady_clock::now());
std::this_thread::sleep_until(waitUntil);
if (waitInterval.count() == 0.0) {
waitInterval = std::chrono::duration<double>(0.25);
} else if (waitInterval.count() < 5.0) {
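Replacing sleep_for(waitUntil - now()) with sleep_until(waitUntil) states the retry deadline directly and is simply a no-op when the deadline has already passed. A self-contained sketch of the pattern (the deadline value is made up):

#include <chrono>
#include <thread>

int main() {
  auto waitUntil = std::chrono::steady_clock::now() + std::chrono::milliseconds(250);
  // returns immediately if waitUntil is already in the past;
  // no guard against a negative remaining duration is needed
  std::this_thread::sleep_until(waitUntil);
}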
@@ -318,7 +318,7 @@ bool FailedLeader::start() {
auto slice = result.get(
std::vector<std::string>({
agencyPrefix, "Supervision", "Health", _from, "Status"}));
if (!slice.isString() || slice.copyString() != "FAILED") {
if (slice.isString() && slice.copyString() == "GOOD") {
finish("", _shard, false, "Server " + _from + " no longer failing.");
return false;
}

@@ -327,7 +327,7 @@ bool FailedLeader::start() {
slice = result.get(
std::vector<std::string>(
{agencyPrefix, "Supervision", "Health", _to, "Status"}));
if (!slice.isString() || slice.copyString() != "GOOD") {
if (slice.isString() && slice.copyString() != "GOOD") {
LOG_TOPIC(INFO, Logger::SUPERVISION)
<< "Will not failover from " << _from << " to " << _to
<< " as target server is no longer in good condition. Will retry.";

@@ -807,6 +807,41 @@ bool Node::getBool() const {
return slice().getBool();
}

bool Node::isBool() const noexcept {
if (type() == NODE) {
return false;
}
return slice().isBool();
}

bool Node::isDouble() const noexcept {
if (type() == NODE) {
return false;
}
return slice().isDouble();
}

bool Node::isString() const noexcept {
if (type() == NODE) {
return false;
}
return slice().isString();
}

bool Node::isUInt() const noexcept {
if (type() == NODE) {
return false;
}
return slice().isUInt() || slice().isSmallInt();
}

bool Node::isInt() const noexcept {
if (type() == NODE) {
return false;
}
return slice().isInt() || slice().isSmallInt();
}

double Node::getDouble() const {
if (type() == NODE) {
throw StoreException("Must not convert NODE type to double");
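The new Node::isBool/isDouble/isString/isUInt/isInt helpers let callers test the stored type before extracting a value rather than catching exceptions thrown by the get*() accessors, which is how the Supervision hunks later in this commit use them. A hypothetical caller, for illustration only:

uint64_t readReplicationFactor(Node const& col) {
  if (col.has("replicationFactor") && col("replicationFactor").isUInt()) {
    return col("replicationFactor").getUInt();  // type checked first, no try/catch needed
  }
  return 1;  // hypothetical fallback value
}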
@@ -217,15 +217,30 @@ class Node {
/// @brief Get integer value (throws if type NODE or if conversion fails)
int getInt() const;

/// @brief Is UInt
bool isInt() const noexcept;

/// @brief Get insigned value (throws if type NODE or if conversion fails)
uint64_t getUInt() const;

/// @brief Is UInt
bool isUInt() const noexcept;

/// @brief Get bool value (throws if type NODE or if conversion fails)
bool getBool() const;

/// @brief Is boolean
bool isBool() const noexcept;

/// @brief Get double value (throws if type NODE or if conversion fails)
double getDouble() const;

/// @brief Is double
bool isDouble() const noexcept;

/// @brief Is double
bool isString() const noexcept;

/// @brief Get string value (throws if type NODE or if conversion fails)
std::string getString() const;

@@ -102,12 +102,12 @@ void Supervision::upgradeZero(Builder& builder) {
{ VPackObjectBuilder o(&builder);
builder.add(VPackValue(failedServersPrefix));
{ VPackObjectBuilder oo(&builder);
try {
if (fails.length() > 0) {
for (auto const& fail : VPackArrayIterator(fails)) {
builder.add(VPackValue(fail.copyString()));
{ VPackObjectBuilder ooo(&builder); }
}
} catch (...) {}
}
}
}
}

@@ -316,9 +316,9 @@ std::vector<check_t> Supervision::checkCoordinators() {
_snapshot(currentServersRegisteredPrefix).children();

std::string currentFoxxmaster;
try {
if (_snapshot.has(foxxmaster)) {
currentFoxxmaster = _snapshot(foxxmaster).getString();
} catch (...) {}
}

std::string goodServerId;
bool foxxmasterOk = false;

@@ -461,10 +461,13 @@ bool Supervision::updateSnapshot() {
return false;
}

try {
if (_agent->readDB().has(_agencyPrefix)) {
_snapshot = _agent->readDB().get(_agencyPrefix);
}

if (_agent->transient().has(_agencyPrefix)) {
_transient = _agent->transient().get(_agencyPrefix);
} catch (...) {}
}

return true;

@@ -555,24 +558,15 @@ void Supervision::run() {

// Guarded by caller
bool Supervision::isShuttingDown() {
try {
return _snapshot("/Shutdown").getBool();
} catch (...) {
return false;
}
return (_snapshot.has("Shutdown") && _snapshot("Shutdown").isBool()) ?
_snapshot("/Shutdown").getBool() : false;
}

// Guarded by caller
std::string Supervision::serverHealth(std::string const& serverName) {
try {
std::string const serverStatus(healthPrefix + serverName + "/Status");
auto const status = _snapshot(serverStatus).getString();
return status;
} catch (...) {
LOG_TOPIC(WARN, Logger::SUPERVISION)
<< "Couldn't read server health status for server " << serverName;
return "";
}
std::string const serverStatus(healthPrefix + serverName + "/Status");
return (_snapshot.has(serverStatus)) ?
_snapshot(serverStatus).getString() : std::string();
}

// Guarded by caller
@@ -658,9 +652,9 @@ void Supervision::enforceReplication() {
auto const& col = *(col_.second);

size_t replicationFactor;
try {
replicationFactor = col("replicationFactor").slice().getUInt();
} catch (std::exception const&) {
if (col.has("replicationFactor") && col("replicationFactor").isUInt()) {
replicationFactor = col("replicationFactor").getUInt();
} else {
LOG_TOPIC(DEBUG, Logger::SUPERVISION)
<< "no replicationFactor entry in " << col.toJson();
continue;

@@ -777,11 +771,13 @@ void Supervision::shrinkCluster() {
auto availServers = Job::availableServers(_snapshot);

size_t targetNumDBServers;
try {
targetNumDBServers = _snapshot("/Target/NumberOfDBServers").getUInt();
} catch (std::exception const& e) {
std::string const NDBServers ("/Target/NumberOfDBServers");

if (_snapshot.has(NDBServers) && _snapshot(NDBServers).isUInt()) {
targetNumDBServers = _snapshot(NDBServers).getUInt();
} else {
LOG_TOPIC(TRACE, Logger::SUPERVISION)
<< "Targeted number of DB servers not set yet: " << e.what();
<< "Targeted number of DB servers not set yet";
return;
}

@@ -790,7 +786,7 @@ void Supervision::shrinkCluster() {
// Minimum 1 DB server must remain
if (availServers.size() == 1) {
LOG_TOPIC(DEBUG, Logger::SUPERVISION)
<< "Only one db server left for operation";
<< "Only one db server left for operation";
return;
}

@@ -810,15 +806,17 @@ void Supervision::shrinkCluster() {
auto const& databases = _snapshot(planColPrefix).children();
for (auto const& database : databases) {
for (auto const& collptr : database.second->children()) {
try {
uint64_t replFact = (*collptr.second)("replicationFactor").getUInt();
auto const& node = *collptr.second;
if (node.has("replicationFactor") &&
node("replicationFactor").isUInt()) {
auto replFact = node("replicationFactor").getUInt();
if (replFact > maxReplFact) {
maxReplFact = replFact;
}
} catch (std::exception const& e) {
} else {
LOG_TOPIC(WARN, Logger::SUPERVISION)
<< "Cannot retrieve replication factor for collection "
<< collptr.first << ": " << e.what();
<< collptr.first;
return;
}
}

@@ -835,7 +833,7 @@ void Supervision::shrinkCluster() {
availServers.size() > targetNumDBServers) {
// Sort servers by name
std::sort(availServers.begin(), availServers.end());

// Schedule last server for cleanout
CleanOutServer(_snapshot, _agent, std::to_string(_jobId++),
"supervision", availServers.back()).run();

@@ -1845,6 +1845,7 @@ int ClusterInfo::ensureIndexCoordinator(
errorMsg += trx.toJson();
errorMsg += "ClientId: " + result._clientId + " ";
errorMsg += " ResultCode: " + std::to_string(result.errorCode()) + " ";
errorMsg += " HttpCode: " + std::to_string(result.httpCode()) + " ";
errorMsg += std::string(__FILE__) + ":" + std::to_string(__LINE__);
resultBuilder = *resBuilder;
}
@@ -31,8 +31,8 @@
#include "Logger/Logger.h"
#include "MMFiles/MMFilesCollection.h"
#include "MMFiles/MMFilesDitch.h"
#include "MMFiles/MMFilesEngine.h"
#include "StorageEngine/EngineSelectorFeature.h"
#include "StorageEngine/StorageEngine.h"
#include "Utils/CursorRepository.h"
#include "VocBase/LogicalCollection.h"
#include "MMFiles/MMFilesLogfileManager.h"

@@ -51,7 +51,7 @@ void MMFilesCleanupThread::signal() {

/// @brief cleanup event loop
void MMFilesCleanupThread::run() {
StorageEngine* engine = EngineSelectorFeature::ENGINE;
MMFilesEngine* engine = static_cast<MMFilesEngine*>(EngineSelectorFeature::ENGINE);
uint64_t iterations = 0;

std::vector<arangodb::LogicalCollection*> collections;

@@ -226,8 +226,9 @@ void MMFilesCleanupThread::cleanupCollection(arangodb::LogicalCollection* collec
return;
}
}

if (!collection->getPhysical()->isFullyCollected()) {

MMFilesCollection* mmColl = MMFilesCollection::toMMFilesCollection(collection->getPhysical());
if (!mmColl->isFullyCollected()) {
bool isDeleted = false;

// if there is still some garbage collection to perform,
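This and the following MMFiles hunks switch the engine lookup from the abstract StorageEngine* to MMFilesEngine*, since the compaction and cleanup entry points these call sites use are being dropped from the generic StorageEngine interface (see the MMFilesEngine.h hunks further down). A sketch of the recurring pattern, not taken verbatim from the patch:

// the selected engine is known to be the MMFiles engine at these call sites,
// so it is downcast once and the MMFiles-specific API is used directly
MMFilesEngine* engine =
    static_cast<MMFilesEngine*>(EngineSelectorFeature::ENGINE);
engine->preventCompaction(vocbase, [](TRI_vocbase_t*) {
  // work that must not overlap with compaction
});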
@@ -43,6 +43,7 @@
#include "MMFiles/MMFilesDatafileHelper.h"
#include "MMFiles/MMFilesDocumentOperation.h"
#include "MMFiles/MMFilesDocumentPosition.h"
#include "MMFiles/MMFilesEngine.h"
#include "MMFiles/MMFilesIndexElement.h"
#include "MMFiles/MMFilesLogfileManager.h"
#include "MMFiles/MMFilesPrimaryIndex.h"

@@ -1676,7 +1677,7 @@ int MMFilesCollection::openWorker(bool ignoreErrors) {

try {
// check for journals and datafiles
StorageEngine* engine = EngineSelectorFeature::ENGINE;
MMFilesEngine* engine = static_cast<MMFilesEngine*>(EngineSelectorFeature::ENGINE);
int res = engine->openCollection(vocbase, _logicalCollection, ignoreErrors);

if (res != TRI_ERROR_NO_ERROR) {

@@ -2240,9 +2241,8 @@ bool MMFilesCollection::dropIndex(TRI_idx_iid_t iid) {
}

auto cid = _logicalCollection->cid();
StorageEngine* engine = EngineSelectorFeature::ENGINE;
MMFilesEngine* engine = static_cast<MMFilesEngine*>(EngineSelectorFeature::ENGINE);
engine->dropIndex(vocbase, cid, iid);

{
bool const doSync =
application_features::ApplicationServer::getFeature<DatabaseFeature>(

@@ -234,9 +234,9 @@ class MMFilesCollection final : public PhysicalCollection {
void open(bool ignoreErrors) override;

/// @brief iterate all markers of a collection on load
int iterateMarkersOnLoad(arangodb::transaction::Methods* trx) override;
int iterateMarkersOnLoad(arangodb::transaction::Methods* trx);

bool isFullyCollected() const override;
bool isFullyCollected() const;

bool doCompact() const { return _doCompact; }

@@ -330,7 +330,7 @@ class MMFilesCollection final : public PhysicalCollection {
bool readDocumentConditional(transaction::Methods* trx,
DocumentIdentifierToken const& token,
TRI_voc_tick_t maxTick,
ManagedDocumentResult& result) override;
ManagedDocumentResult& result);

int insert(arangodb::transaction::Methods* trx,
arangodb::velocypack::Slice const newSlice,

@@ -25,9 +25,9 @@
#include "Basics/WriteLocker.h"
#include "MMFiles/MMFilesCollection.h"
#include "MMFiles/MMFilesDitch.h"
#include "MMFiles/MMFilesEngine.h"
#include "StorageEngine/EngineSelectorFeature.h"
#include "StorageEngine/PhysicalCollection.h"
#include "StorageEngine/StorageEngine.h"
#include "Utils/CollectionGuard.h"
#include "Utils/SingleCollectionTransaction.h"
#include "Transaction/StandaloneContext.h"

@@ -60,7 +60,7 @@ MMFilesCollectionExport::~MMFilesCollectionExport() {
}

void MMFilesCollectionExport::run(uint64_t maxWaitTime, size_t limit) {
StorageEngine* engine = EngineSelectorFeature::ENGINE;
MMFilesEngine* engine = static_cast<MMFilesEngine*>(EngineSelectorFeature::ENGINE);

// try to acquire the exclusive lock on the compaction
engine->preventCompaction(_collection->vocbase(), [this](TRI_vocbase_t* vocbase) {

@@ -81,8 +81,9 @@ void MMFilesCollectionExport::run(uint64_t maxWaitTime, size_t limit) {
uint64_t tries = 0;
uint64_t const maxTries = maxWaitTime / SleepTime;

MMFilesCollection* mmColl = MMFilesCollection::toMMFilesCollection(_collection);
while (++tries < maxTries) {
if (_collection->getPhysical()->isFullyCollected()) {
if (mmColl->isFullyCollected()) {
break;
}
usleep(SleepTime);

@@ -111,12 +112,13 @@ void MMFilesCollectionExport::run(uint64_t maxWaitTime, size_t limit) {

_vpack.reserve(limit);

MMFilesCollection* mmColl = MMFilesCollection::toMMFilesCollection(_collection);
ManagedDocumentResult mmdr;
trx.invokeOnAllElements(_collection->name(), [this, &limit, &trx, &mmdr](DocumentIdentifierToken const& token) {
trx.invokeOnAllElements(_collection->name(), [this, &limit, &trx, &mmdr, mmColl](DocumentIdentifierToken const& token) {
if (limit == 0) {
return false;
}
if (_collection->readDocumentConditional(&trx, token, 0, mmdr)) {
if (mmColl->readDocumentConditional(&trx, token, 0, mmdr)) {
_vpack.emplace_back(mmdr.vpack());
--limit;
}

@@ -27,8 +27,8 @@
#include "MMFiles/MMFilesCollection.h"
#include "MMFiles/MMFilesDitch.h"
#include "MMFiles/MMFilesLogfileManager.h"
#include "MMFiles/MMFilesEngine.h"
#include "StorageEngine/EngineSelectorFeature.h"
#include "StorageEngine/StorageEngine.h"
#include "Transaction/Helpers.h"
#include "Utils/CollectionGuard.h"
#include "Utils/SingleCollectionTransaction.h"

@@ -60,7 +60,7 @@ MMFilesCollectionKeys::MMFilesCollectionKeys(TRI_vocbase_t* vocbase, std::string

MMFilesCollectionKeys::~MMFilesCollectionKeys() {
// remove compaction blocker
StorageEngine* engine = EngineSelectorFeature::ENGINE;
MMFilesEngine* engine = static_cast<MMFilesEngine*>(EngineSelectorFeature::ENGINE);
engine->removeCompactionBlocker(_vocbase, _blockerId);

if (_ditch != nullptr) {

@@ -76,7 +76,7 @@ void MMFilesCollectionKeys::create(TRI_voc_tick_t maxTick) {
MMFilesLogfileManager::instance()->waitForCollectorQueue(
_collection->cid(), 30.0);

StorageEngine* engine = EngineSelectorFeature::ENGINE;
MMFilesEngine* engine = static_cast<MMFilesEngine*>(EngineSelectorFeature::ENGINE);
engine->preventCompaction(_collection->vocbase(), [this](TRI_vocbase_t* vocbase) {
// create a ditch under the compaction lock
_ditch = arangodb::MMFilesCollection::toMMFilesCollection(_collection)

@@ -104,9 +104,10 @@ void MMFilesCollectionKeys::create(TRI_voc_tick_t maxTick) {
}

ManagedDocumentResult mmdr;
MMFilesCollection *mmColl = MMFilesCollection::toMMFilesCollection(_collection);
trx.invokeOnAllElements(
_collection->name(), [this, &trx, &maxTick, &mmdr](DocumentIdentifierToken const& token) {
if (_collection->readDocumentConditional(&trx, token, maxTick, mmdr)) {
_collection->name(), [this, &trx, &maxTick, &mmdr, &mmColl](DocumentIdentifierToken const& token) {
if (mmColl->readDocumentConditional(&trx, token, maxTick, mmdr)) {
_vpack.emplace_back(mmdr.vpack());
}
return true;

@@ -35,10 +35,10 @@
#include "MMFiles/MMFilesDatafileHelper.h"
#include "MMFiles/MMFilesDatafileStatisticsContainer.h"
#include "MMFiles/MMFilesDocumentPosition.h"
#include "MMFiles/MMFilesEngine.h"
#include "MMFiles/MMFilesIndexElement.h"
#include "MMFiles/MMFilesPrimaryIndex.h"
#include "StorageEngine/EngineSelectorFeature.h"
#include "StorageEngine/StorageEngine.h"
#include "Utils/SingleCollectionTransaction.h"
#include "Transaction/StandaloneContext.h"
#include "Transaction/Helpers.h"

@@ -856,7 +856,7 @@ void MMFilesCompactorThread::signal() {
}

void MMFilesCompactorThread::run() {
StorageEngine* engine = EngineSelectorFeature::ENGINE;
MMFilesEngine* engine = static_cast<MMFilesEngine*>(EngineSelectorFeature::ENGINE);
std::vector<arangodb::LogicalCollection*> collections;
int numCompacted = 0;

@@ -1604,30 +1604,6 @@ void MMFilesEngine::signalCleanup(TRI_vocbase_t* vocbase) {
(*it).second->signal();
}

// iterate all documents of the underlying collection
// this is called when a collection is openend, and all its documents need to be
// added to
// indexes etc.
void MMFilesEngine::iterateDocuments(
TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
std::function<void(arangodb::velocypack::Slice const&)> const& cb) {}

// adds a document to the storage engine
// this will be called by the WAL collector when surviving documents are being
// moved
// into the storage engine's realm
void MMFilesEngine::addDocumentRevision(
TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
arangodb::velocypack::Slice const& document) {}

// removes a document from the storage engine
// this will be called by the WAL collector when non-surviving documents are
// being removed
// from the storage engine's realm
void MMFilesEngine::removeDocumentRevision(
TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
arangodb::velocypack::Slice const& document) {}

/// @brief scans a collection and locates all files
MMFilesEngineCollectionFiles MMFilesEngine::scanCollectionDirectory(
std::string const& path) {

@@ -267,11 +267,11 @@ class MMFilesEngine final : public StorageEngine {
// the WAL entry for index deletion will be written *after* the call
// to "dropIndex" returns
void dropIndex(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId,
TRI_idx_iid_t id) override;
TRI_idx_iid_t id);

void dropIndexWalMarker(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId,
arangodb::velocypack::Slice const& data,
bool writeMarker, int&) override;
bool writeMarker, int&);

void unloadCollection(TRI_vocbase_t* vocbase,
arangodb::LogicalCollection* collection) override;

@@ -298,67 +298,39 @@ class MMFilesEngine final : public StorageEngine {

void signalCleanup(TRI_vocbase_t* vocbase) override;

// document operations
// -------------------

// iterate all documents of the underlying collection
// this is called when a collection is openend, and all its documents need to
// be added to
// indexes etc.
void iterateDocuments(
TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
std::function<void(arangodb::velocypack::Slice const&)> const& cb)
override;

// adds a document to the storage engine
// this will be called by the WAL collector when surviving documents are being
// moved
// into the storage engine's realm
void addDocumentRevision(
TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
arangodb::velocypack::Slice const& document) override;

// removes a document from the storage engine
// this will be called by the WAL collector when non-surviving documents are
// being removed
// from the storage engine's realm
void removeDocumentRevision(
TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
arangodb::velocypack::Slice const& document) override;

/// @brief scans a collection and locates all files
MMFilesEngineCollectionFiles scanCollectionDirectory(std::string const& path);

/// @brief remove data of expired compaction blockers
bool cleanupCompactionBlockers(TRI_vocbase_t* vocbase) override;
bool cleanupCompactionBlockers(TRI_vocbase_t* vocbase);

/// @brief insert a compaction blocker
int insertCompactionBlocker(TRI_vocbase_t* vocbase, double ttl,
TRI_voc_tick_t& id) override;
TRI_voc_tick_t& id);

/// @brief touch an existing compaction blocker
int extendCompactionBlocker(TRI_vocbase_t* vocbase, TRI_voc_tick_t id,
double ttl) override;
double ttl);

/// @brief remove an existing compaction blocker
int removeCompactionBlocker(TRI_vocbase_t* vocbase,
TRI_voc_tick_t id) override;
TRI_voc_tick_t id);

/// @brief a callback function that is run while it is guaranteed that there
/// is no compaction ongoing
void preventCompaction(
TRI_vocbase_t* vocbase,
std::function<void(TRI_vocbase_t*)> const& callback) override;
std::function<void(TRI_vocbase_t*)> const& callback);

/// @brief a callback function that is run there is no compaction ongoing
bool tryPreventCompaction(TRI_vocbase_t* vocbase,
std::function<void(TRI_vocbase_t*)> const& callback,
bool checkForActiveBlockers) override;
bool checkForActiveBlockers);

int shutdownDatabase(TRI_vocbase_t* vocbase) override;

int openCollection(TRI_vocbase_t* vocbase, LogicalCollection* collection,
bool ignoreErrors) override;
bool ignoreErrors);

/// @brief Add engine-specific AQL functions.
void addAqlFunctions() override;
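The declarations above lose their override specifiers because the corresponding virtual functions are being removed from the StorageEngine base interface; once a base class no longer declares a function virtual, keeping override on the derived declaration is a compile error. A minimal illustration with hypothetical types:

struct Base {
  // virtual void dropIndex();  // removed from the interface
};

struct Derived : Base {
  void dropIndex();             // writing "override" here would no longer compile
};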
@@ -38,7 +38,7 @@ using namespace arangodb;

/// @brief walk over the attribute. Also Extract sub-attributes and elements in
/// list.
static void ExtractWords(std::vector<std::string>& words,
static void ExtractWords(std::set<std::string>& words,
VPackSlice const value,
size_t minWordLength,
int level) {

@@ -217,7 +217,7 @@ int MMFilesFulltextIndex::insert(transaction::Methods*, TRI_voc_rid_t revisionId
VPackSlice const& doc, bool isRollback) {
int res = TRI_ERROR_NO_ERROR;

std::vector<std::string> words = wordlist(doc);
std::set<std::string> words = wordlist(doc);

if (words.empty()) {
// TODO: distinguish the cases "empty wordlist" and "out of memory"

@@ -260,8 +260,8 @@ int MMFilesFulltextIndex::cleanup() {

/// @brief callback function called by the fulltext index to determine the
/// words to index for a specific document
std::vector<std::string> MMFilesFulltextIndex::wordlist(VPackSlice const& doc) {
std::vector<std::string> words;
std::set<std::string> MMFilesFulltextIndex::wordlist(VPackSlice const& doc) {
std::set<std::string> words;
try {
VPackSlice const value = doc.get(_attr);
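Switching the word list from std::vector to std::set means the words reach the fulltext index already sorted and de-duplicated, which is why the insertion code in the hunks below can comment out its std::sort call and drop the duplicate-skipping logic. Illustrative values only:

#include <set>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> v{"b", "a", "b"};    // needed an explicit sort plus duplicate checks
  std::set<std::string> s(v.begin(), v.end());  // {"a", "b"}: ordered and unique by construction
}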
@@ -89,7 +89,7 @@ class MMFilesFulltextIndex final : public Index {
TRI_voc_rid_t revisionId);

private:
std::vector<std::string> wordlist(arangodb::velocypack::Slice const&);
std::set<std::string> wordlist(arangodb::velocypack::Slice const&);

private:
/// @brief the indexed attribute (path)

@@ -36,6 +36,7 @@
#include "Indexes/Index.h"
#include "Logger/Logger.h"
#include "MMFiles/MMFilesCollectionKeys.h"
#include "MMFiles/MMFilesEngine.h"
#include "MMFiles/MMFilesLogfileManager.h"
#include "MMFiles/mmfiles-replication-dump.h"
#include "Replication/InitialSyncer.h"

@@ -542,7 +543,7 @@ void MMFilesRestReplicationHandler::handleCommandBatch() {
VelocyPackHelper::getNumericValue<double>(input->slice(), "ttl", 0);

TRI_voc_tick_t id;
StorageEngine* engine = EngineSelectorFeature::ENGINE;
MMFilesEngine* engine = static_cast<MMFilesEngine*>(EngineSelectorFeature::ENGINE);
int res = engine->insertCompactionBlocker(_vocbase, expires, id);

if (res != TRI_ERROR_NO_ERROR) {

@@ -575,7 +576,7 @@ void MMFilesRestReplicationHandler::handleCommandBatch() {
VelocyPackHelper::getNumericValue<double>(input->slice(), "ttl", 0);

// now extend the blocker
StorageEngine* engine = EngineSelectorFeature::ENGINE;
MMFilesEngine* engine = static_cast<MMFilesEngine*>(EngineSelectorFeature::ENGINE);
int res = engine->extendCompactionBlocker(_vocbase, id, expires);

if (res == TRI_ERROR_NO_ERROR) {

@@ -591,7 +592,7 @@ void MMFilesRestReplicationHandler::handleCommandBatch() {
TRI_voc_tick_t id =
static_cast<TRI_voc_tick_t>(StringUtils::uint64(suffixes[1]));

StorageEngine* engine = EngineSelectorFeature::ENGINE;
MMFilesEngine* engine = static_cast<MMFilesEngine*>(EngineSelectorFeature::ENGINE);
int res = engine->removeCompactionBlocker(_vocbase, id);

if (res == TRI_ERROR_NO_ERROR) {

@@ -2357,7 +2358,7 @@ void MMFilesRestReplicationHandler::handleCommandCreateKeys() {
TRI_ASSERT(col != nullptr);

// turn off the compaction for the collection
StorageEngine* engine = EngineSelectorFeature::ENGINE;
MMFilesEngine* engine = static_cast<MMFilesEngine*>(EngineSelectorFeature::ENGINE);
TRI_voc_tick_t id;
int res = engine->insertCompactionBlocker(_vocbase, 1200.0, id);

@@ -25,6 +25,9 @@

#include "Basics/locks.h"
#include "Basics/Exceptions.h"
#include "Basics/ReadWriteLock.h"
#include "Basics/ReadLocker.h"
#include "Basics/WriteLocker.h"
#include "Logger/Logger.h"
#include "MMFiles/mmfiles-fulltext-handles.h"
#include "MMFiles/mmfiles-fulltext-list.h"

@@ -86,12 +89,12 @@ typedef struct node_s {
} node_t;

/// @brief the actual fulltext index
typedef struct {
struct index__t {
node_t* _root; // root node of the index

TRI_fulltext_handles_t* _handles; // handles management instance

TRI_read_write_lock_t _lock;
arangodb::basics::ReadWriteLock _lock;

size_t _memoryAllocated; // total memory used by index
#if TRI_FULLTEXT_DEBUG

@@ -103,7 +106,7 @@ typedef struct {

uint32_t _nodeChunkSize; // how many sub-nodes to allocate per chunk
uint32_t _initialNodeHandles; // how many handles to allocate per node
} index__t;
};

static uint32_t NodeNumFollowers(const node_t* const);

@@ -1099,8 +1102,7 @@ static inline size_t CommonPrefixLength(std::string const& left,
TRI_fts_index_t* TRI_CreateFtsIndex(uint32_t handleChunkSize,
uint32_t nodeChunkSize,
uint32_t initialNodeHandles) {
index__t* idx = static_cast<index__t*>(
TRI_Allocate(TRI_UNKNOWN_MEM_ZONE, sizeof(index__t), false));
index__t* idx = new index__t();

if (idx == nullptr) {
return nullptr;

@@ -1140,14 +1142,12 @@ TRI_fts_index_t* TRI_CreateFtsIndex(uint32_t handleChunkSize,
idx->_memoryBase += sizeof(TRI_fulltext_handles_t);
#endif

TRI_InitReadWriteLock(&idx->_lock);

return (TRI_fts_index_t*)idx;
}

/// @brief free the fulltext index
void TRI_FreeFtsIndex(TRI_fts_index_t* ftx) {
index__t* idx = (index__t*)ftx;
index__t* idx = static_cast<index__t*>(ftx);

// free root node (this will recursively free all other nodes)
FreeNode(idx, idx->_root);

@@ -1165,14 +1165,12 @@ void TRI_FreeFtsIndex(TRI_fts_index_t* ftx) {
TRI_ASSERT(idx->_memoryAllocated == sizeof(index__t));
#endif

TRI_DestroyReadWriteLock(&idx->_lock);

// free index itself
TRI_Free(TRI_UNKNOWN_MEM_ZONE, idx);
delete idx;
}

void TRI_TruncateMMFilesFulltextIndex(TRI_fts_index_t* ftx) {
index__t* idx = (index__t*)ftx;
index__t* idx = static_cast<index__t*>(ftx);

// free root node (this will recursively free all other nodes)
FreeNode(idx, idx->_root);

@@ -1213,11 +1211,10 @@ void TRI_TruncateMMFilesFulltextIndex(TRI_fts_index_t* ftx) {
/// @brief delete a document from the index
void TRI_DeleteDocumentMMFilesFulltextIndex(TRI_fts_index_t* const ftx,
const TRI_voc_rid_t document) {
index__t* idx = (index__t*)ftx;
index__t* idx = static_cast<index__t*>(ftx);

TRI_WriteLockReadWriteLock(&idx->_lock);
WRITE_LOCKER(guard, idx->_lock);
TRI_DeleteDocumentHandleMMFilesFulltextIndex(idx->_handles, document);
TRI_WriteUnlockReadWriteLock(&idx->_lock);
}

/// @brief insert a list of words into the index

@@ -1231,7 +1228,7 @@ void TRI_DeleteDocumentMMFilesFulltextIndex(TRI_fts_index_t* const ftx,
/// prefixes
bool TRI_InsertWordsMMFilesFulltextIndex(TRI_fts_index_t* const ftx,
const TRI_voc_rid_t document,
std::vector<std::string>& wordlist) {
std::set<std::string>& wordlist) {
index__t* idx;
TRI_fulltext_handle_t handle;
node_t* paths[MAX_WORD_BYTES + 4];

@@ -1250,16 +1247,15 @@ bool TRI_InsertWordsMMFilesFulltextIndex(TRI_fts_index_t* const ftx,
// for words with common prefixes (which will be adjacent in the sorted list
// of words)
// The default comparator (<) is exactly what we need here
std::sort(wordlist.begin(), wordlist.end());
//std::sort(wordlist.begin(), wordlist.end());

idx = (index__t*)ftx;
idx = static_cast<index__t*>(ftx);

TRI_WriteLockReadWriteLock(&idx->_lock);
WRITE_LOCKER(guard, idx->_lock);

// get a new handle for the document
handle = TRI_InsertHandleMMFilesFulltextIndex(idx->_handles, document);
if (handle == 0) {
TRI_WriteUnlockReadWriteLock(&idx->_lock);
return false;
}

@@ -1268,19 +1264,15 @@ bool TRI_InsertWordsMMFilesFulltextIndex(TRI_fts_index_t* const ftx,
// start for the 1st word inserted
paths[0] = idx->_root;
lastLength = 0;

size_t w = 0;
size_t numWords = wordlist.size();
while (w < numWords) {

std::string const* prev = nullptr;
for (std::string const& tmp : wordlist) {
node_t* node;
char const* p;
size_t start;
size_t i;

// LOG_TOPIC(DEBUG, arangodb::Logger::FIXME) << "checking word " << wordlist->_words[w];

if (w > 0) {
std::string const& tmp = wordlist[w];

if (prev != nullptr) {
// check if current word has a shared/common prefix with the previous word
// inserted
// in case this is true, we can use an optimisation and do not need to

@@ -1288,70 +1280,66 @@ bool TRI_InsertWordsMMFilesFulltextIndex(TRI_fts_index_t* const ftx,
// tree from the root again. instead, we just start at the node at the end
// of the
// shared/common prefix. this will save us a lot of tree lookups
start = CommonPrefixLength(wordlist[w - 1], tmp);
start = CommonPrefixLength(*prev, tmp);
if (start > MAX_WORD_BYTES) {
start = MAX_WORD_BYTES;
}

// check if current word is the same as the last word. we do not want to
// insert the
// same word multiple times for the same document
if (start > 0 && start == lastLength &&
start == tmp.size()) {
// duplicate word, skip it and continue with next word
w++;
continue;
}
} else {
start = 0;
}

prev = &tmp;

// for words with common prefixes, use the most appropriate start node we
// do not need to traverse the tree from the root again
node = paths[start];
#if TRI_FULLTEXT_DEBUG
TRI_ASSERT(node != nullptr);
#endif

// now insert into the tree, starting at the next character after the common
// prefix
std::string tmp = wordlist[w++].substr(start);
p = tmp.c_str();

//std::string suffix = tmp.substr(start);
p = tmp.c_str() + start;

for (i = start; *p && i <= MAX_WORD_BYTES; ++i) {
node_char_t c = (node_char_t) * (p++);

#if TRI_FULLTEXT_DEBUG
TRI_ASSERT(node != nullptr);
#endif

node = EnsureSubNode(idx, node, c);
if (node == nullptr) {
TRI_WriteUnlockReadWriteLock(&idx->_lock);
return false;
}

#if TRI_FULLTEXT_DEBUG
TRI_ASSERT(node != nullptr);
#endif

paths[i + 1] = node;
}

if (!InsertHandle(idx, node, handle)) {
// document was added at least once, mark it as deleted
TRI_DeleteDocumentHandleMMFilesFulltextIndex(idx->_handles, document);
TRI_WriteUnlockReadWriteLock(&idx->_lock);
return false;
}

// store length of word just inserted
// we'll use that to compare with the next word for duplicate removal
lastLength = i;
}

TRI_WriteUnlockReadWriteLock(&idx->_lock);

return true;
}

@@ -1393,9 +1381,9 @@ TRI_fulltext_result_t* TRI_QueryMMFilesFulltextIndex(TRI_fts_index_t* const ftx,

auto maxResults = query->_maxResults;

idx = (index__t*)ftx;
idx = static_cast<index__t*>(ftx);

TRI_ReadLockReadWriteLock(&idx->_lock);
READ_LOCKER(guard, idx->_lock);

// initial result is empty
result = nullptr;

@@ -1463,22 +1451,20 @@ TRI_fulltext_result_t* TRI_QueryMMFilesFulltextIndex(TRI_fts_index_t* const ftx,

if (result == nullptr) {
// if we haven't found anything...
TRI_ReadUnlockReadWriteLock(&idx->_lock);
return TRI_CreateResultMMFilesFulltextIndex(0);
}

// now convert the handle list into a result (this will also filter out
// deleted documents)
TRI_fulltext_result_t* r = MakeListResult(idx, result, maxResults);
TRI_ReadUnlockReadWriteLock(&idx->_lock);

return r;
}

/// @brief dump index tree
#if TRI_FULLTEXT_DEBUG
void TRI_DumpTreeFtsIndex(const TRI_fts_index_t* const ftx) {
index__t* idx = (index__t*)ftx;
void TRI_DumpTreeFtsIndex(TRI_fts_index_t* ftx) {
index__t* idx = static_cast<index__t*>(ftx);

TRI_DumpHandleMMFilesFulltextIndex(idx->_handles);
DumpNode(idx->_root, 0);

@@ -1487,8 +1473,8 @@ void TRI_DumpTreeFtsIndex(const TRI_fts_index_t* const ftx) {

/// @brief dump index statistics
#if TRI_FULLTEXT_DEBUG
void TRI_DumpStatsFtsIndex(const TRI_fts_index_t* const ftx) {
index__t* idx = (index__t*)ftx;
void TRI_DumpStatsFtsIndex(TRI_fts_index_t* ftx) {
index__t* idx = static_cast<index__t*>(ftx);
TRI_fulltext_stats_t stats;

stats = TRI_StatsMMFilesFulltextIndex(idx);

@@ -1513,14 +1499,12 @@ void TRI_DumpStatsFtsIndex(const TRI_fts_index_t* const ftx) {
#endif

/// @brief return stats about the index
TRI_fulltext_stats_t TRI_StatsMMFilesFulltextIndex(const TRI_fts_index_t* const ftx) {
index__t* idx;
TRI_fulltext_stats_t TRI_StatsMMFilesFulltextIndex(TRI_fts_index_t* ftx) {
index__t* idx = static_cast<index__t*>(ftx);

READ_LOCKER(guard, idx->_lock);

TRI_fulltext_stats_t stats;

idx = (index__t*)ftx;

TRI_ReadLockReadWriteLock(&idx->_lock);

stats._memoryTotal = TRI_MemoryMMFilesFulltextIndex(idx);
#if TRI_FULLTEXT_DEBUG
stats._memoryOwn = idx->_memoryAllocated;

@@ -1547,14 +1531,14 @@ TRI_fulltext_stats_t TRI_StatsMMFilesFulltextIndex(const TRI_fts_index_t* const
stats._shouldCompact = false;
}

TRI_ReadUnlockReadWriteLock(&idx->_lock);

return stats;
}

/// @brief return the total memory used by the index
size_t TRI_MemoryMMFilesFulltextIndex(const TRI_fts_index_t* const ftx) {
index__t* idx = (index__t*)ftx;
size_t TRI_MemoryMMFilesFulltextIndex(TRI_fts_index_t* ftx) {
// no need to lock here, as we are called from under a lock already
index__t* idx = static_cast<index__t*>(ftx);

if (idx->_handles != nullptr) {
return idx->_memoryAllocated + TRI_MemoryHandleMMFilesFulltextIndex(idx->_handles);

@@ -1565,21 +1549,18 @@ size_t TRI_MemoryMMFilesFulltextIndex(const TRI_fts_index_t* const ftx) {

/// @brief compact the fulltext index
/// note: the caller must hold a lock on the index before called this
bool TRI_CompactMMFilesFulltextIndex(TRI_fts_index_t* const ftx) {
index__t* idx;
TRI_fulltext_handles_t* clone;

idx = (index__t*)ftx;
bool TRI_CompactMMFilesFulltextIndex(TRI_fts_index_t* ftx) {
index__t* idx = static_cast<index__t*>(ftx);

// but don't block if the index is busy
// try to acquire the write lock to clean up
if (!TRI_TryWriteLockReadWriteLock(&idx->_lock)) {
TRY_WRITE_LOCKER(guard, idx->_lock);
if (!guard.isLocked()) {
return true;
}

if (!TRI_ShouldCompactHandleMMFilesFulltextIndex(idx->_handles)) {
// not enough cleanup work to do
TRI_WriteUnlockReadWriteLock(&idx->_lock);
return true;
}

@@ -1587,9 +1568,8 @@ bool TRI_CompactMMFilesFulltextIndex(TRI_fts_index_t* const ftx) {
// re-align the handle numbers consecutively, starting at 1.
// this will also populate the _map property, which can be used to clean up
// handles of existing nodes
clone = TRI_CompactHandleMMFilesFulltextIndex(idx->_handles);
TRI_fulltext_handles_t* clone = TRI_CompactHandleMMFilesFulltextIndex(idx->_handles);
if (clone == nullptr) {
TRI_WriteUnlockReadWriteLock(&idx->_lock);
return false;
}

@@ -1604,7 +1584,6 @@ bool TRI_CompactMMFilesFulltextIndex(TRI_fts_index_t* const ftx) {

// cleanup finished, now switch over
idx->_handles = clone;
TRI_WriteUnlockReadWriteLock(&idx->_lock);

return true;
}
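The fulltext index hunks above swap the raw TRI_read_write_lock_t plus explicit lock/unlock calls for arangodb::basics::ReadWriteLock with the READ_LOCKER / WRITE_LOCKER / TRY_WRITE_LOCKER guards, so every early return releases the lock automatically. A standard-library-only sketch of the same RAII idea (std::shared_mutex stands in for ReadWriteLock; the helper names are hypothetical):

#include <mutex>
#include <shared_mutex>

std::shared_mutex indexLock;

bool compactSketch(bool enoughWorkToDo) {
  std::unique_lock<std::shared_mutex> guard(indexLock, std::try_to_lock);
  if (!guard.owns_lock()) {
    return true;  // index busy; nothing to unlock by hand
  }
  if (!enoughWorkToDo) {
    return true;  // guard's destructor releases the lock on this path too
  }
  // ... compaction work ...
  return true;
}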
@@ -72,7 +72,7 @@ void TRI_DeleteDocumentMMFilesFulltextIndex(TRI_fts_index_t* const,
/// @brief insert a list of words to the index
bool TRI_InsertWordsMMFilesFulltextIndex(TRI_fts_index_t* const,
const TRI_voc_rid_t,
std::vector<std::string>&);
std::set<std::string>&);

/// @brief find all documents that contain a word (exact match)
#if 0

@@ -95,21 +95,21 @@ struct TRI_fulltext_result_s* TRI_QueryMMFilesFulltextIndex(

/// @brief dump index tree
#if TRI_FULLTEXT_DEBUG
void TRI_DumpTreeFtsIndex(const TRI_fts_index_t* const);
void TRI_DumpTreeFtsIndex(TRI_fts_index_t*);
#endif

/// @brief dump index statistics
#if TRI_FULLTEXT_DEBUG
void TRI_DumpStatsFtsIndex(const TRI_fts_index_t* const);
void TRI_DumpStatsFtsIndex(TRI_fts_index_t*);
#endif

/// @brief return stats about the index
TRI_fulltext_stats_t TRI_StatsMMFilesFulltextIndex(const TRI_fts_index_t* const);
TRI_fulltext_stats_t TRI_StatsMMFilesFulltextIndex(TRI_fts_index_t*);

/// @brief return the total memory used by the index
size_t TRI_MemoryMMFilesFulltextIndex(const TRI_fts_index_t* const);
size_t TRI_MemoryMMFilesFulltextIndex(TRI_fts_index_t*);

/// @brief compact the fulltext index
bool TRI_CompactMMFilesFulltextIndex(TRI_fts_index_t* const);
bool TRI_CompactMMFilesFulltextIndex(TRI_fts_index_t*);

#endif

@@ -28,9 +28,9 @@
#include "RocksDBEngine/RocksDBGeoIndex.h"
#include "RocksDBEngine/RocksDBToken.h"
#include "StorageEngine/DocumentIdentifierToken.h"
#include "StorageEngine/TransactionState.h"
#include "Transaction/Helpers.h"
#include "Transaction/Methods.h"
#include "StorageEngine/TransactionState.h"
#include "Utils/CollectionNameResolver.h"
#include "VocBase/LogicalCollection.h"
#include "VocBase/ManagedDocumentResult.h"

@@ -164,10 +164,9 @@ static arangodb::RocksDBGeoIndex* getGeoIndex(
trx->addCollectionAtRuntime(cid, collectionName);
Result res = trx->state()->ensureCollections();
if (!res.ok()) {
THROW_ARANGO_EXCEPTION_MESSAGE(res.errorNumber(),
res.errorMessage());
THROW_ARANGO_EXCEPTION_MESSAGE(res.errorNumber(), res.errorMessage());
}

auto document = trx->documentCollection(cid);
if (document == nullptr) {
THROW_ARANGO_EXCEPTION_FORMAT(TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND, "'%s'",

@@ -195,7 +194,8 @@ static arangodb::RocksDBGeoIndex* getGeoIndex(
static AqlValue buildGeoResult(transaction::Methods* trx,
LogicalCollection* collection,
arangodb::aql::Query* query,
GeoCoordinates* cors, TRI_voc_cid_t const& cid,
rocksdbengine::GeoCoordinates* cors,
TRI_voc_cid_t const& cid,
std::string const& attributeName) {
if (cors == nullptr) {
return AqlValue(arangodb::basics::VelocyPackHelper::EmptyArrayValue());

@@ -328,7 +328,7 @@ AqlValue RocksDBAqlFunctions::Near(arangodb::aql::Query* query,
TRI_ASSERT(index != nullptr);
TRI_ASSERT(trx->isPinned(cid));

GeoCoordinates* cors =
rocksdbengine::GeoCoordinates* cors =
index->nearQuery(trx, latitude.toDouble(trx), longitude.toDouble(trx),
static_cast<size_t>(limitValue));

@@ -382,9 +382,9 @@ AqlValue RocksDBAqlFunctions::Within(
TRI_ASSERT(index != nullptr);
TRI_ASSERT(trx->isPinned(cid));

GeoCoordinates* cors = index->withinQuery(trx, latitudeValue.toDouble(trx),
longitudeValue.toDouble(trx),
radiusValue.toDouble(trx));
rocksdbengine::GeoCoordinates* cors = index->withinQuery(
trx, latitudeValue.toDouble(trx), longitudeValue.toDouble(trx),
radiusValue.toDouble(trx));

return buildGeoResult(trx, index->collection(), query, cors, cid,
attributeName);

@@ -187,8 +187,6 @@ void RocksDBCollection::open(bool ignoreErrors) {
_revisionId = counterValue.revisionId();

for (std::shared_ptr<Index> it : getIndexes()) {
static_cast<RocksDBIndex*>(it.get())->load();

if (it->type() == Index::TRI_IDX_TYPE_GEO1_INDEX ||
it->type() == Index::TRI_IDX_TYPE_GEO2_INDEX) {
_hasGeoIndex = true;

@@ -196,18 +194,6 @@ void RocksDBCollection::open(bool ignoreErrors) {
}
}

/// @brief iterate all markers of a collection on load
int RocksDBCollection::iterateMarkersOnLoad(
arangodb::transaction::Methods* trx) {
THROW_ARANGO_NOT_YET_IMPLEMENTED();
return 0;
}

bool RocksDBCollection::isFullyCollected() const {
THROW_ARANGO_NOT_YET_IMPLEMENTED();
return false;
}

void RocksDBCollection::prepareIndexes(
arangodb::velocypack::Slice indexesSlice) {
TRI_ASSERT(indexesSlice.isArray());

@@ -839,15 +825,6 @@ bool RocksDBCollection::readDocument(transaction::Methods* trx,
return res.ok();
}

bool RocksDBCollection::readDocumentConditional(
transaction::Methods* trx, DocumentIdentifierToken const& token,
TRI_voc_tick_t maxTick, ManagedDocumentResult& result) {
// should not be called for RocksDB engine. TODO: move this out of general
// API!
THROW_ARANGO_NOT_YET_IMPLEMENTED();
return false;
}

int RocksDBCollection::insert(arangodb::transaction::Methods* trx,
arangodb::velocypack::Slice const slice,
arangodb::ManagedDocumentResult& mdr,

@@ -1354,7 +1331,7 @@ arangodb::Result RocksDBCollection::fillIndexes(
Result r;
bool hasMore = true;
while (hasMore) {
hasMore = iter->next(cb, 5000);
hasMore = iter->next(cb, 250);
if (_logicalCollection->status() == TRI_VOC_COL_STATUS_DELETED ||
_logicalCollection->deleted()) {
res = TRI_ERROR_INTERNAL;

@@ -1375,7 +1352,8 @@ arangodb::Result RocksDBCollection::fillIndexes(
// occured, this needs to happen since we are non transactional
if (!r.ok()) {
iter->reset();
rocksdb::WriteBatch removeBatch(32 * 1024 * 1024);
rocksdb::WriteBatchWithIndex removeBatch(db->DefaultColumnFamily()->GetComparator(),
32 * 1024 * 1024);

res = TRI_ERROR_NO_ERROR;
auto removeCb = [&](DocumentIdentifierToken token) {

@@ -1396,7 +1374,7 @@ arangodb::Result RocksDBCollection::fillIndexes(
}
// TODO: if this fails, do we have any recourse?
// Simon: Don't think so
db->Write(writeOpts, &removeBatch);
db->Write(writeOpts, removeBatch.GetWriteBatch());
}

return r;
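The rollback path in fillIndexes now buffers its deletes in a rocksdb::WriteBatchWithIndex instead of a plain WriteBatch; an indexed batch keeps its own pending writes searchable before they are committed, and the accumulated batch is still written in one call via GetWriteBatch(). A simplified sketch of the changed call shape (db, writeOpts and key stand in for the function's local state):

#include <rocksdb/db.h>
#include <rocksdb/utilities/write_batch_with_index.h>

void removeSketch(rocksdb::DB* db, rocksdb::WriteOptions const& writeOpts,
                  rocksdb::Slice key) {
  rocksdb::WriteBatchWithIndex removeBatch(
      db->DefaultColumnFamily()->GetComparator(), 32 * 1024 * 1024);
  removeBatch.Delete(key);                            // buffered and indexed
  db->Write(writeOpts, removeBatch.GetWriteBatch());  // committed as one batch
}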
@ -80,11 +80,6 @@ class RocksDBCollection final : public PhysicalCollection {
|
|||
size_t memory() const override;
|
||||
void open(bool ignoreErrors) override;
|
||||
|
||||
/// @brief iterate all markers of a collection on load
|
||||
int iterateMarkersOnLoad(arangodb::transaction::Methods* trx) override;
|
||||
|
||||
bool isFullyCollected() const override;
|
||||
|
||||
////////////////////////////////////
|
||||
// -- SECTION Indexes --
|
||||
///////////////////////////////////
|
||||
|
@ -133,11 +128,6 @@ class RocksDBCollection final : public PhysicalCollection {
|
|||
DocumentIdentifierToken const& token,
|
||||
ManagedDocumentResult& result) override;
|
||||
|
||||
bool readDocumentConditional(transaction::Methods* trx,
|
||||
DocumentIdentifierToken const& token,
|
||||
TRI_voc_tick_t maxTick,
|
||||
ManagedDocumentResult& result) override;
|
||||
|
||||
int insert(arangodb::transaction::Methods* trx,
|
||||
arangodb::velocypack::Slice const newSlice,
|
||||
arangodb::ManagedDocumentResult& result, OperationOptions& options,
|
||||
|
|
|
@ -52,30 +52,7 @@ RocksDBCollectionExport::RocksDBCollectionExport(
|
|||
|
||||
RocksDBCollectionExport::~RocksDBCollectionExport() {}
|
||||
|
||||
void RocksDBCollectionExport::run(uint64_t maxWaitTime, size_t limit) {
|
||||
|
||||
// none of this should matter on rocksdb
|
||||
// try to acquire the exclusive lock on the compaction
|
||||
/*
|
||||
StorageEngine* engine = EngineSelectorFeature::ENGINE;
|
||||
engine->preventCompaction(_collection->vocbase(),
|
||||
[this](TRI_vocbase_t* vocbase) {
|
||||
// TODO: do something under compaction lock?
|
||||
});
|
||||
|
||||
{
|
||||
static uint64_t const SleepTime = 10000;
|
||||
|
||||
uint64_t tries = 0;
|
||||
uint64_t const maxTries = maxWaitTime / SleepTime;
|
||||
|
||||
while (++tries < maxTries) {
|
||||
if (_collection->getPhysical()->isFullyCollected()) {
|
||||
break;
|
||||
}
|
||||
usleep(SleepTime);
|
||||
}
|
||||
}*/
|
||||
void RocksDBCollectionExport::run(size_t limit) {
|
||||
|
||||
{
|
||||
SingleCollectionTransaction trx(
|
||||
|
@ -110,10 +87,6 @@ void RocksDBCollectionExport::run(uint64_t maxWaitTime, size_t limit) {
|
|||
_vpack.emplace_back(VPackSlice(mmdr.vpack()));
|
||||
--limit;
|
||||
}
|
||||
/*if (_collection->readDocumentConditional(&trx, token, 0, mmdr)) {
|
||||
_vpack.emplace_back(VPackSlice(mmdr.vpack()));
|
||||
--limit;
|
||||
}*/
|
||||
return true;
|
||||
});
|
||||
|
||||
|
|
|
@ -61,7 +61,7 @@ class RocksDBCollectionExport {
|
|||
~RocksDBCollectionExport();
|
||||
|
||||
public:
|
||||
void run(uint64_t, size_t);
|
||||
void run(size_t);
|
||||
|
||||
private:
|
||||
std::unique_ptr<arangodb::CollectionGuard> _guard;
|
||||
|
|
|
@ -29,6 +29,8 @@
|
|||
#include "Basics/StaticStrings.h"
|
||||
#include "Basics/StringRef.h"
|
||||
#include "Basics/VelocyPackHelper.h"
|
||||
#include "Cache/CachedValue.h"
|
||||
#include "Cache/TransactionalCache.h"
|
||||
#include "Indexes/SimpleAttributeEqualityMatcher.h"
|
||||
#include "Transaction/Context.h"
|
||||
#include "Transaction/Helpers.h"
|
||||
|
@ -44,9 +46,8 @@
|
|||
#include "RocksDBEngine/RocksDBTypes.h"
|
||||
|
||||
#include <rocksdb/db.h>
|
||||
#include <rocksdb/options.h>
|
||||
#include <rocksdb/slice.h>
|
||||
#include <rocksdb/utilities/transaction_db.h>
|
||||
#include <rocksdb/utilities/write_batch_with_index.h>
|
||||
|
||||
#include <velocypack/Iterator.h>
|
||||
#include <velocypack/velocypack-aliases.h>
|
||||
|
@ -57,35 +58,29 @@ using namespace arangodb::basics;
|
|||
RocksDBEdgeIndexIterator::RocksDBEdgeIndexIterator(
|
||||
LogicalCollection* collection, transaction::Methods* trx,
|
||||
ManagedDocumentResult* mmdr, arangodb::RocksDBEdgeIndex const* index,
|
||||
std::unique_ptr<VPackBuilder>& keys)
|
||||
std::unique_ptr<VPackBuilder>& keys,
|
||||
bool useCache, cache::Cache* cache)
|
||||
: IndexIterator(collection, trx, mmdr, index),
|
||||
_keys(keys.get()),
|
||||
_keysIterator(_keys->slice()),
|
||||
_index(index),
|
||||
_bounds(RocksDBKeyBounds::EdgeIndex(0)) {
|
||||
_arrayIterator(VPackSlice::emptyArraySlice()),
|
||||
_bounds(RocksDBKeyBounds::EdgeIndex(0)),
|
||||
_doUpdateBounds(true),
|
||||
_useCache(useCache),
|
||||
_cache(cache)
|
||||
{
|
||||
keys.release(); // now we have ownership for _keys
|
||||
TRI_ASSERT(_keys->slice().isArray());
|
||||
RocksDBTransactionState* state = rocksutils::toRocksTransactionState(_trx);
|
||||
TRI_ASSERT(state != nullptr);
|
||||
rocksdb::Transaction* rtrx = state->rocksTransaction();
|
||||
_iterator.reset(rtrx->GetIterator(state->readOptions()));
|
||||
updateBounds();
|
||||
}
|
||||
|
||||
bool RocksDBEdgeIndexIterator::updateBounds() {
|
||||
if (_keysIterator.valid()) {
|
||||
VPackSlice fromTo = _keysIterator.value();
|
||||
if (fromTo.isObject()) {
|
||||
fromTo = fromTo.get(StaticStrings::IndexEq);
|
||||
}
|
||||
TRI_ASSERT(fromTo.isString());
|
||||
_bounds =
|
||||
RocksDBKeyBounds::EdgeIndexVertex(_index->_objectId, StringRef(fromTo));
|
||||
|
||||
void RocksDBEdgeIndexIterator::updateBounds(StringRef fromTo) {
|
||||
_bounds = RocksDBKeyBounds::EdgeIndexVertex(_index->_objectId, fromTo);
|
||||
_iterator->Seek(_bounds.start());
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
RocksDBEdgeIndexIterator::~RocksDBEdgeIndexIterator() {
|
||||
|
@ -95,7 +90,18 @@ RocksDBEdgeIndexIterator::~RocksDBEdgeIndexIterator() {
|
|||
}
|
||||
}
|
||||
|
||||
StringRef getFromToFromIterator(arangodb::velocypack::ArrayIterator const& it){
|
||||
VPackSlice fromTo = it.value();
|
||||
if (fromTo.isObject()) {
|
||||
fromTo = fromTo.get(StaticStrings::IndexEq);
|
||||
}
|
||||
TRI_ASSERT(fromTo.isString());
|
||||
return StringRef(fromTo);
|
||||
}
|
||||
|
||||
bool RocksDBEdgeIndexIterator::next(TokenCallback const& cb, size_t limit) {
|
||||
//LOG_TOPIC(ERR, Logger::FIXME) << "rockdb edge index next";
|
||||
std::size_t cacheValueSizeLimit = limit;
|
||||
TRI_ASSERT(_trx->state()->isRunning());
|
||||
|
||||
if (limit == 0 || !_keysIterator.valid()) {
|
||||
|
@ -105,40 +111,152 @@ bool RocksDBEdgeIndexIterator::next(TokenCallback const& cb, size_t limit) {
|
|||
return false;
|
||||
}
|
||||
|
||||
// acquire rocksdb collection
|
||||
auto rocksColl = toRocksDBCollection(_collection);
|
||||
// acquire RocksDB collection
|
||||
RocksDBToken token;
|
||||
|
||||
while (true) {
|
||||
while (_keysIterator.valid()) {
|
||||
TRI_ASSERT(limit > 0);
|
||||
StringRef fromTo = getFromToFromIterator(_keysIterator);
|
||||
bool foundInCache = false;
|
||||
|
||||
// TODO: set options.iterate_upper_bound and remove compare?
|
||||
while (_iterator->Valid() &&
|
||||
(_index->_cmp->Compare(_iterator->key(), _bounds.end()) < 0)) {
|
||||
StringRef edgeKey = RocksDBKey::primaryKey(_iterator->key());
|
||||
if (_useCache){
|
||||
//LOG_TOPIC(ERR, Logger::FIXME) << "using cache";
|
||||
// find in cache
|
||||
foundInCache = false;
|
||||
// handle resume of next() in cached case
|
||||
if(!_arrayBuffer.empty()){
|
||||
// resume iteration after batch size limit was hit
|
||||
// do not modify buffer or iterator
|
||||
foundInCache = true;
|
||||
} else {
|
||||
// try to find cached value
|
||||
auto f = _cache->find(fromTo.data(),fromTo.size());
|
||||
foundInCache = f.found();
|
||||
if (foundInCache) {
|
||||
VPackSlice cachedPrimaryKeys(f.value()->value());
|
||||
TRI_ASSERT(cachedPrimaryKeys.isArray());
|
||||
|
||||
// acquire the document token through the primary index
|
||||
RocksDBToken token;
|
||||
Result res = rocksColl->lookupDocumentToken(_trx, edgeKey, token);
|
||||
_iterator->Next();
|
||||
if (res.ok()) {
|
||||
cb(token);
|
||||
--limit;
|
||||
if (limit == 0) {
|
||||
return true;
|
||||
if(cachedPrimaryKeys.length() <= limit){
|
||||
// do not copy
|
||||
_arraySlice = cachedPrimaryKeys;
|
||||
} else {
|
||||
// copy data if there are more documents than the batch size limit allows
|
||||
_arrayBuffer.append(cachedPrimaryKeys.start(),cachedPrimaryKeys.byteSize());
|
||||
_arraySlice = VPackSlice(_arrayBuffer.data());
|
||||
}
|
||||
// update iterator
|
||||
_arrayIterator = VPackArrayIterator(_arraySlice);
|
||||
} else {
|
||||
//LOG_TOPIC(ERR, Logger::FIXME) << "not found in cache " << fromTo;
|
||||
}
|
||||
} // TODO do we need to handle failed lookups here?
|
||||
}
|
||||
|
||||
if (foundInCache) {
|
||||
//iterate over cached primary keys
|
||||
for(VPackSlice const& slice: _arrayIterator){
|
||||
StringRef edgeKey(slice);
|
||||
//LOG_TOPIC(ERR, Logger::FIXME) << "using value form cache " << slice.toJson();
|
||||
if(lookupDocumentAndUseCb(edgeKey, cb, limit, token)){
|
||||
return true; // more documents - function will be re-entered
|
||||
} else {
|
||||
// batch size limit not reached, continue with the next cached key
|
||||
}
|
||||
}
|
||||
_arrayIterator.reset();
|
||||
_arrayBuffer.clear();
|
||||
_keysIterator.next(); // handle next key
|
||||
continue; // do not use the code below that does a lookup in RocksDB
|
||||
}
|
||||
} else {
|
||||
//LOG_TOPIC(ERR, Logger::FIXME) << "not using cache";
|
||||
}
|
||||
|
||||
_keysIterator.next();
|
||||
if (!updateBounds()) {
|
||||
return false;
|
||||
}
|
||||
if(!foundInCache) {
|
||||
// cache lookup failed for this key value, so we need to look up
|
||||
// the primary keys in RocksDB
|
||||
|
||||
// if there are more documents in the iterator then
|
||||
// we are not allowed to reset the index.
|
||||
// if _doUpdateBounds is set to false we resume
|
||||
// returning from a valid iterator after hitting
|
||||
// the batch size limit
|
||||
if(_doUpdateBounds){
|
||||
updateBounds(fromTo);
|
||||
if(_useCache){
|
||||
_cacheValueBuilder.openArray();
|
||||
_cacheValueSize = cacheValueSizeLimit;
|
||||
}
|
||||
} else {
|
||||
_doUpdateBounds = true;
|
||||
}
|
||||
|
||||
while (_iterator->Valid() && (_index->_cmp->Compare(_iterator->key(), _bounds.end()) < 0)) {
|
||||
StringRef edgeKey = RocksDBKey::primaryKey(_iterator->key());
|
||||
if(_useCache){
|
||||
if (_cacheValueSize < cacheValueSizeLimit){
|
||||
_cacheValueBuilder.add(VPackValuePair(edgeKey.data(),edgeKey.size()));
|
||||
++_cacheValueSize;
|
||||
} else {
|
||||
_cacheValueBuilder.clear();
|
||||
_cacheValueBuilder.openArray();
|
||||
}
|
||||
}
|
||||
|
||||
if(lookupDocumentAndUseCb(edgeKey, cb, limit, token)){
|
||||
return true; // more documents - function will be re-entered
|
||||
} else {
|
||||
// batch size limit not reached continue loop
|
||||
}
|
||||
}
|
||||
|
||||
//only add entry if cache is available and entry did not hit size limit
|
||||
if (_useCache) {
|
||||
_cacheValueBuilder.close();
|
||||
if(!_cacheValueBuilder.isEmpty() && _cacheValueBuilder.slice().length() > 0) {
|
||||
auto entry = cache::CachedValue::construct(
|
||||
fromTo.data(), static_cast<uint32_t>(fromTo.size()),
|
||||
_cacheValueBuilder.slice().start(), static_cast<uint64_t>(_cacheValueBuilder.slice().byteSize()));
|
||||
bool cached = _cache->insert(entry);
|
||||
if (!cached) {
|
||||
delete entry;
|
||||
}
|
||||
} // builder not empty
|
||||
} // use cache
|
||||
|
||||
} // not found in cache
|
||||
|
||||
_keysIterator.next(); // handle next key
|
||||
}
|
||||
return false; // no more documents in this iterator
|
||||
}
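// Editor's note: a usage sketch of the batching contract implemented above.
// next() returns true when it stopped early because the limit was reached, and
// _doUpdateBounds makes the following call resume at the saved RocksDB
// position instead of re-seeking.  "iterator" is a placeholder for an
// IndexIterator obtained from the edge index; the callback signature is
// assumed to match TokenCallback.
size_t const batchSize = 1000;
auto cb = [](DocumentIdentifierToken const& token) {
  // collect or process one document token
};
while (iterator->next(cb, batchSize)) {
  // a full batch was delivered; loop to fetch the next one
}
// next() returned false: all matching edges have been visited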
|
||||
|
||||
// acquire the document token through the primary index
|
||||
bool RocksDBEdgeIndexIterator::lookupDocumentAndUseCb(
|
||||
StringRef primaryKey, TokenCallback const& cb,
|
||||
size_t& limit, RocksDBToken& token){
|
||||
//we pass the token in by reference to avoid allocations
|
||||
auto rocksColl = toRocksDBCollection(_collection);
|
||||
Result res = rocksColl->lookupDocumentToken(_trx, primaryKey, token);
|
||||
_iterator->Next();
|
||||
if (res.ok()) {
|
||||
cb(token);
|
||||
--limit;
|
||||
if (limit == 0) {
|
||||
_doUpdateBounds=false; //limit hit continue with next batch
|
||||
return true;
|
||||
}
|
||||
} // TODO do we need to handle failed lookups here?
|
||||
return false; // limit not hit continue in while loop
|
||||
}
|
||||
|
||||
void RocksDBEdgeIndexIterator::reset() {
|
||||
//reset offsets in the iterators
|
||||
_doUpdateBounds = true;
|
||||
_cacheValueBuilder.clear();
|
||||
_arrayBuffer.clear();
|
||||
_arraySlice = VPackSlice::emptyArraySlice();
|
||||
_arrayIterator = VPackArrayIterator(_arraySlice);
|
||||
_keysIterator.reset();
|
||||
updateBounds();
|
||||
}
|
||||
|
||||
// ============================= Index ====================================
|
||||
|
@ -147,13 +265,22 @@ RocksDBEdgeIndex::RocksDBEdgeIndex(TRI_idx_iid_t iid,
|
|||
arangodb::LogicalCollection* collection,
|
||||
VPackSlice const& info,
|
||||
std::string const& attr)
|
||||
: RocksDBIndex(iid, collection,
|
||||
std::vector<std::vector<AttributeName>>(
|
||||
{{AttributeName(attr, false)}}),
|
||||
false, false,
|
||||
basics::VelocyPackHelper::stringUInt64(info, "objectId")),
|
||||
_directionAttr(attr) {
|
||||
: RocksDBIndex(iid
|
||||
,collection
|
||||
,std::vector<std::vector<AttributeName>>({{AttributeName(attr, false)}})
|
||||
,false
|
||||
,false
|
||||
,basics::VelocyPackHelper::stringUInt64(info, "objectId")
|
||||
,false //!ServerState::instance()->isCoordinator() /*useCache*/
|
||||
)
|
||||
, _directionAttr(attr)
|
||||
{
|
||||
TRI_ASSERT(iid != 0);
|
||||
TRI_ASSERT(_objectId != 0);
|
||||
if (_objectId == 0) {
|
||||
//disable cache?
|
||||
_useCache = false;
|
||||
}
|
||||
}
|
||||
|
||||
RocksDBEdgeIndex::~RocksDBEdgeIndex() {}
|
||||
|
@ -202,27 +329,23 @@ void RocksDBEdgeIndex::toVelocyPack(VPackBuilder& builder, bool withFigures,
|
|||
bool forPersistence) const {
|
||||
builder.openObject();
|
||||
RocksDBIndex::toVelocyPack(builder, withFigures, forPersistence);
|
||||
// add slectivity estimate hard-coded
|
||||
// add selectivity estimate hard-coded
|
||||
builder.add("unique", VPackValue(false));
|
||||
builder.add("sparse", VPackValue(false));
|
||||
builder.close();
|
||||
}
|
||||
|
||||
/// @brief return a VelocyPack representation of the index figures
|
||||
void RocksDBEdgeIndex::toVelocyPackFigures(VPackBuilder& builder) const {
|
||||
Index::toVelocyPackFigures(builder);
|
||||
// TODO
|
||||
THROW_ARANGO_NOT_YET_IMPLEMENTED();
|
||||
}
|
||||
|
||||
int RocksDBEdgeIndex::insert(transaction::Methods* trx,
|
||||
TRI_voc_rid_t revisionId, VPackSlice const& doc,
|
||||
bool isRollback) {
|
||||
VPackSlice primaryKey = doc.get(StaticStrings::KeyString);
|
||||
VPackSlice fromTo = doc.get(_directionAttr);
|
||||
TRI_ASSERT(primaryKey.isString() && fromTo.isString());
|
||||
RocksDBKey key = RocksDBKey::EdgeIndexValue(_objectId, StringRef(fromTo),
|
||||
auto fromToRef=StringRef(fromTo);
|
||||
RocksDBKey key = RocksDBKey::EdgeIndexValue(_objectId, fromToRef,
|
||||
StringRef(primaryKey));
|
||||
//blacklist key in cache
|
||||
blackListKey(fromToRef);
|
||||
|
||||
// acquire rocksdb transaction
|
||||
RocksDBTransactionState* state = rocksutils::toRocksTransactionState(trx);
|
||||
|
@ -247,10 +370,14 @@ int RocksDBEdgeIndex::remove(transaction::Methods* trx,
|
|||
bool isRollback) {
|
||||
VPackSlice primaryKey = doc.get(StaticStrings::KeyString);
|
||||
VPackSlice fromTo = doc.get(_directionAttr);
|
||||
auto fromToRef=StringRef(fromTo);
|
||||
TRI_ASSERT(primaryKey.isString() && fromTo.isString());
|
||||
RocksDBKey key = RocksDBKey::EdgeIndexValue(_objectId, StringRef(fromTo),
|
||||
RocksDBKey key = RocksDBKey::EdgeIndexValue(_objectId, fromToRef,
|
||||
StringRef(primaryKey));
|
||||
|
||||
//blacklist key in cache
|
||||
blackListKey(fromToRef);
|
||||
|
||||
// acquire rocksdb transaction
|
||||
RocksDBTransactionState* state = rocksutils::toRocksTransactionState(trx);
|
||||
rocksdb::Transaction* rtrx = state->rocksTransaction();
|
||||
|
@ -263,15 +390,9 @@ int RocksDBEdgeIndex::remove(transaction::Methods* trx,
|
|||
}
|
||||
|
||||
/// optimization for truncateNoTrx, never called in fillIndex
|
||||
int RocksDBEdgeIndex::removeRaw(rocksdb::WriteBatch* writeBatch, TRI_voc_rid_t,
|
||||
VPackSlice const& doc) {
|
||||
VPackSlice primaryKey = doc.get(StaticStrings::KeyString);
|
||||
VPackSlice fromTo = doc.get(_directionAttr);
|
||||
TRI_ASSERT(primaryKey.isString() && fromTo.isString());
|
||||
RocksDBKey key = RocksDBKey::EdgeIndexValue(_objectId, StringRef(fromTo),
|
||||
StringRef(primaryKey));
|
||||
writeBatch->Delete(rocksdb::Slice(key.string()));
|
||||
return TRI_ERROR_NO_ERROR;
|
||||
int RocksDBEdgeIndex::removeRaw(rocksdb::WriteBatchWithIndex*, TRI_voc_rid_t,
|
||||
VPackSlice const&) {
|
||||
THROW_ARANGO_EXCEPTION(TRI_ERROR_NOT_IMPLEMENTED);
|
||||
}
|
||||
|
||||
void RocksDBEdgeIndex::batchInsert(
|
||||
|
@ -286,9 +407,11 @@ void RocksDBEdgeIndex::batchInsert(
|
|||
VPackSlice primaryKey = doc.second.get(StaticStrings::KeyString);
|
||||
VPackSlice fromTo = doc.second.get(_directionAttr);
|
||||
TRI_ASSERT(primaryKey.isString() && fromTo.isString());
|
||||
RocksDBKey key = RocksDBKey::EdgeIndexValue(_objectId, StringRef(fromTo),
|
||||
auto fromToRef=StringRef(fromTo);
|
||||
RocksDBKey key = RocksDBKey::EdgeIndexValue(_objectId, fromToRef,
|
||||
StringRef(primaryKey));
|
||||
|
||||
blackListKey(fromToRef);
|
||||
rocksdb::Status status =
|
||||
rtrx->Put(rocksdb::Slice(key.string()), rocksdb::Slice());
|
||||
if (!status.ok()) {
|
||||
|
@ -323,22 +446,24 @@ bool RocksDBEdgeIndex::supportsFilterCondition(
|
|||
IndexIterator* RocksDBEdgeIndex::iteratorForCondition(
|
||||
transaction::Methods* trx, ManagedDocumentResult* mmdr,
|
||||
arangodb::aql::AstNode const* node,
|
||||
|
||||
arangodb::aql::Variable const* reference, bool reverse) {
|
||||
|
||||
//get computation node
|
||||
TRI_ASSERT(node->type == aql::NODE_TYPE_OPERATOR_NARY_AND);
|
||||
|
||||
TRI_ASSERT(node->numMembers() == 1);
|
||||
|
||||
auto comp = node->getMember(0);
|
||||
|
||||
// assume a.b == value
|
||||
auto attrNode = comp->getMember(0);
|
||||
auto valNode = comp->getMember(1);
|
||||
|
||||
// got value == a.b -> flip sides
|
||||
if (attrNode->type != aql::NODE_TYPE_ATTRIBUTE_ACCESS) {
|
||||
// got value == a.b -> flip sides
|
||||
attrNode = comp->getMember(1);
|
||||
valNode = comp->getMember(0);
|
||||
}
|
||||
|
||||
TRI_ASSERT(attrNode->type == aql::NODE_TYPE_ATTRIBUTE_ACCESS);
|
||||
TRI_ASSERT(attrNode->stringEquals(_directionAttr));
|
||||
|
||||
|
@ -353,7 +478,6 @@ IndexIterator* RocksDBEdgeIndex::iteratorForCondition(
|
|||
// a.b IN non-array
|
||||
return new EmptyIndexIterator(_collection, trx, mmdr, this);
|
||||
}
|
||||
|
||||
return createInIterator(trx, mmdr, attrNode, valNode);
|
||||
}
|
||||
|
||||
|
@ -423,7 +547,11 @@ IndexIterator* RocksDBEdgeIndex::createEqIterator(
|
|||
}
|
||||
keys->close();
|
||||
|
||||
return new RocksDBEdgeIndexIterator(_collection, trx, mmdr, this, keys);
|
||||
//LOG_TOPIC(ERR, Logger::FIXME) << "_useCache: " << _useCache
|
||||
// << " _cachePresent: " << _cachePresent
|
||||
// << " useCache():" << useCache();
|
||||
return new RocksDBEdgeIndexIterator(
|
||||
_collection, trx, mmdr, this, keys, useCache(), _cache.get());
|
||||
}
|
||||
|
||||
/// @brief create the iterator
|
||||
|
@ -449,7 +577,9 @@ IndexIterator* RocksDBEdgeIndex::createInIterator(
|
|||
}
|
||||
keys->close();
|
||||
|
||||
return new RocksDBEdgeIndexIterator(_collection, trx, mmdr, this, keys);
|
||||
//LOG_TOPIC(ERR, Logger::FIXME) << "useCache: " << _useCache << useCache();
|
||||
return new RocksDBEdgeIndexIterator(
|
||||
_collection, trx, mmdr, this, keys, useCache(), _cache.get());
|
||||
}
|
||||
|
||||
/// @brief add a single value node to the iterator's keys
|
||||
|
|
|
@ -30,6 +30,7 @@
|
|||
#include "RocksDBEngine/RocksDBIndex.h"
|
||||
#include "RocksDBEngine/RocksDBKey.h"
|
||||
#include "RocksDBEngine/RocksDBKeyBounds.h"
|
||||
#include "RocksDBEngine/RocksDBToken.h"
|
||||
#include "VocBase/voc-types.h"
|
||||
#include "VocBase/vocbase.h"
|
||||
|
||||
|
@ -50,24 +51,33 @@ class RocksDBEdgeIndexIterator final : public IndexIterator {
|
|||
transaction::Methods* trx,
|
||||
ManagedDocumentResult* mmdr,
|
||||
arangodb::RocksDBEdgeIndex const* index,
|
||||
std::unique_ptr<VPackBuilder>& keys);
|
||||
|
||||
std::unique_ptr<VPackBuilder>& keys,
|
||||
bool useCache, cache::Cache*);
|
||||
~RocksDBEdgeIndexIterator();
|
||||
|
||||
char const* typeName() const override { return "edge-index-iterator"; }
|
||||
|
||||
bool next(TokenCallback const& cb, size_t limit) override;
|
||||
|
||||
void reset() override;
|
||||
|
||||
private:
|
||||
bool updateBounds();
|
||||
|
||||
void updateBounds(StringRef fromTo);
|
||||
bool lookupDocumentAndUseCb(
|
||||
StringRef primaryKey, TokenCallback const&, size_t& limit, RocksDBToken&);
|
||||
std::unique_ptr<arangodb::velocypack::Builder> _keys;
|
||||
arangodb::velocypack::ArrayIterator _keysIterator;
|
||||
RocksDBEdgeIndex const* _index;
|
||||
std::unique_ptr<rocksdb::Iterator> _iterator;
|
||||
|
||||
//the following values are required for correct batch handling
|
||||
std::unique_ptr<rocksdb::Iterator> _iterator; //iterator position in rocksdb
|
||||
VPackSlice _arraySlice;
|
||||
VPackBuffer<uint8_t> _arrayBuffer;
|
||||
velocypack::ArrayIterator _arrayIterator; //position in cache for multiple batches
|
||||
|
||||
RocksDBKeyBounds _bounds;
|
||||
bool _doUpdateBounds;
|
||||
bool _useCache;
|
||||
cache::Cache* _cache;
|
||||
VPackBuilder _cacheValueBuilder;
|
||||
std::size_t _cacheValueSize;
|
||||
};
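// Editor's note: a small sketch of the slice-versus-buffer decision behind
// _arraySlice/_arrayBuffer above.  A cached value that fits into the current
// batch can be read in place; otherwise its bytes are copied into an owned
// buffer so the slice stays valid across repeated next() calls.  "cached" and
// the batch limit are placeholders.
VPackSlice cached = /* VelocyPack array owned by the cache */ VPackSlice::emptyArraySlice();
VPackBuffer<uint8_t> owned;
VPackSlice working;
if (cached.length() <= 1000 /* batch limit */) {
  working = cached;                                 // borrow, no copy
} else {
  owned.append(cached.start(), cached.byteSize());  // copy out of the cache
  working = VPackSlice(owned.data());               // now backed by our buffer
}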
|
||||
|
||||
class RocksDBEdgeIndex final : public RocksDBIndex {
|
||||
|
@ -100,19 +110,17 @@ class RocksDBEdgeIndex final : public RocksDBIndex {
|
|||
|
||||
void toVelocyPack(VPackBuilder&, bool, bool) const override;
|
||||
|
||||
void toVelocyPackFigures(VPackBuilder&) const override;
|
||||
|
||||
int insert(transaction::Methods*, TRI_voc_rid_t,
|
||||
arangodb::velocypack::Slice const&, bool isRollback) override;
|
||||
|
||||
int insertRaw(rocksdb::WriteBatchWithIndex*,
|
||||
TRI_voc_rid_t, VPackSlice const&) override;
|
||||
int insertRaw(rocksdb::WriteBatchWithIndex*, TRI_voc_rid_t,
|
||||
VPackSlice const&) override;
|
||||
|
||||
int remove(transaction::Methods*, TRI_voc_rid_t,
|
||||
arangodb::velocypack::Slice const&, bool isRollback) override;
|
||||
|
||||
|
||||
/// optimization for truncateNoTrx, never called in fillIndex
|
||||
int removeRaw(rocksdb::WriteBatch*, TRI_voc_rid_t,
|
||||
int removeRaw(rocksdb::WriteBatchWithIndex*, TRI_voc_rid_t,
|
||||
arangodb::velocypack::Slice const&) override;
|
||||
|
||||
void batchInsert(
|
||||
|
@ -142,9 +150,8 @@ class RocksDBEdgeIndex final : public RocksDBIndex {
|
|||
/// entries.
|
||||
void expandInSearchValues(arangodb::velocypack::Slice const,
|
||||
arangodb::velocypack::Builder&) const override;
|
||||
|
||||
int cleanup() override;
|
||||
|
||||
|
||||
private:
|
||||
/// @brief create the iterator
|
||||
IndexIterator* createEqIterator(transaction::Methods*, ManagedDocumentResult*,
|
||||
|
|
|
@ -232,7 +232,7 @@ void RocksDBEngine::start() {
|
|||
rocksdb::BlockBasedTableOptions table_options;
|
||||
if (opts->_blockCacheSize > 0) {
|
||||
auto cache =
|
||||
rocksdb::NewLRUCache(opts->_blockCacheSize, opts->_blockCacheShardBits);
|
||||
rocksdb::NewLRUCache(opts->_blockCacheSize, (int)opts->_blockCacheShardBits);
|
||||
table_options.block_cache = cache;
|
||||
} else {
|
||||
table_options.no_block_cache = true;
|
||||
|
@ -527,8 +527,7 @@ std::string RocksDBEngine::collectionPath(TRI_vocbase_t const* vocbase,
|
|||
}
|
||||
|
||||
void RocksDBEngine::waitForSync(TRI_voc_tick_t tick) {
|
||||
// TODO: does anything need to be done here?
|
||||
// THROW_ARANGO_NOT_YET_IMPLEMENTED();
|
||||
// intentionally empty, not useful for this type of engine
|
||||
}
|
||||
|
||||
std::shared_ptr<arangodb::velocypack::Builder>
|
||||
|
@ -835,20 +834,6 @@ void RocksDBEngine::createIndex(TRI_vocbase_t* vocbase,
|
|||
TRI_idx_iid_t indexId,
|
||||
arangodb::velocypack::Slice const& data) {}
|
||||
|
||||
void RocksDBEngine::dropIndex(TRI_vocbase_t* vocbase,
|
||||
TRI_voc_cid_t collectionId, TRI_idx_iid_t iid) {
|
||||
// probably not required
|
||||
THROW_ARANGO_NOT_YET_IMPLEMENTED();
|
||||
}
|
||||
|
||||
void RocksDBEngine::dropIndexWalMarker(TRI_vocbase_t* vocbase,
|
||||
TRI_voc_cid_t collectionId,
|
||||
arangodb::velocypack::Slice const& data,
|
||||
bool writeMarker, int&) {
|
||||
// probably not required
|
||||
THROW_ARANGO_NOT_YET_IMPLEMENTED();
|
||||
}
|
||||
|
||||
void RocksDBEngine::unloadCollection(TRI_vocbase_t* vocbase,
|
||||
arangodb::LogicalCollection* collection) {
|
||||
// TODO: does anything else have to happen?
|
||||
|
@ -892,80 +877,10 @@ void RocksDBEngine::signalCleanup(TRI_vocbase_t*) {
|
|||
// nothing to do here
|
||||
}
|
||||
|
||||
// document operations
|
||||
// -------------------
|
||||
void RocksDBEngine::iterateDocuments(
|
||||
TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
|
||||
std::function<void(arangodb::velocypack::Slice const&)> const& cb) {
|
||||
THROW_ARANGO_NOT_YET_IMPLEMENTED();
|
||||
}
|
||||
|
||||
void RocksDBEngine::addDocumentRevision(
|
||||
TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
|
||||
arangodb::velocypack::Slice const& document) {
|
||||
THROW_ARANGO_NOT_YET_IMPLEMENTED();
|
||||
}
|
||||
|
||||
void RocksDBEngine::removeDocumentRevision(
|
||||
TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
|
||||
arangodb::velocypack::Slice const& document) {
|
||||
THROW_ARANGO_NOT_YET_IMPLEMENTED();
|
||||
}
|
||||
|
||||
/// @brief remove data of expired compaction blockers
|
||||
bool RocksDBEngine::cleanupCompactionBlockers(TRI_vocbase_t* vocbase) {
|
||||
THROW_ARANGO_NOT_YET_IMPLEMENTED();
|
||||
return true;
|
||||
}
|
||||
|
||||
/// @brief insert a compaction blocker
|
||||
int RocksDBEngine::insertCompactionBlocker(TRI_vocbase_t* vocbase, double ttl,
|
||||
TRI_voc_tick_t& id) {
|
||||
// THROW_ARANGO_NOT_YET_IMPLEMENTED();
|
||||
return TRI_ERROR_NO_ERROR;
|
||||
}
|
||||
|
||||
/// @brief touch an existing compaction blocker
|
||||
int RocksDBEngine::extendCompactionBlocker(TRI_vocbase_t* vocbase,
|
||||
TRI_voc_tick_t id, double ttl) {
|
||||
// THROW_ARANGO_NOT_YET_IMPLEMENTED();
|
||||
return TRI_ERROR_NO_ERROR;
|
||||
}
|
||||
|
||||
/// @brief remove an existing compaction blocker
|
||||
int RocksDBEngine::removeCompactionBlocker(TRI_vocbase_t* vocbase,
|
||||
TRI_voc_tick_t id) {
|
||||
THROW_ARANGO_NOT_YET_IMPLEMENTED();
|
||||
return true;
|
||||
}
|
||||
|
||||
/// @brief a callback function that is run while it is guaranteed that there
|
||||
/// is no compaction ongoing
|
||||
void RocksDBEngine::preventCompaction(
|
||||
TRI_vocbase_t* vocbase,
|
||||
std::function<void(TRI_vocbase_t*)> const& callback) {
|
||||
THROW_ARANGO_NOT_YET_IMPLEMENTED();
|
||||
}
|
||||
|
||||
/// @brief a callback function that is run there is no compaction ongoing
|
||||
bool RocksDBEngine::tryPreventCompaction(
|
||||
TRI_vocbase_t* vocbase, std::function<void(TRI_vocbase_t*)> const& callback,
|
||||
bool checkForActiveBlockers) {
|
||||
THROW_ARANGO_NOT_YET_IMPLEMENTED();
|
||||
return true;
|
||||
}
|
||||
|
||||
int RocksDBEngine::shutdownDatabase(TRI_vocbase_t* vocbase) {
|
||||
return TRI_ERROR_NO_ERROR;
|
||||
}
|
||||
|
||||
int RocksDBEngine::openCollection(TRI_vocbase_t* vocbase,
|
||||
LogicalCollection* collection,
|
||||
bool ignoreErrors) {
|
||||
THROW_ARANGO_NOT_YET_IMPLEMENTED();
|
||||
return 0;
|
||||
}
|
||||
|
||||
/// @brief Add engine-specific AQL functions.
|
||||
void RocksDBEngine::addAqlFunctions() {
|
||||
RocksDBAqlFunctions::registerResources();
|
||||
|
@ -1007,16 +922,16 @@ std::pair<TRI_voc_tick_t, TRI_voc_cid_t> RocksDBEngine::mapObjectToCollection(
|
|||
return it->second;
|
||||
}
|
||||
|
||||
bool RocksDBEngine::syncWal() {
|
||||
arangodb::Result RocksDBEngine::syncWal() {
|
||||
#ifdef _WIN32
|
||||
// SyncWAL always reports "not implemented" on Windows
|
||||
return true;
|
||||
return arangodb::Result();
|
||||
#else
|
||||
rocksdb::Status status = _db->GetBaseDB()->SyncWAL();
|
||||
if (!status.ok()) {
|
||||
return false;
|
||||
return rocksutils::convertStatus(status);
|
||||
}
|
||||
return true;
|
||||
return arangodb::Result();
|
||||
#endif
|
||||
}
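// Editor's note: a hedged sketch of how callers change with the new signature.
// Instead of a bare bool, syncWal() now returns the RocksDB status converted
// through rocksutils::convertStatus(), so error details can be propagated;
// "engine" is a placeholder for the RocksDBEngine instance.
arangodb::Result res = engine->syncWal();
if (!res.ok()) {
  // forward the converted RocksDB error (code and message) to the caller
  return res;
}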
|
||||
|
||||
|
|
|
@ -172,11 +172,6 @@ class RocksDBEngine final : public StorageEngine {
|
|||
void createIndex(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId,
|
||||
TRI_idx_iid_t id,
|
||||
arangodb::velocypack::Slice const& data) override;
|
||||
void dropIndex(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId,
|
||||
TRI_idx_iid_t id) override;
|
||||
void dropIndexWalMarker(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId,
|
||||
arangodb::velocypack::Slice const& data,
|
||||
bool writeMarker, int&) override;
|
||||
void unloadCollection(TRI_vocbase_t* vocbase,
|
||||
arangodb::LogicalCollection* collection) override;
|
||||
void createView(TRI_vocbase_t* vocbase, TRI_voc_cid_t id,
|
||||
|
@ -192,50 +187,8 @@ class RocksDBEngine final : public StorageEngine {
|
|||
arangodb::LogicalView const*, bool doSync) override;
|
||||
void signalCleanup(TRI_vocbase_t* vocbase) override;
|
||||
|
||||
// document operations
|
||||
// -------------------
|
||||
void iterateDocuments(
|
||||
TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
|
||||
std::function<void(arangodb::velocypack::Slice const&)> const& cb)
|
||||
override;
|
||||
void addDocumentRevision(
|
||||
TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
|
||||
arangodb::velocypack::Slice const& document) override;
|
||||
void removeDocumentRevision(
|
||||
TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
|
||||
arangodb::velocypack::Slice const& document) override;
|
||||
|
||||
/// @brief remove data of expired compaction blockers
|
||||
bool cleanupCompactionBlockers(TRI_vocbase_t* vocbase) override;
|
||||
|
||||
/// @brief insert a compaction blocker
|
||||
int insertCompactionBlocker(TRI_vocbase_t* vocbase, double ttl,
|
||||
TRI_voc_tick_t& id) override;
|
||||
|
||||
/// @brief touch an existing compaction blocker
|
||||
int extendCompactionBlocker(TRI_vocbase_t* vocbase, TRI_voc_tick_t id,
|
||||
double ttl) override;
|
||||
|
||||
/// @brief remove an existing compaction blocker
|
||||
int removeCompactionBlocker(TRI_vocbase_t* vocbase,
|
||||
TRI_voc_tick_t id) override;
|
||||
|
||||
/// @brief a callback function that is run while it is guaranteed that there
|
||||
/// is no compaction ongoing
|
||||
void preventCompaction(
|
||||
TRI_vocbase_t* vocbase,
|
||||
std::function<void(TRI_vocbase_t*)> const& callback) override;
|
||||
|
||||
/// @brief a callback function that is run there is no compaction ongoing
|
||||
bool tryPreventCompaction(TRI_vocbase_t* vocbase,
|
||||
std::function<void(TRI_vocbase_t*)> const& callback,
|
||||
bool checkForActiveBlockers) override;
|
||||
|
||||
int shutdownDatabase(TRI_vocbase_t* vocbase) override;
|
||||
|
||||
int openCollection(TRI_vocbase_t* vocbase, LogicalCollection* collection,
|
||||
bool ignoreErrors) override;
|
||||
|
||||
/// @brief Add engine-specific AQL functions.
|
||||
void addAqlFunctions() override;
|
||||
|
||||
|
@ -281,7 +234,7 @@ class RocksDBEngine final : public StorageEngine {
|
|||
static std::string const FeatureName;
|
||||
RocksDBCounterManager* counterManager() const;
|
||||
RocksDBReplicationManager* replicationManager() const;
|
||||
bool syncWal();
|
||||
arangodb::Result syncWal();
|
||||
|
||||
private:
|
||||
/// single rocksdb database used in this storage engine
|
||||
|
|
|
@ -192,11 +192,10 @@ bool RocksDBFulltextIndex::matchesDefinition(VPackSlice const& info) const {
|
|||
int RocksDBFulltextIndex::insert(transaction::Methods* trx,
|
||||
TRI_voc_rid_t revisionId,
|
||||
VPackSlice const& doc, bool isRollback) {
|
||||
std::vector<std::string> words = wordlist(doc);
|
||||
std::set<std::string> words = wordlist(doc);
|
||||
if (words.empty()) {
|
||||
return TRI_ERROR_NO_ERROR;
|
||||
}
|
||||
|
||||
RocksDBTransactionState* state = rocksutils::toRocksTransactionState(trx);
|
||||
rocksdb::Transaction* rtrx = state->rocksTransaction();
|
||||
|
||||
|
@ -206,10 +205,8 @@ int RocksDBFulltextIndex::insert(transaction::Methods* trx,
|
|||
RocksDBValue value = RocksDBValue::IndexValue();
|
||||
|
||||
int res = TRI_ERROR_NO_ERROR;
|
||||
size_t const count = words.size();
|
||||
size_t i = 0;
|
||||
for (; i < count; ++i) {
|
||||
std::string const& word = words[i];
|
||||
// size_t const count = words.size();
|
||||
for (std::string const& word : words) {
|
||||
RocksDBKey key =
|
||||
RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), docKey);
|
||||
|
||||
|
@ -220,21 +217,21 @@ int RocksDBFulltextIndex::insert(transaction::Methods* trx,
|
|||
break;
|
||||
}
|
||||
}
|
||||
if (res != TRI_ERROR_NO_ERROR) {
|
||||
/*if (res != TRI_ERROR_NO_ERROR) {
|
||||
for (size_t j = 0; j < i; ++j) {
|
||||
std::string const& word = words[j];
|
||||
RocksDBKey key =
|
||||
RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), docKey);
|
||||
rtrx->Delete(key.string());
|
||||
}
|
||||
}
|
||||
}*/
|
||||
return res;
|
||||
}
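// Editor's note: the switch from std::vector to std::set above deduplicates
// and orders the word list, so a document that repeats a word yields exactly
// one Put per distinct word.  A minimal tokenizer sketch (whitespace splitting
// only; the real ExtractWords also applies minWordLength and walks
// sub-attributes and array elements):
#include <set>
#include <sstream>
#include <string>

std::set<std::string> uniqueWords(std::string const& text) {
  std::set<std::string> words;
  std::istringstream in(text);
  std::string word;
  while (in >> word) {
    words.insert(word);  // duplicates collapse automatically
  }
  return words;
}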
|
||||
|
||||
int RocksDBFulltextIndex::insertRaw(rocksdb::WriteBatchWithIndex* batch,
|
||||
TRI_voc_rid_t,
|
||||
arangodb::velocypack::Slice const& doc) {
|
||||
std::vector<std::string> words = wordlist(doc);
|
||||
std::set<std::string> words = wordlist(doc);
|
||||
if (words.empty()) {
|
||||
return TRI_ERROR_NO_ERROR;
|
||||
}
|
||||
|
@ -244,9 +241,7 @@ int RocksDBFulltextIndex::insertRaw(rocksdb::WriteBatchWithIndex* batch,
|
|||
StringRef docKey(doc.get(StaticStrings::KeyString));
|
||||
RocksDBValue value = RocksDBValue::IndexValue();
|
||||
|
||||
size_t const count = words.size();
|
||||
for (size_t i = 0; i < count; ++i) {
|
||||
std::string const& word = words[i];
|
||||
for (std::string const& word : words) {
|
||||
RocksDBKey key =
|
||||
RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), docKey);
|
||||
batch->Put(key.string(), value.string());
|
||||
|
@ -258,7 +253,7 @@ int RocksDBFulltextIndex::insertRaw(rocksdb::WriteBatchWithIndex* batch,
|
|||
int RocksDBFulltextIndex::remove(transaction::Methods* trx,
|
||||
TRI_voc_rid_t revisionId,
|
||||
VPackSlice const& doc, bool isRollback) {
|
||||
std::vector<std::string> words = wordlist(doc);
|
||||
std::set<std::string> words = wordlist(doc);
|
||||
if (words.empty()) {
|
||||
// TODO: distinguish the cases "empty wordlist" and "out of memory"
|
||||
// LOG_TOPIC(WARN, arangodb::Logger::FIXME) << "could not build wordlist";
|
||||
|
@ -272,9 +267,7 @@ int RocksDBFulltextIndex::remove(transaction::Methods* trx,
|
|||
// unique indexes have a different key structure
|
||||
StringRef docKey(doc.get(StaticStrings::KeyString));
|
||||
int res = TRI_ERROR_NO_ERROR;
|
||||
size_t const count = words.size();
|
||||
for (size_t i = 0; i < count; ++i) {
|
||||
std::string const& word = words[i];
|
||||
for (std::string const& word : words) {
|
||||
RocksDBKey key =
|
||||
RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), docKey);
|
||||
|
||||
|
@ -287,15 +280,14 @@ int RocksDBFulltextIndex::remove(transaction::Methods* trx,
|
|||
return res;
|
||||
}
|
||||
|
||||
int RocksDBFulltextIndex::removeRaw(rocksdb::WriteBatch* batch, TRI_voc_rid_t,
|
||||
int RocksDBFulltextIndex::removeRaw(rocksdb::WriteBatchWithIndex* batch,
|
||||
TRI_voc_rid_t,
|
||||
arangodb::velocypack::Slice const& doc) {
|
||||
std::vector<std::string> words = wordlist(doc);
|
||||
std::set<std::string> words = wordlist(doc);
|
||||
// now we are going to construct the value to insert into rocksdb
|
||||
// unique indexes have a different key structure
|
||||
StringRef docKey(doc.get(StaticStrings::KeyString));
|
||||
size_t const count = words.size();
|
||||
for (size_t i = 0; i < count; ++i) {
|
||||
std::string const& word = words[i];
|
||||
for (std::string const& word : words) {
|
||||
RocksDBKey key =
|
||||
RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), docKey);
|
||||
batch->Delete(key.string());
|
||||
|
@ -315,9 +307,8 @@ int RocksDBFulltextIndex::cleanup() {
|
|||
|
||||
/// @brief walk over the attribute. Also Extract sub-attributes and elements in
|
||||
/// list.
|
||||
static void ExtractWords(std::vector<std::string>& words,
|
||||
VPackSlice const value, size_t minWordLength,
|
||||
int level) {
|
||||
static void ExtractWords(std::set<std::string>& words, VPackSlice const value,
|
||||
size_t minWordLength, int level) {
|
||||
if (value.isString()) {
|
||||
// extract the string value for the indexed attribute
|
||||
std::string text = value.copyString();
|
||||
|
@ -340,8 +331,8 @@ static void ExtractWords(std::vector<std::string>& words,
|
|||
|
||||
/// @brief callback function called by the fulltext index to determine the
|
||||
/// words to index for a specific document
|
||||
std::vector<std::string> RocksDBFulltextIndex::wordlist(VPackSlice const& doc) {
|
||||
std::vector<std::string> words;
|
||||
std::set<std::string> RocksDBFulltextIndex::wordlist(VPackSlice const& doc) {
|
||||
std::set<std::string> words;
|
||||
try {
|
||||
VPackSlice const value = doc.get(_attr);
|
||||
|
||||
|
|
|
@ -115,7 +115,7 @@ class RocksDBFulltextIndex final : public RocksDBIndex {
|
|||
|
||||
/// remove index elements and put it in the specified write batch. Should be
|
||||
/// used as an optimization for the non transactional fillIndex method
|
||||
int removeRaw(rocksdb::WriteBatch*, TRI_voc_rid_t,
|
||||
int removeRaw(rocksdb::WriteBatchWithIndex*, TRI_voc_rid_t,
|
||||
arangodb::velocypack::Slice const&) override;
|
||||
|
||||
// TRI_fts_index_t* internals() { return _fulltextIndex; }
|
||||
|
@ -131,7 +131,7 @@ class RocksDBFulltextIndex final : public RocksDBIndex {
|
|||
velocypack::Builder& builder);
|
||||
|
||||
private:
|
||||
std::vector<std::string> wordlist(arangodb::velocypack::Slice const&);
|
||||
std::set<std::string> wordlist(arangodb::velocypack::Slice const&);
|
||||
|
||||
/// @brief the indexed attribute (path)
|
||||
std::vector<std::string> _attr;
|
||||
|
|
|
@ -23,6 +23,7 @@
|
|||
|
||||
#include "RocksDBGeoIndex.h"
|
||||
|
||||
#include <rocksdb/utilities/transaction_db.h>
|
||||
#include "Aql/Ast.h"
|
||||
#include "Aql/AstNode.h"
|
||||
#include "Aql/SortCondition.h"
|
||||
|
@ -31,10 +32,10 @@
|
|||
#include "Logger/Logger.h"
|
||||
#include "RocksDBEngine/RocksDBCommon.h"
|
||||
#include "RocksDBEngine/RocksDBToken.h"
|
||||
#include "StorageEngine/TransactionState.h"
|
||||
#include <rocksdb/utilities/transaction_db.h>
|
||||
#include "RocksDBEngine/RocksDBTransactionState.h"
|
||||
|
||||
using namespace arangodb;
|
||||
using namespace arangodb::rocksdbengine;
|
||||
|
||||
RocksDBGeoIndexIterator::RocksDBGeoIndexIterator(
|
||||
LogicalCollection* collection, transaction::Methods* trx,
|
||||
|
@ -267,31 +268,28 @@ RocksDBGeoIndex::RocksDBGeoIndex(TRI_idx_iid_t iid,
|
|||
TRI_ERROR_BAD_PARAMETER,
|
||||
"RocksDBGeoIndex can only be created with one or two fields.");
|
||||
}
|
||||
|
||||
|
||||
|
||||
// cheap trick to get the last inserted pot and slot number
|
||||
rocksdb::TransactionDB *db = rocksutils::globalRocksDB();
|
||||
rocksdb::TransactionDB* db = rocksutils::globalRocksDB();
|
||||
rocksdb::ReadOptions opts;
|
||||
std::unique_ptr<rocksdb::Iterator> iter(db->NewIterator(opts));
|
||||
|
||||
int numPots = 0;
|
||||
RocksDBKeyBounds b1 = RocksDBKeyBounds::GeoIndex(_objectId, false);
|
||||
iter->SeekForPrev(b1.end());
|
||||
if (iter->Valid()
|
||||
&& _cmp->Compare(b1.start(), iter->key()) < 0
|
||||
&& _cmp->Compare(iter->key(), b1.end()) < 0) {
|
||||
if (iter->Valid() && _cmp->Compare(b1.start(), iter->key()) < 0 &&
|
||||
_cmp->Compare(iter->key(), b1.end()) < 0) {
|
||||
// found a key smaller than bounds end
|
||||
std::pair<bool, int32_t> pair = RocksDBKey::geoValues(iter->key());
|
||||
TRI_ASSERT(pair.first == false);
|
||||
numPots = pair.second;
|
||||
}
|
||||
|
||||
|
||||
int numSlots = 0;
|
||||
RocksDBKeyBounds b2 = RocksDBKeyBounds::GeoIndex(_objectId, true);
|
||||
iter->SeekForPrev(b2.end());
|
||||
if (iter->Valid()
|
||||
&& _cmp->Compare(b2.start(), iter->key()) < 0
|
||||
&& _cmp->Compare(iter->key(), b2.end()) < 0) {
|
||||
if (iter->Valid() && _cmp->Compare(b2.start(), iter->key()) < 0 &&
|
||||
_cmp->Compare(iter->key(), b2.end()) < 0) {
|
||||
// found a key smaller than bounds end
|
||||
std::pair<bool, int32_t> pair = RocksDBKey::geoValues(iter->key());
|
||||
TRI_ASSERT(pair.first);
|
||||
|
@ -408,8 +406,9 @@ bool RocksDBGeoIndex::matchesDefinition(VPackSlice const& info) const {
|
|||
return true;
|
||||
}
|
||||
|
||||
int RocksDBGeoIndex::insert(transaction::Methods*, TRI_voc_rid_t revisionId,
|
||||
VPackSlice const& doc, bool isRollback) {
|
||||
/// internal insert function, set batch or trx before calling
|
||||
int RocksDBGeoIndex::internalInsert(TRI_voc_rid_t revisionId,
|
||||
velocypack::Slice const& doc) {
|
||||
double latitude;
|
||||
double longitude;
|
||||
|
||||
|
@ -459,7 +458,6 @@ int RocksDBGeoIndex::insert(transaction::Methods*, TRI_voc_rid_t revisionId,
|
|||
gc.data = static_cast<uint64_t>(revisionId);
|
||||
|
||||
int res = GeoIndex_insert(_geoIndex, &gc);
|
||||
|
||||
if (res == -1) {
|
||||
LOG_TOPIC(WARN, arangodb::Logger::FIXME)
|
||||
<< "found duplicate entry in geo-index, should not happen";
|
||||
|
@ -469,22 +467,36 @@ int RocksDBGeoIndex::insert(transaction::Methods*, TRI_voc_rid_t revisionId,
|
|||
} else if (res == -3) {
|
||||
LOG_TOPIC(DEBUG, arangodb::Logger::FIXME)
|
||||
<< "illegal geo-coordinates, ignoring entry";
|
||||
return TRI_ERROR_NO_ERROR;
|
||||
} else if (res < 0) {
|
||||
return TRI_set_errno(TRI_ERROR_INTERNAL);
|
||||
}
|
||||
|
||||
return TRI_ERROR_NO_ERROR;
|
||||
}
|
||||
|
||||
int RocksDBGeoIndex::insert(transaction::Methods* trx, TRI_voc_rid_t revisionId,
|
||||
VPackSlice const& doc, bool isRollback) {
|
||||
// acquire rocksdb transaction
|
||||
RocksDBTransactionState* state = rocksutils::toRocksTransactionState(trx);
|
||||
rocksdb::Transaction* rtrx = state->rocksTransaction();
|
||||
|
||||
GeoIndex_setRocksTransaction(_geoIndex, rtrx);
|
||||
int res = this->internalInsert(revisionId, doc);
|
||||
GeoIndex_clearRocks(_geoIndex);
|
||||
return res;
|
||||
}
|
||||
|
||||
int RocksDBGeoIndex::insertRaw(rocksdb::WriteBatchWithIndex* batch,
|
||||
TRI_voc_rid_t revisionId,
|
||||
arangodb::velocypack::Slice const& doc) {
|
||||
return this->insert(nullptr, revisionId, doc, false);
|
||||
GeoIndex_setRocksBatch(_geoIndex, batch);
|
||||
int res = this->internalInsert(revisionId, doc);
|
||||
GeoIndex_clearRocks(_geoIndex);
|
||||
return res;
|
||||
}
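// Editor's note: insert(), insertRaw(), remove() and removeRaw() above all
// bracket the internal call with GeoIndex_setRocks*() / GeoIndex_clearRocks().
// A hypothetical RAII guard (not part of this diff) would make the clearing
// exception-safe:
struct GeoRocksTransactionScope {
  GeoIdx* gi;
  GeoRocksTransactionScope(GeoIdx* gi, rocksdb::Transaction* trx) : gi(gi) {
    GeoIndex_setRocksTransaction(gi, trx);
  }
  ~GeoRocksTransactionScope() { GeoIndex_clearRocks(gi); }
};

// usage sketch:
//   GeoRocksTransactionScope scope(_geoIndex, rtrx);
//   return this->internalInsert(revisionId, doc);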
|
||||
|
||||
int RocksDBGeoIndex::remove(transaction::Methods*, TRI_voc_rid_t revisionId,
|
||||
VPackSlice const& doc, bool isRollback) {
|
||||
/// internal remove function, set batch or trx before calling
|
||||
int RocksDBGeoIndex::internalRemove(TRI_voc_rid_t revisionId,
|
||||
velocypack::Slice const& doc) {
|
||||
double latitude = 0.0;
|
||||
double longitude = 0.0;
|
||||
bool ok = true;
|
||||
|
@ -542,9 +554,25 @@ int RocksDBGeoIndex::remove(transaction::Methods*, TRI_voc_rid_t revisionId,
|
|||
return TRI_ERROR_NO_ERROR;
|
||||
}
|
||||
|
||||
int RocksDBGeoIndex::removeRaw(rocksdb::WriteBatch*, TRI_voc_rid_t revisionId,
|
||||
int RocksDBGeoIndex::remove(transaction::Methods* trx, TRI_voc_rid_t revisionId,
|
||||
VPackSlice const& doc, bool isRollback) {
|
||||
// acquire rocksdb transaction
|
||||
RocksDBTransactionState* state = rocksutils::toRocksTransactionState(trx);
|
||||
rocksdb::Transaction* rtrx = state->rocksTransaction();
|
||||
|
||||
GeoIndex_setRocksTransaction(_geoIndex, rtrx);
|
||||
int res = this->internalRemove(revisionId, doc);
|
||||
GeoIndex_clearRocks(_geoIndex);
|
||||
return res;
|
||||
}
|
||||
|
||||
int RocksDBGeoIndex::removeRaw(rocksdb::WriteBatchWithIndex* batch,
|
||||
TRI_voc_rid_t revisionId,
|
||||
arangodb::velocypack::Slice const& doc) {
|
||||
return this->remove(nullptr, revisionId, doc, false);
|
||||
GeoIndex_setRocksBatch(_geoIndex, batch);
|
||||
int res = this->internalRemove(revisionId, doc);
|
||||
GeoIndex_clearRocks(_geoIndex);
|
||||
return res;
|
||||
}
|
||||
|
||||
int RocksDBGeoIndex::unload() {
|
||||
|
|
|
@ -34,15 +34,14 @@
|
|||
#include <velocypack/Builder.h>
|
||||
#include <velocypack/velocypack-aliases.h>
|
||||
|
||||
using namespace ::arangodb::rocksdbengine;
|
||||
namespace arangodb {
|
||||
|
||||
// GeoCoordinate.data must be capable of storing revision ids
|
||||
static_assert(sizeof(GeoCoordinate::data) >= sizeof(TRI_voc_rid_t),
|
||||
static_assert(sizeof(arangodb::rocksdbengine::GeoCoordinate::data) >=
|
||||
sizeof(TRI_voc_rid_t),
|
||||
"invalid size of GeoCoordinate.data");
|
||||
|
||||
namespace arangodb {
|
||||
class RocksDBGeoIndex;
|
||||
|
||||
class RocksDBGeoIndexIterator final : public IndexIterator {
|
||||
public:
|
||||
/// @brief Construct an RocksDBGeoIndexIterator based on Ast Conditions
|
||||
|
@ -62,14 +61,14 @@ class RocksDBGeoIndexIterator final : public IndexIterator {
|
|||
void reset() override;
|
||||
|
||||
private:
|
||||
size_t findLastIndex(GeoCoordinates* coords) const;
|
||||
void replaceCursor(::GeoCursor* c);
|
||||
size_t findLastIndex(arangodb::rocksdbengine::GeoCoordinates* coords) const;
|
||||
void replaceCursor(arangodb::rocksdbengine::GeoCursor* c);
|
||||
void createCursor(double lat, double lon);
|
||||
void evaluateCondition(); // called in constructor
|
||||
|
||||
RocksDBGeoIndex const* _index;
|
||||
::GeoCursor* _cursor;
|
||||
::GeoCoordinate _coor;
|
||||
arangodb::rocksdbengine::GeoCursor* _cursor;
|
||||
arangodb::rocksdbengine::GeoCoordinate _coor;
|
||||
arangodb::aql::AstNode const* _condition;
|
||||
double _lat;
|
||||
double _lon;
|
||||
|
@ -144,18 +143,20 @@ class RocksDBGeoIndex final : public RocksDBIndex {
|
|||
arangodb::velocypack::Slice const&) override;
|
||||
int remove(transaction::Methods*, TRI_voc_rid_t,
|
||||
arangodb::velocypack::Slice const&, bool isRollback) override;
|
||||
int removeRaw(rocksdb::WriteBatch*, TRI_voc_rid_t,
|
||||
int removeRaw(rocksdb::WriteBatchWithIndex*, TRI_voc_rid_t,
|
||||
arangodb::velocypack::Slice const&) override;
|
||||
|
||||
int unload() override;
|
||||
|
||||
/// @brief looks up all points within a given radius
|
||||
GeoCoordinates* withinQuery(transaction::Methods*, double, double,
|
||||
double) const;
|
||||
arangodb::rocksdbengine::GeoCoordinates* withinQuery(transaction::Methods*,
|
||||
double, double,
|
||||
double) const;
|
||||
|
||||
/// @brief looks up the nearest points
|
||||
GeoCoordinates* nearQuery(transaction::Methods*, double, double,
|
||||
size_t) const;
|
||||
arangodb::rocksdbengine::GeoCoordinates* nearQuery(transaction::Methods*,
|
||||
double, double,
|
||||
size_t) const;
|
||||
|
||||
bool isSame(std::vector<std::string> const& location, bool geoJson) const {
|
||||
return (!_location.empty() && _location == location && _geoJson == geoJson);
|
||||
|
@ -168,6 +169,11 @@ class RocksDBGeoIndex final : public RocksDBIndex {
|
|||
}
|
||||
|
||||
private:
|
||||
/// internal insert function, set batch or trx before calling
|
||||
int internalInsert(TRI_voc_rid_t, velocypack::Slice const&);
|
||||
/// internal remove function, set batch or trx before calling
|
||||
int internalRemove(TRI_voc_rid_t, velocypack::Slice const&);
|
||||
|
||||
/// @brief attribute paths
|
||||
std::vector<std::string> _location;
|
||||
std::vector<std::string> _latitude;
|
||||
|
@ -181,15 +187,15 @@ class RocksDBGeoIndex final : public RocksDBIndex {
|
|||
bool _geoJson;
|
||||
|
||||
/// @brief the actual geo index
|
||||
GeoIdx* _geoIndex;
|
||||
arangodb::rocksdbengine::GeoIdx* _geoIndex;
|
||||
};
|
||||
} // namespace arangodb
|
||||
|
||||
namespace std {
|
||||
template <>
|
||||
class default_delete<GeoCoordinates> {
|
||||
class default_delete<arangodb::rocksdbengine::GeoCoordinates> {
|
||||
public:
|
||||
void operator()(GeoCoordinates* result) {
|
||||
void operator()(arangodb::rocksdbengine::GeoCoordinates* result) {
|
||||
if (result != nullptr) {
|
||||
GeoIndex_CoordinatesFree(result);
|
||||
}
|
||||
|
|
|
@ -29,6 +29,8 @@
|
|||
#include <iostream>
|
||||
|
||||
#include <RocksDBEngine/RocksDBGeoIndexImpl.h>
|
||||
#include <rocksdb/utilities/transaction.h>
|
||||
#include <rocksdb/utilities/write_batch_with_index.h>
|
||||
|
||||
/* Radius of the earth used for distances */
|
||||
#define EARTHRADIAN 6371000.0
|
||||
|
@ -130,6 +132,8 @@ typedef struct {
|
|||
GeoIndexFixed fixed; /* fixed point data */
|
||||
int nextFreePot; /* pots allocated */
|
||||
int nextFreeSlot; /* slots allocated */
|
||||
rocksdb::Transaction *rocksTransaction;
|
||||
rocksdb::WriteBatchWithIndex *rocksBatch;
|
||||
//GeoPot* ypots; /* the pots themselves */// gone
|
||||
//GeoCoordinate* gxc; /* the slots themselves */// gone
|
||||
//size_t _memoryUsed; /* the amount of memory currently used */// gone
|
||||
|
@ -267,74 +271,109 @@ namespace arangodb { namespace rocksdbengine {
|
|||
|
||||
|
||||
/* CRUD interface */
|
||||
int SlotRead(GeoIx * gix, int slot, GeoCoordinate * gc /*out param*/)
|
||||
{
|
||||
//gc GeoCoordinate, element in point array of real geo index
|
||||
//memcpy(gc,gix->gxc+slot,sizeof(GeoCoordinate));
|
||||
|
||||
rocksdb::TransactionDB *db = rocksutils::globalRocksDB();
|
||||
RocksDBKey key = RocksDBKey::GeoIndexValue(gix->objectId, slot, true);
|
||||
std::string slotValue;
|
||||
void GeoIndex_setRocksTransaction(GeoIdx* gi,
|
||||
rocksdb::Transaction* trx) {
|
||||
GeoIx* gix = (GeoIx*)gi;
|
||||
gix->rocksTransaction = trx;
|
||||
}
|
||||
|
||||
void GeoIndex_setRocksBatch(GeoIdx* gi,
|
||||
rocksdb::WriteBatchWithIndex* wb) {
|
||||
GeoIx* gix = (GeoIx*)gi;
|
||||
gix->rocksBatch = wb;
|
||||
}
|
||||
|
||||
void GeoIndex_clearRocks(GeoIdx* gi) {
|
||||
GeoIx* gix = (GeoIx*)gi;
|
||||
gix->rocksTransaction = nullptr;
|
||||
gix->rocksBatch = nullptr;
|
||||
}
|
||||
|
||||
inline void RocksRead(GeoIx * gix, RocksDBKey const& key, std::string *val) {
|
||||
rocksdb::Status s;
|
||||
rocksdb::ReadOptions opts;
|
||||
rocksdb::Status s = db->Get(opts, key.string(), &slotValue);
|
||||
if (gix->rocksTransaction != nullptr) {
|
||||
s = gix->rocksTransaction->Get(opts, key.string(), val);
|
||||
} else {
|
||||
rocksdb::TransactionDB *db = rocksutils::globalRocksDB();
|
||||
if (gix->rocksBatch != nullptr) {
|
||||
s = gix->rocksBatch->GetFromBatchAndDB(db, opts, key.string(), val);
|
||||
} else {
|
||||
s = db->Get(opts, key.string(), val);
|
||||
}
|
||||
}
|
||||
if (!s.ok()) {
|
||||
arangodb::Result r = rocksutils::convertStatus(s, rocksutils::index);
|
||||
THROW_ARANGO_EXCEPTION_MESSAGE(r.errorNumber(), r.errorMessage());
|
||||
}
|
||||
//VpackToCoord(val.slice(), gc);
|
||||
memcpy(gc, slotValue.data(), slotValue.size());
|
||||
|
||||
return 0;
|
||||
}
|
||||
void SlotWrite(GeoIx * gix,int slot, GeoCoordinate * gc)
|
||||
{
|
||||
//memcpy(gix->gxc+slot,gc,sizeof(GeoCoordinate));
|
||||
|
||||
rocksdb::TransactionDB *db = rocksutils::globalRocksDB();
|
||||
RocksDBKey key = RocksDBKey::GeoIndexValue(gix->objectId, slot, true);
|
||||
inline void RocksWrite(GeoIx * gix,
|
||||
RocksDBKey const& key,
|
||||
rocksdb::Slice const& slice) {
|
||||
rocksdb::Status s;
|
||||
if (gix->rocksTransaction != nullptr) {
|
||||
s = gix->rocksTransaction->Put(key.string(), slice);
|
||||
} else {
|
||||
rocksdb::TransactionDB *db = rocksutils::globalRocksDB();
|
||||
if (gix->rocksBatch != nullptr) {
|
||||
gix->rocksBatch->Put(key.string(), slice);
|
||||
} else {
|
||||
rocksdb::WriteOptions opts;
|
||||
s = db->Put(opts, key.string(), slice);
|
||||
}
|
||||
}
|
||||
if (!s.ok()) {
|
||||
arangodb::Result r = rocksutils::convertStatus(s, rocksutils::index);
|
||||
THROW_ARANGO_EXCEPTION_MESSAGE(r.errorNumber(), r.errorMessage());
|
||||
}
|
||||
}
|
||||
|
||||
rocksdb::WriteOptions opts;
|
||||
rocksdb::Status s = db->Put(opts, key.string(),
|
||||
rocksdb::Slice((char*)gc,
|
||||
sizeof(GeoCoordinate)));
|
||||
inline void RocksDelete(GeoIx* gix, RocksDBKey const& key) {
|
||||
rocksdb::Status s;
|
||||
if (gix->rocksTransaction != nullptr) {
|
||||
s = gix->rocksTransaction->Delete(key.string());
|
||||
} else {
|
||||
rocksdb::TransactionDB *db = rocksutils::globalRocksDB();
|
||||
if (gix->rocksBatch != nullptr) {
|
||||
gix->rocksBatch->Delete(key.string());
|
||||
} else {
|
||||
rocksdb::WriteOptions opts;
|
||||
s = db->Delete(opts, key.string());
|
||||
}
|
||||
}
|
||||
if (!s.ok()) {
|
||||
arangodb::Result r = rocksutils::convertStatus(s, rocksutils::index);
|
||||
THROW_ARANGO_EXCEPTION_MESSAGE(r.errorNumber(), r.errorMessage());
|
||||
}
|
||||
}
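// Editor's note: a hedged sketch of why the read path above prefers
// GetFromBatchAndDB() when a WriteBatchWithIndex is installed -- it lets the
// batch-based fill path see entries it has just buffered but not yet
// committed, while still falling back to the database for everything else.
// "db" is a placeholder for the rocksdb::TransactionDB / DB instance.
rocksdb::WriteBatchWithIndex batch;
batch.Put("geo-slot-7", "payload");

std::string value;
rocksdb::Status s =
    batch.GetFromBatchAndDB(db, rocksdb::ReadOptions(), "geo-slot-7", &value);
// s is ok and value == "payload", even though nothing was written to db yet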
|
||||
|
||||
int PotRead(GeoIx * gix, int pot, GeoPot * gp)
|
||||
void SlotRead(GeoIx * gix, int slot, GeoCoordinate * gc /*out param*/)
|
||||
{
|
||||
RocksDBKey key = RocksDBKey::GeoIndexValue(gix->objectId, slot, true);
|
||||
std::string slotValue;
|
||||
RocksRead(gix, key, &slotValue);
|
||||
memcpy(gc, slotValue.data(), slotValue.size());
|
||||
}
|
||||
void SlotWrite(GeoIx * gix,int slot, GeoCoordinate * gc)
|
||||
{
|
||||
RocksDBKey key = RocksDBKey::GeoIndexValue(gix->objectId, slot, true);
|
||||
RocksWrite(gix, key, rocksdb::Slice((char*)gc,
|
||||
sizeof(GeoCoordinate)));
|
||||
}
|
||||
|
||||
void PotRead(GeoIx * gix, int pot, GeoPot * gp)
|
||||
{
|
||||
//memcpy(gp,gix->ypots+pot,sizeof(GeoPot));
|
||||
|
||||
rocksdb::TransactionDB *db = rocksutils::globalRocksDB();
|
||||
RocksDBKey key = RocksDBKey::GeoIndexValue(gix->objectId, pot, false);
|
||||
std::string potValue;
|
||||
|
||||
rocksdb::ReadOptions opts;
|
||||
rocksdb::Status s = db->Get(opts, key.string(), &potValue);
|
||||
if (!s.ok()) {
|
||||
arangodb::Result r = rocksutils::convertStatus(s, rocksutils::index);
|
||||
THROW_ARANGO_EXCEPTION_MESSAGE(r.errorNumber(), r.errorMessage());
|
||||
}
|
||||
RocksRead(gix, key, &potValue);
|
||||
memcpy(gp, potValue.data(), potValue.size());
|
||||
return 0;
|
||||
}
|
||||
|
||||
void PotWrite(GeoIx * gix, int pot, GeoPot * gp) {
|
||||
//memcpy(gix->ypots+pot,gp,sizeof(GeoPot));
|
||||
|
||||
rocksdb::TransactionDB *db = rocksutils::globalRocksDB();
|
||||
RocksDBKey key = RocksDBKey::GeoIndexValue(gix->objectId, pot, false);
|
||||
|
||||
rocksdb::WriteOptions opts;
|
||||
rocksdb::Status s = db->Put(opts, key.string(),
|
||||
rocksdb::Slice((char*)gp,
|
||||
sizeof(GeoPot)));
|
||||
if (!s.ok()) {
|
||||
arangodb::Result r = rocksutils::convertStatus(s, rocksutils::index);
|
||||
THROW_ARANGO_EXCEPTION_MESSAGE(r.errorNumber(), r.errorMessage());
|
||||
}
|
||||
RocksWrite(gix, key, rocksdb::Slice((char*)gp, sizeof(GeoPot)));
|
||||
}
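// Editor's note: SlotRead/SlotWrite and PotRead/PotWrite above persist the
// structs by memcpy'ing their raw bytes into RocksDB values.  That is only
// safe for trivially copyable types, so a compile-time check along these lines
// (not part of this diff) would document the assumption:
#include <type_traits>

static_assert(std::is_trivially_copyable<GeoCoordinate>::value,
              "GeoCoordinate is stored as raw bytes in RocksDB");
static_assert(std::is_trivially_copyable<GeoPot>::value,
              "GeoPot is stored as raw bytes in RocksDB");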
|
||||
|
||||
/* =================================================== */
|
||||
|
@ -370,18 +409,8 @@ double GeoIndex_distance(GeoCoordinate* c1, GeoCoordinate* c2) {
|
|||
/* free list. */
|
||||
/* =================================================== */
|
||||
void GeoIndexFreePot(GeoIx* gix, int pot) {// rewrite delete in rocksdb
|
||||
// gix->ypots[pot].LorLeaf = gix->ypots[0].LorLeaf;
|
||||
//gix->ypots[0].LorLeaf = pot;
|
||||
|
||||
rocksdb::TransactionDB *db = rocksutils::globalRocksDB();
|
||||
RocksDBKey key = RocksDBKey::GeoIndexValue(gix->objectId, pot, false);
|
||||
|
||||
rocksdb::WriteOptions opts;
|
||||
rocksdb::Status s = db->Delete(opts, key.string());
|
||||
if (!s.ok()) {
|
||||
arangodb::Result r = rocksutils::convertStatus(s, rocksutils::index);
|
||||
THROW_ARANGO_EXCEPTION_MESSAGE(r.errorNumber(), r.errorMessage());
|
||||
}
|
||||
RocksDelete(gix, key);
|
||||
}
|
||||
/* =================================================== */
|
||||
/* GeoIndexNewPot */
|
||||
|
@ -401,54 +430,6 @@ void GeoIndexFreePot(GeoIx* gix, int pot) {// rewrite delete in rocksdb
|
|||
/* needed) before it gets too far into things. */
|
||||
/* =================================================== */
|
||||
int GeoIndexNewPot(GeoIx* gix) {// rocksdb initial put
|
||||
/*int j;
|
||||
GeoPot* gp;
|
||||
if (gix->ypots[0].LorLeaf == 0) {
|
||||
// do the growth calculation in long long to make sure it doesn't
|
||||
// overflow when the size gets to be near 2^31
|
||||
long long x = gix->potct;
|
||||
long long y = 100 + GeoIndexGROW;
|
||||
x = x * y + 99;
|
||||
y = 100;
|
||||
x = x / y;
|
||||
if (x > 1000000000L) return -2;
|
||||
int newpotct = (int)x;
|
||||
gp = static_cast<GeoPot*>(TRI_Reallocate(TRI_UNKNOWN_MEM_ZONE, gix->ypots,
|
||||
newpotct * sizeof(GeoPot)));
|
||||
|
||||
if (gp == nullptr) {
|
||||
return -2;
|
||||
}
|
||||
gix->ypots = gp;
|
||||
|
||||
// update memory usage
|
||||
gix->_memoryUsed -= gix->potct * sizeof(GeoPot);
|
||||
gix->_memoryUsed += newpotct * sizeof(GeoPot);
|
||||
|
||||
for (j = gix->potct; j < newpotct; j++) {
|
||||
GeoIndexFreePot(gix, j);
|
||||
}
|
||||
gix->potct = newpotct;
|
||||
}*/
|
||||
//j = gix->ypots[0].LorLeaf;
|
||||
//gix->ypots[0].LorLeaf = gix->ypots[j].LorLeaf;
|
||||
//return j;
|
||||
|
||||
//
|
||||
//gp.LorLeaf = pot - 1;
|
||||
//gix->ypots[0].LorLeaf = pot;
|
||||
|
||||
/*rocksdb::TransactionDB *db = rocksutils::globalRocksDB();
|
||||
RocksDBKey key = RocksDBKey::GeoIndexValue(gix->objectId, pot, false);
|
||||
|
||||
GeoPot gp = {};
|
||||
rocksdb::WriteOptions opts;
|
||||
rocksdb::Status s = db->Put(opts, key.string(), rocksdb::Slice((char*)(&gp),
|
||||
sizeof(GeoPot)));
|
||||
if (!s.ok()) {
|
||||
arangodb::Result r = rocksutils::convertStatus(s, rocksutils::index);
|
||||
THROW_ARANGO_EXCEPTION_MESSAGE(r.errorNumber(), r.errorMessage());
|
||||
}*/
|
||||
return gix->nextFreePot++;
|
||||
}
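// Editor's note: pot and slot numbers are now handed out from the in-memory
// counters nextFreePot/nextFreeSlot.  The constructor seeds them by seeking to
// the last persisted key inside each index bound (the "cheap trick" hunk
// earlier in this diff); a minimal sketch of that seeding pattern, with
// db/cmp/bounds as placeholders:
std::unique_ptr<rocksdb::Iterator> iter(db->NewIterator(rocksdb::ReadOptions()));
iter->SeekForPrev(bounds.end());  // last key at or before the upper bound
if (iter->Valid() && cmp->Compare(bounds.start(), iter->key()) < 0 &&
    cmp->Compare(iter->key(), bounds.end()) < 0) {
  // iter->key() is the highest pot/slot key already stored for this index;
  // decode its number (RocksDBKey::geoValues in this diff) to seed the counter
}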
|
||||
/* =================================================== */
|
||||
|
@ -484,6 +465,9 @@ GeoIdx* GeoIndex_new(uint64_t objectId,
|
|||
if (gix == nullptr) {
|
||||
return (GeoIdx*)gix;
|
||||
}
|
||||
// need to set these to null
|
||||
gix->rocksTransaction = nullptr;
|
||||
gix->rocksBatch = nullptr;
|
||||
|
||||
/* set up the fixed points structure */
|
||||
|
||||
|
@ -1261,9 +1245,8 @@ GeoCoordinates* GeoIndex_NearestCountPoints(GeoIdx* gi, GeoCoordinate* c,
|
|||
/* return the specified slot to the free list */
|
||||
/* =================================================== */
|
||||
void GeoIndexFreeSlot(GeoIx* gix, int slot) {
|
||||
//gix->gxc[slot].latitude = gix->gxc[0].latitude;
|
||||
//gix->gxc[0].latitude = slot;
|
||||
// TODO delete slot
|
||||
RocksDBKey key = RocksDBKey::GeoIndexValue(gix->objectId, slot, true);
|
||||
RocksDelete(gix, key);
|
||||
}
|
||||
/* =================================================== */
|
||||
/* GeoIndexNewSlot */
|
||||
|
|
|
@ -30,6 +30,11 @@
|
|||
#include "Basics/Common.h"
|
||||
#include <cstdint>
|
||||
|
||||
namespace rocksdb {
|
||||
class Transaction;
|
||||
class WriteBatchWithIndex;
|
||||
}
|
||||
|
||||
namespace arangodb { namespace rocksdbengine {
|
||||
|
||||
/* first the things that a user might want to change */
|
||||
|
@ -109,6 +114,11 @@ void GeoIndex_CoordinatesFree(GeoCoordinates* clist);
|
|||
void GeoIndex_INDEXDUMP(GeoIdx* gi, FILE* f);
|
||||
int GeoIndex_INDEXVALID(GeoIdx* gi);
|
||||
#endif
|
||||
|
||||
void GeoIndex_setRocksTransaction(GeoIdx* gi, rocksdb::Transaction*);
|
||||
void GeoIndex_setRocksBatch(GeoIdx* gi, rocksdb::WriteBatchWithIndex*);
|
||||
void GeoIndex_clearRocks(GeoIdx* gi);
|
||||
|
||||
}}
|
||||
#endif
|
||||
/* end of GeoIdx.h */
|
||||
|
|
|
@ -24,6 +24,7 @@
|
|||
#include "RocksDBIndex.h"
|
||||
#include "Basics/VelocyPackHelper.h"
|
||||
#include "Cache/CacheManagerFeature.h"
|
||||
#include "Cache/TransactionalCache.h"
|
||||
#include "Cache/Common.h"
|
||||
#include "Cache/Manager.h"
|
||||
#include "RocksDBEngine/RocksDBComparator.h"
|
||||
|
@ -37,25 +38,36 @@ using namespace arangodb;
|
|||
RocksDBIndex::RocksDBIndex(
|
||||
TRI_idx_iid_t id, LogicalCollection* collection,
|
||||
std::vector<std::vector<arangodb::basics::AttributeName>> const& attributes,
|
||||
bool unique, bool sparse, uint64_t objectId)
|
||||
bool unique, bool sparse, uint64_t objectId, bool useCache)
|
||||
: Index(id, collection, attributes, unique, sparse),
|
||||
_objectId((objectId != 0) ? objectId : TRI_NewTickServer()),
|
||||
_cmp(static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE)->cmp()),
|
||||
_cache(nullptr),
|
||||
_cachePresent(false),
|
||||
_useCache(false) {}
|
||||
_useCache(useCache) {
|
||||
if (_useCache) {
|
||||
//LOG_TOPIC(ERR, Logger::FIXME) << "creating cache";
|
||||
createCache();
|
||||
} else {
|
||||
//LOG_TOPIC(ERR, Logger::FIXME) << "not creating cache";
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
RocksDBIndex::RocksDBIndex(TRI_idx_iid_t id, LogicalCollection* collection,
|
||||
VPackSlice const& info)
|
||||
VPackSlice const& info,bool useCache)
|
||||
: Index(id, collection, info),
|
||||
_objectId(basics::VelocyPackHelper::stringUInt64(info.get("objectId"))),
|
||||
_cmp(static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE)->cmp()),
|
||||
_cache(nullptr),
|
||||
_cachePresent(false),
|
||||
_useCache(false) {
|
||||
_useCache(useCache) {
|
||||
if (_objectId == 0) {
|
||||
_objectId = TRI_NewTickServer();
|
||||
}
|
||||
if (_useCache) {
|
||||
createCache();
|
||||
}
|
||||
}
|
||||
|
||||
RocksDBIndex::~RocksDBIndex() {
|
||||
|
@ -69,14 +81,14 @@ RocksDBIndex::~RocksDBIndex() {
|
|||
}
|
||||
}
|
||||
|
||||
void RocksDBIndex::load() {
|
||||
if (_useCache) {
|
||||
createCache();
|
||||
}
|
||||
void RocksDBIndex::toVelocyPackFigures(VPackBuilder& builder) const {
|
||||
TRI_ASSERT(builder.isOpenObject());
|
||||
builder.add("cacheSize", VPackValue(useCache() ? _cache->size() : 0));
|
||||
}
|
||||
|
||||
int RocksDBIndex::unload() {
|
||||
if (useCache()) {
|
||||
//LOG_TOPIC(ERR, Logger::FIXME) << "unload cache";
|
||||
disableCache();
|
||||
TRI_ASSERT(!_cachePresent);
|
||||
}
|
||||
|
@ -141,3 +153,20 @@ int RocksDBIndex::drop() {
|
|||
return TRI_ERROR_NO_ERROR;
|
||||
}
|
||||
|
||||
// blacklist given key from transactional cache
|
||||
void RocksDBIndex::blackListKey(char const* data, std::size_t len){
|
||||
if (useCache()) {
|
||||
TRI_ASSERT(_cache != nullptr);
|
||||
bool blacklisted = false;
|
||||
uint64_t attempts = 0;
|
||||
while (!blacklisted) {
|
||||
blacklisted = _cache->blacklist(data,len);
|
||||
if (attempts++ % 10 == 0) {
|
||||
if (_cache->isShutdown()) {
|
||||
disableCache();
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
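// Editor's note: the loop above keeps retrying until the transactional cache
// accepts the blacklist entry, and every 10th attempt checks whether the cache
// is shutting down so the index can disable caching instead of spinning
// forever.  Callers in this diff use either overload, e.g.:
blackListKey(fromToRef);  // StringRef overload (edge index insert/remove)
blackListKey(key.string().data(),
             static_cast<uint32_t>(key.string().size()));  // primary index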
|
||||
|
|
|
@ -28,6 +28,7 @@
|
|||
#include "Basics/Common.h"
|
||||
#include "Indexes/Index.h"
|
||||
#include "RocksDBEngine/RocksDBKeyBounds.h"
|
||||
#include <rocksdb/status.h>
|
||||
|
||||
namespace rocksdb {
|
||||
class WriteBatch;
|
||||
|
@ -40,19 +41,20 @@ class Cache;
|
|||
}
|
||||
class LogicalCollection;
|
||||
class RocksDBComparator;
|
||||
|
||||
|
||||
class RocksDBIndex : public Index {
|
||||
protected:
|
||||
RocksDBIndex(TRI_idx_iid_t, LogicalCollection*,
|
||||
std::vector<std::vector<arangodb::basics::AttributeName>> const&
|
||||
attributes,
|
||||
bool unique, bool sparse, uint64_t objectId = 0);
|
||||
bool unique, bool sparse, uint64_t objectId = 0, bool useCache = false);
|
||||
|
||||
RocksDBIndex(TRI_idx_iid_t, LogicalCollection*,
|
||||
arangodb::velocypack::Slice const&);
|
||||
arangodb::velocypack::Slice const&, bool useCache = false);
|
||||
|
||||
public:
|
||||
~RocksDBIndex();
|
||||
void toVelocyPackFigures(VPackBuilder& builder) const override;
|
||||
|
||||
uint64_t objectId() const { return _objectId; }
|
||||
|
||||
|
@ -72,8 +74,6 @@ class RocksDBIndex : public Index {
|
|||
return TRI_ERROR_NO_ERROR;
|
||||
}
|
||||
|
||||
void load();
|
||||
|
||||
/// insert index elements into the specified write batch. Should be used
|
||||
/// as an optimization for the non transactional fillIndex method
|
||||
virtual int insertRaw(rocksdb::WriteBatchWithIndex*, TRI_voc_rid_t,
|
||||
|
@ -81,7 +81,7 @@ class RocksDBIndex : public Index {
|
|||
|
||||
/// remove index elements and put it in the specified write batch. Should be
|
||||
/// used as an optimization for the non transactional fillIndex method
|
||||
virtual int removeRaw(rocksdb::WriteBatch*, TRI_voc_rid_t,
|
||||
virtual int removeRaw(rocksdb::WriteBatchWithIndex*, TRI_voc_rid_t,
|
||||
arangodb::velocypack::Slice const&) = 0;
|
||||
|
||||
void createCache();
|
||||
|
@ -89,6 +89,10 @@ class RocksDBIndex : public Index {
|
|||
|
||||
protected:
|
||||
inline bool useCache() const { return (_useCache && _cachePresent); }
|
||||
void blackListKey(char const* data, std::size_t len);
|
||||
void blackListKey(StringRef& ref){
|
||||
blackListKey(ref.data(), ref.size());
|
||||
};
|
||||
|
||||
protected:
|
||||
uint64_t _objectId;
|
||||
|
|
|
@@ -298,9 +298,13 @@ RocksDBPrimaryIndex::RocksDBPrimaryIndex(
          {{arangodb::basics::AttributeName(
              StaticStrings::KeyString, false)}}),
          true, false,
          basics::VelocyPackHelper::stringUInt64(info, "objectId")) {
  if (_objectId != 0 && !ServerState::instance()->isCoordinator()) {
    _useCache = true;
          basics::VelocyPackHelper::stringUInt64(info, "objectId")
          ,!ServerState::instance()->isCoordinator() /*useCache*/
          ) {
  TRI_ASSERT(_objectId != 0);
  if (_objectId == 0 ) {
    //disableCache
    _useCache = false;
  }
}

@@ -417,24 +421,7 @@ int RocksDBPrimaryIndex::insert(transaction::Methods* trx,
    return TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED;
  }

  if (useCache()) {
    TRI_ASSERT(_cache != nullptr);
    // blacklist from cache
    bool blacklisted = false;
    uint64_t attempts = 0;
    while (!blacklisted) {
      blacklisted = _cache->blacklist(
          key.string().data(), static_cast<uint32_t>(key.string().size()));
      attempts++;
      if (attempts > 10) {
        if (_cache->isShutdown()) {
          disableCache();
          break;
        }
        attempts = 0;
      }
    }
  }
  blackListKey(key.string().data(), static_cast<uint32_t>(key.string().size()));

  auto status = rtrx->Put(key.string(), value.string());
  if (!status.ok()) {
@@ -458,24 +445,7 @@ int RocksDBPrimaryIndex::remove(transaction::Methods* trx,
  auto key = RocksDBKey::PrimaryIndexValue(
      _objectId, StringRef(slice.get(StaticStrings::KeyString)));

  if (useCache()) {
    TRI_ASSERT(_cache != nullptr);
    // blacklist from cache
    bool blacklisted = false;
    uint64_t attempts = 0;
    while (!blacklisted) {
      blacklisted = _cache->blacklist(
          key.string().data(), static_cast<uint32_t>(key.string().size()));
      attempts++;
      if (attempts > 10) {
        if (_cache->isShutdown()) {
          disableCache();
          break;
        }
        attempts = 0;
      }
    }
  }
  blackListKey(key.string().data(), static_cast<uint32_t>(key.string().size()));

  // acquire rocksdb transaction
  RocksDBTransactionState* state = rocksutils::toRocksTransactionState(trx);
@@ -489,12 +459,9 @@ int RocksDBPrimaryIndex::remove(transaction::Methods* trx,
}

/// optimization for truncateNoTrx, never called in fillIndex
int RocksDBPrimaryIndex::removeRaw(rocksdb::WriteBatch* batch, TRI_voc_rid_t,
                                   VPackSlice const& slice) {
  auto key = RocksDBKey::PrimaryIndexValue(
      _objectId, StringRef(slice.get(StaticStrings::KeyString)));
  batch->Delete(key.string());
  return TRI_ERROR_NO_ERROR;
int RocksDBPrimaryIndex::removeRaw(rocksdb::WriteBatchWithIndex*, TRI_voc_rid_t,
                                   VPackSlice const&) {
  THROW_ARANGO_EXCEPTION(TRI_ERROR_NOT_IMPLEMENTED);
}

/// @brief called when the index is dropped
@@ -179,7 +179,7 @@ class RocksDBPrimaryIndex final : public RocksDBIndex {
             arangodb::velocypack::Slice const&, bool isRollback) override;

  /// optimization for truncateNoTrx, never called in fillIndex
  int removeRaw(rocksdb::WriteBatch*, TRI_voc_rid_t,
  int removeRaw(rocksdb::WriteBatchWithIndex*, TRI_voc_rid_t,
                arangodb::velocypack::Slice const&) override;

  int drop() override;
@@ -223,30 +223,13 @@ void RocksDBRestExportHandler::createCursor() {
  }

  VPackSlice options = optionsBuilder.slice();

  uint64_t waitTime = 0;
  bool flush = arangodb::basics::VelocyPackHelper::getBooleanValue(
      options, "flush", false);

  if (flush) {
    static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE)->syncWal();

    double flushWait =
        arangodb::basics::VelocyPackHelper::getNumericValue<double>(
            options, "flushWait", 10.0);

    waitTime = static_cast<uint64_t>(
        flushWait * 1000 *
        1000);  // flushWait is specified in s, but we need ns
  }

  size_t limit = arangodb::basics::VelocyPackHelper::getNumericValue<size_t>(
      options, "limit", 0);

  // this may throw!
  auto collectionExport =
      std::make_unique<RocksDBCollectionExport>(_vocbase, name, _restrictions);
  collectionExport->run(waitTime, limit);
  collectionExport->run(limit);

  size_t batchSize =
      arangodb::basics::VelocyPackHelper::getNumericValue<size_t>(
@@ -366,11 +366,6 @@ void RocksDBRestReplicationHandler::handleCommandBatch() {
  // extract ttl
  // double expires =
  //     VelocyPackHelper::getNumericValue<double>(input->slice(), "ttl", 0);

  // TRI_voc_tick_t id;
  // StorageEngine* engine = EngineSelectorFeature::ENGINE;
  // int res = engine->insertCompactionBlocker(_vocbase, expires, id);

  RocksDBReplicationContext* ctx = _manager->createContext();
  if (ctx == nullptr) {
    THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unable to create replication context");
@@ -1639,9 +1634,6 @@ void RocksDBRestReplicationHandler::handleCommandSync() {
  config._verbose = verbose;
  config._useCollectionId = useCollectionId;

  // wait until all data in current logfile got synced
  static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE)->syncWal();

  InitialSyncer syncer(_vocbase, &config, restrictCollections, restrictType,
                       verbose);
@@ -46,8 +46,10 @@ static void JS_FlushWal(v8::FunctionCallbackInfo<v8::Value> const& args) {
  TRI_V8_TRY_CATCH_BEGIN(isolate);
  v8::HandleScope scope(isolate);

  static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE)->syncWal();

  arangodb::Result ret = static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE)->syncWal();
  if (!ret.ok()) {
    THROW_ARANGO_EXCEPTION_MESSAGE(ret.errorNumber(), ret.errorMessage());
  }
  TRI_V8_RETURN_TRUE();
  TRI_V8_TRY_CATCH_END
}
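The reworked JS_FlushWal() above no longer assumes syncWal() succeeds; it inspects the returned arangodb::Result and converts a failure into an exception. A simplified sketch of that error-translation pattern, assuming a hypothetical SimpleResult type in place of arangodb::Result:

#include <stdexcept>
#include <string>

// Hypothetical minimal stand-in for arangodb::Result.
struct SimpleResult {
  int errorNumber;
  std::string errorMessage;
  bool ok() const { return errorNumber == 0; }
};

// Pretend WAL sync that may fail.
SimpleResult syncWalDemo(bool fail) {
  if (fail) {
    return SimpleResult{1, "wal sync failed"};
  }
  return SimpleResult{0, ""};
}

// Translate a failed result into an exception, as the V8 wrapper above does
// with THROW_ARANGO_EXCEPTION_MESSAGE.
void flushWalOrThrow(bool fail) {
  SimpleResult res = syncWalDemo(fail);
  if (!res.ok()) {
    throw std::runtime_error(res.errorMessage);
  }
}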
@@ -202,12 +202,6 @@ void RocksDBVPackIndex::toVelocyPack(VPackBuilder& builder, bool withFigures,
  builder.close();
}

/// @brief return a VelocyPack representation of the index figures
void RocksDBVPackIndex::toVelocyPackFigures(VPackBuilder& builder) const {
  TRI_ASSERT(builder.isOpenObject());
  builder.add("memory", VPackValue(memory()));
}

/// @brief whether or not the index is implicitly unique
/// this can be the case if the index is not declared as unique, but contains
/// a
@@ -611,7 +605,7 @@ int RocksDBVPackIndex::remove(transaction::Methods* trx,
  return res;
}

int RocksDBVPackIndex::removeRaw(rocksdb::WriteBatch* writeBatch,
int RocksDBVPackIndex::removeRaw(rocksdb::WriteBatchWithIndex* writeBatch,
                                 TRI_voc_rid_t revisionId,
                                 VPackSlice const& doc) {
  std::vector<RocksDBKey> elements;
@@ -121,7 +121,6 @@ class RocksDBVPackIndex : public RocksDBIndex {
  size_t memory() const override;

  void toVelocyPack(VPackBuilder&, bool, bool) const override;
  void toVelocyPackFigures(VPackBuilder&) const override;

  bool allowExpansion() const override { return true; }

@@ -147,7 +146,7 @@ class RocksDBVPackIndex : public RocksDBIndex {
  int remove(transaction::Methods*, TRI_voc_rid_t,
             arangodb::velocypack::Slice const&, bool isRollback) override;

  int removeRaw(rocksdb::WriteBatch*, TRI_voc_rid_t,
  int removeRaw(rocksdb::WriteBatchWithIndex*, TRI_voc_rid_t,
                arangodb::velocypack::Slice const&) override;

  int drop() override;
@@ -84,11 +84,6 @@ class PhysicalCollection {
  /// @brief opens an existing collection
  virtual void open(bool ignoreErrors) = 0;

  /// @brief iterate all markers of a collection on load
  virtual int iterateMarkersOnLoad(transaction::Methods* trx) = 0;

  virtual bool isFullyCollected() const = 0;

  void drop();

  ////////////////////////////////////
@@ -144,11 +139,6 @@ class PhysicalCollection {
                            DocumentIdentifierToken const& token,
                            ManagedDocumentResult& result) = 0;

  virtual bool readDocumentConditional(transaction::Methods* trx,
                                       DocumentIdentifierToken const& token,
                                       TRI_voc_tick_t maxTick,
                                       ManagedDocumentResult& result) = 0;

  virtual int insert(arangodb::transaction::Methods* trx,
                     arangodb::velocypack::Slice const newSlice,
                     arangodb::ManagedDocumentResult& result,
@@ -321,18 +321,6 @@ class StorageEngine : public application_features::ApplicationFeature {
  virtual void createIndex(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId,
                           TRI_idx_iid_t id, arangodb::velocypack::Slice const& data) = 0;

  // asks the storage engine to drop the specified index and persist the deletion
  // info. Note that physical deletion of the index must not be carried out by this call,
  // as there may still be users of the index. It is recommended that this operation
  // only sets a deletion flag for the index but lets an async task perform
  // the actual deletion.
  // the WAL entry for index deletion will be written *after* the call
  // to "dropIndex" returns
  virtual void dropIndex(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId,
                         TRI_idx_iid_t id) = 0;

  virtual void dropIndexWalMarker(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId,
                                  arangodb::velocypack::Slice const& data, bool useMarker, int&) = 0;
  // Returns the StorageEngine-specific implementation
  // of the IndexFactory. This is used to validate
  // information about indexes.
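The comment block above spells out a contract for dropIndex(): only record the intent to drop and leave physical deletion to an asynchronous task, since readers may still use the index. A rough sketch of one way such a deferred drop could look; DeferredIndexDropper and every name in it are hypothetical and not the ArangoDB implementation.

#include <cstdint>
#include <functional>
#include <mutex>
#include <unordered_set>
#include <vector>

using IndexId = std::uint64_t;

class DeferredIndexDropper {
 public:
  // called by dropIndex(): only records the intent, never deletes data here
  void flagForDeletion(IndexId id) {
    std::lock_guard<std::mutex> guard(_mutex);
    _flagged.insert(id);
  }

  // called later by an async cleanup task, once no readers remain
  void runCleanup(std::function<void(IndexId)> const& physicallyDelete) {
    std::vector<IndexId> toDrop;
    {
      std::lock_guard<std::mutex> guard(_mutex);
      toDrop.assign(_flagged.begin(), _flagged.end());
      _flagged.clear();
    }
    for (IndexId id : toDrop) {
      physicallyDelete(id);  // actual removal of the on-disk index data
    }
  }

 private:
  std::mutex _mutex;
  std::unordered_set<IndexId> _flagged;
};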
@@ -347,53 +335,8 @@ class StorageEngine : public application_features::ApplicationFeature {

  virtual void signalCleanup(TRI_vocbase_t* vocbase) = 0;

  // document operations
  // -------------------

  // iterate all documents of the underlying collection
  // this is called when a collection is opened, and all its documents need to be added to
  // indexes etc.
  virtual void iterateDocuments(TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
                                std::function<void(arangodb::velocypack::Slice const&)> const& cb) = 0;


  // adds a document to the storage engine
  // this will be called by the WAL collector when surviving documents are being moved
  // into the storage engine's realm
  virtual void addDocumentRevision(TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
                                   arangodb::velocypack::Slice const& document) = 0;

  // removes a document from the storage engine
  // this will be called by the WAL collector when non-surviving documents are being removed
  // from the storage engine's realm
  virtual void removeDocumentRevision(TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
                                      arangodb::velocypack::Slice const& document) = 0;

  /// @brief remove data of expired compaction blockers
  virtual bool cleanupCompactionBlockers(TRI_vocbase_t* vocbase) = 0;

  /// @brief insert a compaction blocker
  virtual int insertCompactionBlocker(TRI_vocbase_t* vocbase, double ttl, TRI_voc_tick_t& id) = 0;

  /// @brief touch an existing compaction blocker
  virtual int extendCompactionBlocker(TRI_vocbase_t* vocbase, TRI_voc_tick_t id, double ttl) = 0;

  /// @brief remove an existing compaction blocker
  virtual int removeCompactionBlocker(TRI_vocbase_t* vocbase, TRI_voc_tick_t id) = 0;

  /// @brief a callback function that is run while it is guaranteed that there is no compaction ongoing
  virtual void preventCompaction(TRI_vocbase_t* vocbase,
                                 std::function<void(TRI_vocbase_t*)> const& callback) = 0;

  /// @brief a callback function that is run while there is no compaction ongoing
  virtual bool tryPreventCompaction(TRI_vocbase_t* vocbase,
                                    std::function<void(TRI_vocbase_t*)> const& callback,
                                    bool checkForActiveBlockers) = 0;

  virtual int shutdownDatabase(TRI_vocbase_t* vocbase) = 0;

  virtual int openCollection(TRI_vocbase_t* vocbase, LogicalCollection* collection, bool ignoreErrors) = 0;

  // AQL functions
  // -------------
@@ -1412,7 +1412,7 @@ OperationResult transaction::Methods::insertLocal(
    res = workForOneDocument(value);
  }

  // wait for operation(s) to be synced to disk here
  // wait for operation(s) to be synced to disk here. On rocksdb maxTick == 0
  if (res == TRI_ERROR_NO_ERROR && options.waitForSync && maxTick > 0 &&
      isSingleOperationTransaction()) {
    EngineSelectorFeature::ENGINE->waitForSync(maxTick);
@@ -1723,7 +1723,7 @@ OperationResult transaction::Methods::modifyLocal(
    res = workForOneDocument(newValue, false);
  }

  // wait for operation(s) to be synced to disk here
  // wait for operation(s) to be synced to disk here. On rocksdb maxTick == 0
  if (res.ok() && options.waitForSync && maxTick > 0 &&
      isSingleOperationTransaction()) {
    EngineSelectorFeature::ENGINE->waitForSync(maxTick);
@@ -1968,7 +1968,7 @@ OperationResult transaction::Methods::removeLocal(
    res = workForOneDocument(value, false);
  }

  // wait for operation(s) to be synced to disk here
  // wait for operation(s) to be synced to disk here. On rocksdb maxTick == 0
  if (res == TRI_ERROR_NO_ERROR && options.waitForSync && maxTick > 0 &&
      isSingleOperationTransaction()) {
    EngineSelectorFeature::ENGINE->waitForSync(maxTick);
@@ -1160,12 +1160,6 @@ bool LogicalCollection::readDocument(transaction::Methods* trx,
  return getPhysical()->readDocument(trx, token, result);
}

bool LogicalCollection::readDocumentConditional(
    transaction::Methods* trx, DocumentIdentifierToken const& token,
    TRI_voc_tick_t maxTick, ManagedDocumentResult& result) {
  return getPhysical()->readDocumentConditional(trx, token, maxTick, result);
}

/// @brief a method to skip certain documents in AQL write operations,
/// this is only used in the enterprise edition for smart graphs
#ifndef USE_ENTERPRISE
@@ -277,11 +277,6 @@ class LogicalCollection {
                    DocumentIdentifierToken const& token,
                    ManagedDocumentResult& result);

  bool readDocumentConditional(transaction::Methods* trx,
                               DocumentIdentifierToken const& token,
                               TRI_voc_tick_t maxTick,
                               ManagedDocumentResult& result);

  /// @brief Persist the connected physical collection.
  /// This should be called AFTER the collection is successfully
  /// created and only on Single/DBServer
@@ -29,6 +29,7 @@ const functionsDocumentation = {
  'ldap': 'ldap tests'
};
const optionsDocumentation = [
  ' - `skipLdap` : if set to true the LDAP tests are skipped',
  ' - `ldapUrl` : testing authentication and authentication_parameters will be skipped.'
];

@@ -235,6 +236,17 @@ exports.setup = function (testFns, defaultFns, opts, fnDocs, optionsDoc) {
  opts['ldapUrl'] = '127.0.0.1';
  opts['caCertFilePath'] = '~/ca_cert.pem';

  // turn off ldap tests by default. only enable them in enterprise version
  opts['skipLdap'] = true;

  let version = {};
  if (global.ARANGODB_CLIENT_VERSION) {
    version = global.ARANGODB_CLIENT_VERSION(true);
    if (version['enterprise-version']) {
      opts['skipLdap'] = false;
    }
  }

  for (var attrname in functionsDocumentation) { fnDocs[attrname] = functionsDocumentation[attrname]; }
  for (var i = 0; i < optionsDocumentation.length; i++) { optionsDoc.push(optionsDocumentation[i]); }
};
@@ -394,7 +394,7 @@ char* Utf8Helper::toupper(TRI_memory_zone_t* zone, char const* src,
/// @brief Extract the words from a UTF-8 string.
////////////////////////////////////////////////////////////////////////////////

bool Utf8Helper::getWords(std::vector<std::string>& words,
bool Utf8Helper::getWords(std::set<std::string>& words,
                          std::string const& text, size_t minimalLength,
                          size_t maximalLength, bool lowerCase) {
  UErrorCode status = U_ZERO_ERROR;
@@ -458,21 +458,6 @@ bool Utf8Helper::getWords(std::vector<std::string>& words,
    return false;
  }

  // estimate an initial vector size. this is not accurate, but setting the
  // initial size to some
  // value in the correct order of magnitude will save a lot of vector
  // reallocations later
  size_t initialWordCount = textLength / (2 * (minimalLength + 1));
  if (initialWordCount < 32) {
    // alloc at least 32 pointers (= 256b)
    initialWordCount = 32;
  } else if (initialWordCount > 8192) {
    // alloc at most 8192 pointers (= 64kb)
    initialWordCount = 8192;
  }
  // Reserve initialWordCount additional words in the vector
  words.reserve(words.size() + initialWordCount);

  BreakIterator* wordIterator =
      BreakIterator::createWordInstance(locale, status);
  TRI_ASSERT(wordIterator != nullptr);
@@ -496,7 +481,7 @@ bool Utf8Helper::getWords(std::vector<std::string>& words,
                                         chunkLength, &utf8WordLength);
      if (utf8Word != nullptr) {
        std::string word(utf8Word, utf8WordLength);
        words.emplace_back(word);
        words.emplace(word);
        TRI_Free(TRI_UNKNOWN_MEM_ZONE, utf8Word);
      }
    }
@@ -136,7 +136,7 @@ class Utf8Helper {
  /// @brief returns the words of a UTF-8 string.
  //////////////////////////////////////////////////////////////////////////////

  bool getWords(std::vector<std::string>& words, std::string const& text,
  bool getWords(std::set<std::string>& words, std::string const& text,
                size_t minimalWordLength, size_t maximalWordLength,
                bool lowerCase);
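With getWords() now filling a std::set instead of a std::vector, extracted words are de-duplicated and iterate in lexicographic order; that is why the caller and the tests further below switch from positional indexing to find(). A small illustration of the effect, using plain whitespace tokenisation instead of the ICU BreakIterator that Utf8Helper actually uses:

#include <cstddef>
#include <iostream>
#include <set>
#include <sstream>
#include <string>

// Collect unique words of a minimal length; duplicates collapse automatically.
std::set<std::string> uniqueWords(std::string const& text, std::size_t minimalLength) {
  std::set<std::string> words;
  std::istringstream in(text);
  std::string word;
  while (in >> word) {
    if (word.size() >= minimalLength) {
      words.insert(word);
    }
  }
  return words;
}

int main() {
  auto words = uniqueWords("der mueller geht in die post die post", 3);
  // prints: der die geht mueller post  (sorted, no duplicates)
  for (std::string const& w : words) {
    std::cout << w << ' ';
  }
  std::cout << '\n';
  return 0;
}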
@@ -4007,7 +4007,7 @@ static void JS_SplitWordlist(v8::FunctionCallbackInfo<v8::Value> const& args) {
    lowerCase = TRI_ObjectToBoolean(args[3]);
  }

  std::vector<std::string> wordList;
  std::set<std::string> wordList;

  if (!Utf8Helper::DefaultUtf8Helper.getWords(
          wordList, stringToTokenize, minLength, maxLength, lowerCase)) {
@@ -4017,11 +4017,11 @@ static void JS_SplitWordlist(v8::FunctionCallbackInfo<v8::Value> const& args) {
  v8::Handle<v8::Array> v8WordList =
      v8::Array::New(isolate, static_cast<int>(wordList.size()));

  size_t const n = static_cast<uint32_t>(wordList.size());

  for (uint32_t i = 0; i < n; i++) {
    v8::Handle<v8::String> oneWord = TRI_V8_STD_STRING(wordList[i]);
  uint32_t i = 0;
  for (std::string const& word : wordList) {
    v8::Handle<v8::String> oneWord = TRI_V8_STD_STRING(word);
    v8WordList->Set(i, oneWord);
    i++;
  }

  TRI_V8_RETURN(v8WordList);
@@ -172,26 +172,28 @@ SECTION("tst_3") {
SECTION("tst_4") {
  std::string testString = "Der Müller geht in die Post.";

  std::vector<std::string> words;
  std::set<std::string> words;
  arangodb::basics::Utf8Helper::DefaultUtf8Helper.getWords(words, testString, 3, UINT32_MAX, true);
  CHECK(!words.empty());

  CHECK((5UL) == words.size());
  CHECK(std::string("der") == words[0]);
  CHECK(std::string("müller") == words[1]);
  CHECK(std::string("geht") == words[2]);
  CHECK(std::string("die") == words[3]);
  CHECK(std::string("post") == words[4]);
  CHECK(words.find(std::string("der")) != words.end());
  CHECK(words.find(std::string("müller")) != words.end());
  CHECK(words.find(std::string("geht")) != words.end());
  CHECK(words.find(std::string("die")) != words.end());
  CHECK(words.find(std::string("post")) != words.end());

  words.clear();
  arangodb::basics::Utf8Helper::DefaultUtf8Helper.getWords(words, testString, 4, UINT32_MAX, true);
  CHECK(!words.empty());

  CHECK((3UL) == words.size());
  CHECK(std::string("müller") == words[0]);
  CHECK(std::string("geht") == words[1]);
  CHECK(std::string("post") == words[2]);

  CHECK(words.find(std::string("müller")) != words.end());
  CHECK(words.find(std::string("geht")) != words.end());
  CHECK(words.find(std::string("post")) != words.end());
  CHECK(words.find(std::string("der")) == words.end());
  CHECK(words.find(std::string("die")) == words.end());

  words.clear();
  arangodb::basics::Utf8Helper::DefaultUtf8Helper.getWords(words, "", 3, UINT32_MAX, true);
  CHECK(words.empty());
@@ -200,26 +202,28 @@ SECTION("tst_4") {
SECTION("tst_5") {
  std::string testString = "Der Müller geht in die Post.";

  std::vector<std::string> words;
  std::set<std::string> words;
  arangodb::basics::Utf8Helper::DefaultUtf8Helper.getWords(words, testString, 3, UINT32_MAX, false);
  CHECK(!words.empty());

  CHECK((5UL) == words.size());
  CHECK(std::string("Der") == words[0]);
  CHECK(std::string("Müller") == words[1]);
  CHECK(std::string("geht") == words[2]);
  CHECK(std::string("die") == words[3]);
  CHECK(std::string("Post") == words[4]);
  CHECK(words.find(std::string("Der")) != words.end());
  CHECK(words.find(std::string("Müller")) != words.end());
  CHECK(words.find(std::string("geht")) != words.end());
  CHECK(words.find(std::string("die")) != words.end());
  CHECK(words.find(std::string("Post")) != words.end());

  words.clear();
  arangodb::basics::Utf8Helper::DefaultUtf8Helper.getWords(words, testString, 4, UINT32_MAX, false);
  CHECK(!words.empty());

  CHECK((3UL) == words.size());
  CHECK(std::string("Müller") == words[0]);
  CHECK(std::string("geht") == words[1]);
  CHECK(std::string("Post") == words[2]);

  CHECK(words.find(std::string("Müller")) != words.end());
  CHECK(words.find(std::string("geht")) != words.end());
  CHECK(words.find(std::string("Post")) != words.end());
  CHECK(words.find(std::string("der")) == words.end());
  CHECK(words.find(std::string("die")) == words.end());

  words.clear();
  arangodb::basics::Utf8Helper::DefaultUtf8Helper.getWords(words, "", 4, UINT32_MAX, false);
  CHECK(words.empty());