mirror of https://gitee.com/bigwinds/arangodb
Merge branch 'devel' of https://github.com/arangodb/arangodb into feature/edge-index-cache
* 'devel' of https://github.com/arangodb/arangodb:
  Fix abort conditions of FailedLeader
  removing unused and unimplemented methods
  cppcheck
  getting rid of exceptions in supervision
  change to sleep_until so older windows are pleased (and is a better fitting method anyway)
  "searching for culprit breaking ensureIndex in cluster through go driver"
  Fixing fulltext deduplication
commit e2b78d71b6
@@ -1344,7 +1344,7 @@ AgencyCommResult AgencyComm::sendWithFailover(
       return result;
     }

-    std::this_thread::sleep_for(waitUntil-std::chrono::steady_clock::now());
+    std::this_thread::sleep_until(waitUntil);
     if (waitInterval.count() == 0.0) {
       waitInterval = std::chrono::duration<double>(0.25);
     } else if (waitInterval.count() < 5.0) {
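A note on the sleep change above: converting a deadline back into a duration for sleep_for() can yield a negative duration if the deadline has already passed, which some older Windows standard-library builds handled badly (per the commit message); sleep_until() takes the deadline directly. A minimal standalone sketch of the difference, not ArangoDB code:

  #include <chrono>
  #include <thread>

  int main() {
    auto waitUntil = std::chrono::steady_clock::now() + std::chrono::milliseconds(250);

    // sleep_for() needs a duration, so the deadline must be converted back;
    // the subtraction can come out negative if we are already late.
    std::this_thread::sleep_for(waitUntil - std::chrono::steady_clock::now());

    // sleep_until() expresses the deadline directly; if it has passed, the
    // call simply returns, which fits a retry loop better.
    std::this_thread::sleep_until(waitUntil);
    return 0;
  }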
@@ -318,7 +318,7 @@ bool FailedLeader::start() {
   auto slice = result.get(
     std::vector<std::string>({
       agencyPrefix, "Supervision", "Health", _from, "Status"}));
-  if (!slice.isString() || slice.copyString() != "FAILED") {
+  if (slice.isString() && slice.copyString() == "GOOD") {
     finish("", _shard, false, "Server " + _from + " no longer failing.");
     return false;
   }

@@ -327,7 +327,7 @@ bool FailedLeader::start() {
   slice = result.get(
     std::vector<std::string>(
       {agencyPrefix, "Supervision", "Health", _to, "Status"}));
-  if (!slice.isString() || slice.copyString() != "GOOD") {
+  if (slice.isString() && slice.copyString() != "GOOD") {
     LOG_TOPIC(INFO, Logger::SUPERVISION)
       << "Will not failover from " << _from << " to " << _to
       << " as target server is no longer in good condition. Will retry.";
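To see what the first FailedLeader fix changes, compare the two predicates against the three states the agency can report. A toy model (oldAbort/newAbort are illustrative names, not ArangoDB functions): the old condition also aborted the job when the status was merely missing or unreadable, the new one aborts only on a definite "GOOD".

  #include <iostream>
  #include <optional>
  #include <string>

  // nullopt models a missing / non-string status slice.
  static bool oldAbort(std::optional<std::string> const& s) {
    return !s.has_value() || *s != "FAILED";  // fires on *unknown* status too
  }
  static bool newAbort(std::optional<std::string> const& s) {
    return s.has_value() && *s == "GOOD";     // fires only on a definite GOOD
  }

  int main() {
    std::optional<std::string> const cases[] = {std::nullopt, "GOOD", "FAILED"};
    for (auto const& s : cases) {
      std::cout << (s ? *s : "<missing>") << ": old=" << oldAbort(s)
                << " new=" << newAbort(s) << "\n";
    }
    return 0;
  }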
@@ -807,6 +807,41 @@ bool Node::getBool() const {
   return slice().getBool();
 }

+bool Node::isBool() const noexcept {
+  if (type() == NODE) {
+    return false;
+  }
+  return slice().isBool();
+}
+
+bool Node::isDouble() const noexcept {
+  if (type() == NODE) {
+    return false;
+  }
+  return slice().isDouble();
+}
+
+bool Node::isString() const noexcept {
+  if (type() == NODE) {
+    return false;
+  }
+  return slice().isString();
+}
+
+bool Node::isUInt() const noexcept {
+  if (type() == NODE) {
+    return false;
+  }
+  return slice().isUInt() || slice().isSmallInt();
+}
+
+bool Node::isInt() const noexcept {
+  if (type() == NODE) {
+    return false;
+  }
+  return slice().isInt() || slice().isSmallInt();
+}
+
 double Node::getDouble() const {
   if (type() == NODE) {
     throw StoreException("Must not convert NODE type to double");

@@ -217,15 +217,30 @@ class Node {
   /// @brief Get integer value (throws if type NODE or if conversion fails)
   int getInt() const;

+  /// @brief Is Int
+  bool isInt() const noexcept;
+
   /// @brief Get unsigned value (throws if type NODE or if conversion fails)
   uint64_t getUInt() const;

+  /// @brief Is UInt
+  bool isUInt() const noexcept;
+
   /// @brief Get bool value (throws if type NODE or if conversion fails)
   bool getBool() const;

+  /// @brief Is boolean
+  bool isBool() const noexcept;
+
   /// @brief Get double value (throws if type NODE or if conversion fails)
   double getDouble() const;

+  /// @brief Is double
+  bool isDouble() const noexcept;
+
+  /// @brief Is string
+  bool isString() const noexcept;
+
   /// @brief Get string value (throws if type NODE or if conversion fails)
   std::string getString() const;
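The new Node::is*() predicates exist so callers can replace try/catch around the throwing get*() accessors with an explicit check, which is exactly the pattern the supervision hunks below adopt. A hedged sketch of that calling convention with a stand-in type (FakeNode is not the real class):

  #include <cstdint>
  #include <stdexcept>

  // Minimal stand-in mimicking the Node interface shown above.
  struct FakeNode {
    bool hasValue;
    uint64_t value;
    bool isUInt() const noexcept { return hasValue; }
    uint64_t getUInt() const {
      if (!hasValue) throw std::runtime_error("not a UInt");
      return value;
    }
  };

  int main() {
    FakeNode n{false, 0};

    // Old style: let getUInt() throw and swallow the exception.
    uint64_t a = 0;
    try { a = n.getUInt(); } catch (...) { a = 42; /* fallback */ }

    // New style: test first, no exception on the common "missing" path.
    uint64_t b = n.isUInt() ? n.getUInt() : 42;

    return a == b ? 0 : 1;
  }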
@@ -102,12 +102,12 @@ void Supervision::upgradeZero(Builder& builder) {
     { VPackObjectBuilder o(&builder);
       builder.add(VPackValue(failedServersPrefix));
       { VPackObjectBuilder oo(&builder);
-        try {
+        if (fails.length() > 0) {
           for (auto const& fail : VPackArrayIterator(fails)) {
             builder.add(VPackValue(fail.copyString()));
             { VPackObjectBuilder ooo(&builder); }
           }
-        } catch (...) {}
+        }
       }
     }
   }

@@ -316,9 +316,9 @@ std::vector<check_t> Supervision::checkCoordinators() {
       _snapshot(currentServersRegisteredPrefix).children();

   std::string currentFoxxmaster;
-  try {
+  if (_snapshot.has(foxxmaster)) {
     currentFoxxmaster = _snapshot(foxxmaster).getString();
-  } catch (...) {}
+  }

   std::string goodServerId;
   bool foxxmasterOk = false;

@@ -461,10 +461,13 @@ bool Supervision::updateSnapshot() {
     return false;
   }

-  try {
+  if (_agent->readDB().has(_agencyPrefix)) {
     _snapshot = _agent->readDB().get(_agencyPrefix);
+  }

+  if (_agent->transient().has(_agencyPrefix)) {
     _transient = _agent->transient().get(_agencyPrefix);
-  } catch (...) {}
+  }

   return true;

@@ -555,24 +558,15 @@ void Supervision::run() {

 // Guarded by caller
 bool Supervision::isShuttingDown() {
-  try {
-    return _snapshot("/Shutdown").getBool();
-  } catch (...) {
-    return false;
-  }
+  return (_snapshot.has("Shutdown") && _snapshot("Shutdown").isBool()) ?
+    _snapshot("/Shutdown").getBool() : false;
 }

 // Guarded by caller
 std::string Supervision::serverHealth(std::string const& serverName) {
-  try {
-    std::string const serverStatus(healthPrefix + serverName + "/Status");
-    auto const status = _snapshot(serverStatus).getString();
-    return status;
-  } catch (...) {
-    LOG_TOPIC(WARN, Logger::SUPERVISION)
-      << "Couldn't read server health status for server " << serverName;
-    return "";
-  }
+  std::string const serverStatus(healthPrefix + serverName + "/Status");
+  return (_snapshot.has(serverStatus)) ?
+    _snapshot(serverStatus).getString() : std::string();
 }

 // Guarded by caller
@@ -658,9 +652,9 @@ void Supervision::enforceReplication() {
     auto const& col = *(col_.second);

     size_t replicationFactor;
-    try {
-      replicationFactor = col("replicationFactor").slice().getUInt();
-    } catch (std::exception const&) {
+    if (col.has("replicationFactor") && col("replicationFactor").isUInt()) {
+      replicationFactor = col("replicationFactor").getUInt();
+    } else {
       LOG_TOPIC(DEBUG, Logger::SUPERVISION)
         << "no replicationFactor entry in " << col.toJson();
       continue;

@@ -777,11 +771,13 @@ void Supervision::shrinkCluster() {
   auto availServers = Job::availableServers(_snapshot);

   size_t targetNumDBServers;
-  try {
-    targetNumDBServers = _snapshot("/Target/NumberOfDBServers").getUInt();
-  } catch (std::exception const& e) {
+  std::string const NDBServers ("/Target/NumberOfDBServers");
+
+  if (_snapshot.has(NDBServers) && _snapshot(NDBServers).isUInt()) {
+    targetNumDBServers = _snapshot(NDBServers).getUInt();
+  } else {
     LOG_TOPIC(TRACE, Logger::SUPERVISION)
-      << "Targeted number of DB servers not set yet: " << e.what();
+      << "Targeted number of DB servers not set yet";
     return;
   }

@@ -790,7 +786,7 @@ void Supervision::shrinkCluster() {
   // Minimum 1 DB server must remain
   if (availServers.size() == 1) {
     LOG_TOPIC(DEBUG, Logger::SUPERVISION)
-        << "Only one db server left for operation";
+      << "Only one db server left for operation";
     return;
   }

@@ -810,15 +806,17 @@ void Supervision::shrinkCluster() {
   auto const& databases = _snapshot(planColPrefix).children();
   for (auto const& database : databases) {
     for (auto const& collptr : database.second->children()) {
-      try {
-        uint64_t replFact = (*collptr.second)("replicationFactor").getUInt();
+      auto const& node = *collptr.second;
+      if (node.has("replicationFactor") &&
+          node("replicationFactor").isUInt()) {
+        auto replFact = node("replicationFactor").getUInt();
         if (replFact > maxReplFact) {
           maxReplFact = replFact;
         }
-      } catch (std::exception const& e) {
+      } else {
         LOG_TOPIC(WARN, Logger::SUPERVISION)
           << "Cannot retrieve replication factor for collection "
-          << collptr.first << ": " << e.what();
+          << collptr.first;
         return;
       }
     }

@@ -835,7 +833,7 @@ void Supervision::shrinkCluster() {
       availServers.size() > targetNumDBServers) {
     // Sort servers by name
     std::sort(availServers.begin(), availServers.end());

     // Schedule last server for cleanout
     CleanOutServer(_snapshot, _agent, std::to_string(_jobId++),
                    "supervision", availServers.back()).run();
@@ -1845,6 +1845,7 @@ int ClusterInfo::ensureIndexCoordinator(
       errorMsg += trx.toJson();
+      errorMsg += "ClientId: " + result._clientId + " ";
       errorMsg += " ResultCode: " + std::to_string(result.errorCode()) + " ";
       errorMsg += " HttpCode: " + std::to_string(result.httpCode()) + " ";
       errorMsg += std::string(__FILE__) + ":" + std::to_string(__LINE__);
       resultBuilder = *resBuilder;
     }
@@ -31,8 +31,8 @@
 #include "Logger/Logger.h"
 #include "MMFiles/MMFilesCollection.h"
 #include "MMFiles/MMFilesDitch.h"
+#include "MMFiles/MMFilesEngine.h"
 #include "StorageEngine/EngineSelectorFeature.h"
 #include "StorageEngine/StorageEngine.h"
 #include "Utils/CursorRepository.h"
 #include "VocBase/LogicalCollection.h"
 #include "MMFiles/MMFilesLogfileManager.h"

@@ -51,7 +51,7 @@ void MMFilesCleanupThread::signal() {

 /// @brief cleanup event loop
 void MMFilesCleanupThread::run() {
-  StorageEngine* engine = EngineSelectorFeature::ENGINE;
+  MMFilesEngine* engine = static_cast<MMFilesEngine*>(EngineSelectorFeature::ENGINE);
   uint64_t iterations = 0;

   std::vector<arangodb::LogicalCollection*> collections;
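The repeated StorageEngine* to MMFilesEngine* change is a downcast: code inside the MMFiles subsystem may assume the selected engine is the MMFiles one, so a static_cast suffices and avoids dynamic_cast overhead. A sketch of the idea with simplified stand-in types (EngineSelectorFeature::ENGINE is the real global; these classes are not):

  struct StorageEngine { virtual ~StorageEngine() = default; };
  struct MMFilesEngine : StorageEngine {
    int openCollection() { return 0; }  // engine-specific API, illustrative
  };

  // Stand-in for EngineSelectorFeature::ENGINE.
  static StorageEngine* ENGINE = nullptr;

  int main() {
    MMFilesEngine mmfiles;
    ENGINE = &mmfiles;

    // Safe only because this code path runs exclusively with the MMFiles
    // engine selected; otherwise the cast would be undefined behaviour.
    auto* engine = static_cast<MMFilesEngine*>(ENGINE);
    return engine->openCollection();
  }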
@@ -226,8 +226,9 @@ void MMFilesCleanupThread::cleanupCollection(arangodb::LogicalCollection* collec
       return;
     }
   }

-  if (!collection->getPhysical()->isFullyCollected()) {
+  MMFilesCollection* mmColl = MMFilesCollection::toMMFilesCollection(collection->getPhysical());
+  if (!mmColl->isFullyCollected()) {
     bool isDeleted = false;

     // if there is still some garbage collection to perform,
@@ -43,6 +43,7 @@
 #include "MMFiles/MMFilesDatafileHelper.h"
 #include "MMFiles/MMFilesDocumentOperation.h"
 #include "MMFiles/MMFilesDocumentPosition.h"
+#include "MMFiles/MMFilesEngine.h"
 #include "MMFiles/MMFilesIndexElement.h"
 #include "MMFiles/MMFilesLogfileManager.h"
 #include "MMFiles/MMFilesPrimaryIndex.h"

@@ -1676,7 +1677,7 @@ int MMFilesCollection::openWorker(bool ignoreErrors) {

   try {
     // check for journals and datafiles
-    StorageEngine* engine = EngineSelectorFeature::ENGINE;
+    MMFilesEngine* engine = static_cast<MMFilesEngine*>(EngineSelectorFeature::ENGINE);
     int res = engine->openCollection(vocbase, _logicalCollection, ignoreErrors);

     if (res != TRI_ERROR_NO_ERROR) {

@@ -2240,9 +2241,8 @@ bool MMFilesCollection::dropIndex(TRI_idx_iid_t iid) {
   }

   auto cid = _logicalCollection->cid();
-  StorageEngine* engine = EngineSelectorFeature::ENGINE;
+  MMFilesEngine* engine = static_cast<MMFilesEngine*>(EngineSelectorFeature::ENGINE);
   engine->dropIndex(vocbase, cid, iid);

   {
     bool const doSync =
         application_features::ApplicationServer::getFeature<DatabaseFeature>(
@@ -234,9 +234,9 @@ class MMFilesCollection final : public PhysicalCollection {
   void open(bool ignoreErrors) override;

   /// @brief iterate all markers of a collection on load
-  int iterateMarkersOnLoad(arangodb::transaction::Methods* trx) override;
+  int iterateMarkersOnLoad(arangodb::transaction::Methods* trx);

-  bool isFullyCollected() const override;
+  bool isFullyCollected() const;

   bool doCompact() const { return _doCompact; }

@@ -330,7 +330,7 @@ class MMFilesCollection final : public PhysicalCollection {
   bool readDocumentConditional(transaction::Methods* trx,
                                DocumentIdentifierToken const& token,
                                TRI_voc_tick_t maxTick,
-                               ManagedDocumentResult& result) override;
+                               ManagedDocumentResult& result);

   int insert(arangodb::transaction::Methods* trx,
              arangodb::velocypack::Slice const newSlice,
@@ -25,9 +25,9 @@
 #include "Basics/WriteLocker.h"
 #include "MMFiles/MMFilesCollection.h"
 #include "MMFiles/MMFilesDitch.h"
+#include "MMFiles/MMFilesEngine.h"
 #include "StorageEngine/EngineSelectorFeature.h"
 #include "StorageEngine/PhysicalCollection.h"
 #include "StorageEngine/StorageEngine.h"
 #include "Utils/CollectionGuard.h"
 #include "Utils/SingleCollectionTransaction.h"
 #include "Transaction/StandaloneContext.h"

@@ -60,7 +60,7 @@ MMFilesCollectionExport::~MMFilesCollectionExport() {
 }

 void MMFilesCollectionExport::run(uint64_t maxWaitTime, size_t limit) {
-  StorageEngine* engine = EngineSelectorFeature::ENGINE;
+  MMFilesEngine* engine = static_cast<MMFilesEngine*>(EngineSelectorFeature::ENGINE);

   // try to acquire the exclusive lock on the compaction
   engine->preventCompaction(_collection->vocbase(), [this](TRI_vocbase_t* vocbase) {

@@ -81,8 +81,9 @@ void MMFilesCollectionExport::run(uint64_t maxWaitTime, size_t limit) {
     uint64_t tries = 0;
     uint64_t const maxTries = maxWaitTime / SleepTime;

+    MMFilesCollection* mmColl = MMFilesCollection::toMMFilesCollection(_collection);
     while (++tries < maxTries) {
-      if (_collection->getPhysical()->isFullyCollected()) {
+      if (mmColl->isFullyCollected()) {
        break;
      }
      usleep(SleepTime);

@@ -111,12 +112,13 @@ void MMFilesCollectionExport::run(uint64_t maxWaitTime, size_t limit) {

   _vpack.reserve(limit);

+  MMFilesCollection* mmColl = MMFilesCollection::toMMFilesCollection(_collection);
   ManagedDocumentResult mmdr;
-  trx.invokeOnAllElements(_collection->name(), [this, &limit, &trx, &mmdr](DocumentIdentifierToken const& token) {
+  trx.invokeOnAllElements(_collection->name(), [this, &limit, &trx, &mmdr, mmColl](DocumentIdentifierToken const& token) {
     if (limit == 0) {
       return false;
     }
-    if (_collection->readDocumentConditional(&trx, token, 0, mmdr)) {
+    if (mmColl->readDocumentConditional(&trx, token, 0, mmdr)) {
       _vpack.emplace_back(mmdr.vpack());
       --limit;
     }
@@ -27,8 +27,8 @@
 #include "MMFiles/MMFilesCollection.h"
 #include "MMFiles/MMFilesDitch.h"
 #include "MMFiles/MMFilesLogfileManager.h"
+#include "MMFiles/MMFilesEngine.h"
 #include "StorageEngine/EngineSelectorFeature.h"
 #include "StorageEngine/StorageEngine.h"
 #include "Transaction/Helpers.h"
 #include "Utils/CollectionGuard.h"
 #include "Utils/SingleCollectionTransaction.h"

@@ -60,7 +60,7 @@ MMFilesCollectionKeys::MMFilesCollectionKeys(TRI_vocbase_t* vocbase, std::string

 MMFilesCollectionKeys::~MMFilesCollectionKeys() {
   // remove compaction blocker
-  StorageEngine* engine = EngineSelectorFeature::ENGINE;
+  MMFilesEngine* engine = static_cast<MMFilesEngine*>(EngineSelectorFeature::ENGINE);
   engine->removeCompactionBlocker(_vocbase, _blockerId);

   if (_ditch != nullptr) {

@@ -76,7 +76,7 @@ void MMFilesCollectionKeys::create(TRI_voc_tick_t maxTick) {
   MMFilesLogfileManager::instance()->waitForCollectorQueue(
       _collection->cid(), 30.0);

-  StorageEngine* engine = EngineSelectorFeature::ENGINE;
+  MMFilesEngine* engine = static_cast<MMFilesEngine*>(EngineSelectorFeature::ENGINE);
   engine->preventCompaction(_collection->vocbase(), [this](TRI_vocbase_t* vocbase) {
     // create a ditch under the compaction lock
     _ditch = arangodb::MMFilesCollection::toMMFilesCollection(_collection)

@@ -104,9 +104,10 @@ void MMFilesCollectionKeys::create(TRI_voc_tick_t maxTick) {
   }

   ManagedDocumentResult mmdr;
+  MMFilesCollection *mmColl = MMFilesCollection::toMMFilesCollection(_collection);
   trx.invokeOnAllElements(
-      _collection->name(), [this, &trx, &maxTick, &mmdr](DocumentIdentifierToken const& token) {
-        if (_collection->readDocumentConditional(&trx, token, maxTick, mmdr)) {
+      _collection->name(), [this, &trx, &maxTick, &mmdr, &mmColl](DocumentIdentifierToken const& token) {
+        if (mmColl->readDocumentConditional(&trx, token, maxTick, mmdr)) {
          _vpack.emplace_back(mmdr.vpack());
        }
        return true;
@@ -35,10 +35,10 @@
 #include "MMFiles/MMFilesDatafileHelper.h"
 #include "MMFiles/MMFilesDatafileStatisticsContainer.h"
 #include "MMFiles/MMFilesDocumentPosition.h"
+#include "MMFiles/MMFilesEngine.h"
 #include "MMFiles/MMFilesIndexElement.h"
 #include "MMFiles/MMFilesPrimaryIndex.h"
 #include "StorageEngine/EngineSelectorFeature.h"
 #include "StorageEngine/StorageEngine.h"
 #include "Utils/SingleCollectionTransaction.h"
 #include "Transaction/StandaloneContext.h"
 #include "Transaction/Helpers.h"

@@ -856,7 +856,7 @@ void MMFilesCompactorThread::signal() {
 }

 void MMFilesCompactorThread::run() {
-  StorageEngine* engine = EngineSelectorFeature::ENGINE;
+  MMFilesEngine* engine = static_cast<MMFilesEngine*>(EngineSelectorFeature::ENGINE);
   std::vector<arangodb::LogicalCollection*> collections;
   int numCompacted = 0;
@@ -1604,30 +1604,6 @@ void MMFilesEngine::signalCleanup(TRI_vocbase_t* vocbase) {
   (*it).second->signal();
 }

-// iterate all documents of the underlying collection
-// this is called when a collection is openend, and all its documents need to be
-// added to
-// indexes etc.
-void MMFilesEngine::iterateDocuments(
-    TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
-    std::function<void(arangodb::velocypack::Slice const&)> const& cb) {}
-
-// adds a document to the storage engine
-// this will be called by the WAL collector when surviving documents are being
-// moved
-// into the storage engine's realm
-void MMFilesEngine::addDocumentRevision(
-    TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
-    arangodb::velocypack::Slice const& document) {}
-
-// removes a document from the storage engine
-// this will be called by the WAL collector when non-surviving documents are
-// being removed
-// from the storage engine's realm
-void MMFilesEngine::removeDocumentRevision(
-    TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
-    arangodb::velocypack::Slice const& document) {}
-
 /// @brief scans a collection and locates all files
 MMFilesEngineCollectionFiles MMFilesEngine::scanCollectionDirectory(
     std::string const& path) {
@@ -267,11 +267,11 @@ class MMFilesEngine final : public StorageEngine {
   // the WAL entry for index deletion will be written *after* the call
   // to "dropIndex" returns
   void dropIndex(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId,
-                 TRI_idx_iid_t id) override;
+                 TRI_idx_iid_t id);

   void dropIndexWalMarker(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId,
                           arangodb::velocypack::Slice const& data,
-                          bool writeMarker, int&) override;
+                          bool writeMarker, int&);

   void unloadCollection(TRI_vocbase_t* vocbase,
                         arangodb::LogicalCollection* collection) override;

@@ -298,67 +298,39 @@ class MMFilesEngine final : public StorageEngine {

   void signalCleanup(TRI_vocbase_t* vocbase) override;

-  // document operations
-  // -------------------
-
-  // iterate all documents of the underlying collection
-  // this is called when a collection is openend, and all its documents need to
-  // be added to
-  // indexes etc.
-  void iterateDocuments(
-      TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
-      std::function<void(arangodb::velocypack::Slice const&)> const& cb)
-      override;
-
-  // adds a document to the storage engine
-  // this will be called by the WAL collector when surviving documents are being
-  // moved
-  // into the storage engine's realm
-  void addDocumentRevision(
-      TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
-      arangodb::velocypack::Slice const& document) override;
-
-  // removes a document from the storage engine
-  // this will be called by the WAL collector when non-surviving documents are
-  // being removed
-  // from the storage engine's realm
-  void removeDocumentRevision(
-      TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
-      arangodb::velocypack::Slice const& document) override;
-
   /// @brief scans a collection and locates all files
   MMFilesEngineCollectionFiles scanCollectionDirectory(std::string const& path);

   /// @brief remove data of expired compaction blockers
-  bool cleanupCompactionBlockers(TRI_vocbase_t* vocbase) override;
+  bool cleanupCompactionBlockers(TRI_vocbase_t* vocbase);

   /// @brief insert a compaction blocker
   int insertCompactionBlocker(TRI_vocbase_t* vocbase, double ttl,
-                              TRI_voc_tick_t& id) override;
+                              TRI_voc_tick_t& id);

   /// @brief touch an existing compaction blocker
   int extendCompactionBlocker(TRI_vocbase_t* vocbase, TRI_voc_tick_t id,
-                              double ttl) override;
+                              double ttl);

   /// @brief remove an existing compaction blocker
   int removeCompactionBlocker(TRI_vocbase_t* vocbase,
-                              TRI_voc_tick_t id) override;
+                              TRI_voc_tick_t id);

   /// @brief a callback function that is run while it is guaranteed that there
   /// is no compaction ongoing
   void preventCompaction(
       TRI_vocbase_t* vocbase,
-      std::function<void(TRI_vocbase_t*)> const& callback) override;
+      std::function<void(TRI_vocbase_t*)> const& callback);

   /// @brief a callback function that is run there is no compaction ongoing
   bool tryPreventCompaction(TRI_vocbase_t* vocbase,
                             std::function<void(TRI_vocbase_t*)> const& callback,
-                            bool checkForActiveBlockers) override;
+                            bool checkForActiveBlockers);

   int shutdownDatabase(TRI_vocbase_t* vocbase) override;

   int openCollection(TRI_vocbase_t* vocbase, LogicalCollection* collection,
-                     bool ignoreErrors) override;
+                     bool ignoreErrors);

   /// @brief Add engine-specific AQL functions.
   void addAqlFunctions() override;
@@ -38,7 +38,7 @@ using namespace arangodb;

 /// @brief walk over the attribute. Also Extract sub-attributes and elements in
 /// list.
-static void ExtractWords(std::vector<std::string>& words,
+static void ExtractWords(std::set<std::string>& words,
                          VPackSlice const value,
                          size_t minWordLength,
                          int level) {

@@ -217,7 +217,7 @@ int MMFilesFulltextIndex::insert(transaction::Methods*, TRI_voc_rid_t revisionId
                                  VPackSlice const& doc, bool isRollback) {
   int res = TRI_ERROR_NO_ERROR;

-  std::vector<std::string> words = wordlist(doc);
+  std::set<std::string> words = wordlist(doc);

   if (words.empty()) {
     // TODO: distinguish the cases "empty wordlist" and "out of memory"

@@ -260,8 +260,8 @@ int MMFilesFulltextIndex::cleanup() {

 /// @brief callback function called by the fulltext index to determine the
 /// words to index for a specific document
-std::vector<std::string> MMFilesFulltextIndex::wordlist(VPackSlice const& doc) {
-  std::vector<std::string> words;
+std::set<std::string> MMFilesFulltextIndex::wordlist(VPackSlice const& doc) {
+  std::set<std::string> words;
   try {
     VPackSlice const value = doc.get(_attr);

@@ -89,7 +89,7 @@ class MMFilesFulltextIndex final : public Index {
                  TRI_voc_rid_t revisionId);

  private:
-  std::vector<std::string> wordlist(arangodb::velocypack::Slice const&);
+  std::set<std::string> wordlist(arangodb::velocypack::Slice const&);

  private:
   /// @brief the indexed attribute (path)
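The vector-to-set switch is the actual fulltext deduplication fix: std::set both drops duplicate words at insertion time and iterates them in sorted order, which is why later hunks can delete the explicit std::sort() and the adjacent-duplicate skipping. A small standalone demonstration (plain C++, not the index code):

  #include <cassert>
  #include <set>
  #include <string>
  #include <vector>

  int main() {
    std::vector<std::string> extracted = {"foo", "bar", "foo", "baz", "bar"};

    // Inserting into a std::set discards duplicates and keeps the elements
    // ordered by the default comparator (<) -- exactly what the trie insert
    // previously relied on std::sort() plus manual duplicate checks for.
    std::set<std::string> words(extracted.begin(), extracted.end());

    assert(words.size() == 3);
    assert(*words.begin() == "bar");  // sorted iteration order
    return 0;
  }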
@@ -36,6 +36,7 @@
 #include "Indexes/Index.h"
 #include "Logger/Logger.h"
 #include "MMFiles/MMFilesCollectionKeys.h"
+#include "MMFiles/MMFilesEngine.h"
 #include "MMFiles/MMFilesLogfileManager.h"
 #include "MMFiles/mmfiles-replication-dump.h"
 #include "Replication/InitialSyncer.h"

@@ -542,7 +543,7 @@ void MMFilesRestReplicationHandler::handleCommandBatch() {
       VelocyPackHelper::getNumericValue<double>(input->slice(), "ttl", 0);

   TRI_voc_tick_t id;
-  StorageEngine* engine = EngineSelectorFeature::ENGINE;
+  MMFilesEngine* engine = static_cast<MMFilesEngine*>(EngineSelectorFeature::ENGINE);
   int res = engine->insertCompactionBlocker(_vocbase, expires, id);

   if (res != TRI_ERROR_NO_ERROR) {

@@ -575,7 +576,7 @@ void MMFilesRestReplicationHandler::handleCommandBatch() {
       VelocyPackHelper::getNumericValue<double>(input->slice(), "ttl", 0);

   // now extend the blocker
-  StorageEngine* engine = EngineSelectorFeature::ENGINE;
+  MMFilesEngine* engine = static_cast<MMFilesEngine*>(EngineSelectorFeature::ENGINE);
   int res = engine->extendCompactionBlocker(_vocbase, id, expires);

   if (res == TRI_ERROR_NO_ERROR) {

@@ -591,7 +592,7 @@ void MMFilesRestReplicationHandler::handleCommandBatch() {
   TRI_voc_tick_t id =
       static_cast<TRI_voc_tick_t>(StringUtils::uint64(suffixes[1]));

-  StorageEngine* engine = EngineSelectorFeature::ENGINE;
+  MMFilesEngine* engine = static_cast<MMFilesEngine*>(EngineSelectorFeature::ENGINE);
   int res = engine->removeCompactionBlocker(_vocbase, id);

   if (res == TRI_ERROR_NO_ERROR) {

@@ -2357,7 +2358,7 @@ void MMFilesRestReplicationHandler::handleCommandCreateKeys() {
   TRI_ASSERT(col != nullptr);

   // turn off the compaction for the collection
-  StorageEngine* engine = EngineSelectorFeature::ENGINE;
+  MMFilesEngine* engine = static_cast<MMFilesEngine*>(EngineSelectorFeature::ENGINE);
   TRI_voc_tick_t id;
   int res = engine->insertCompactionBlocker(_vocbase, 1200.0, id);
@@ -25,6 +25,9 @@

 #include "Basics/locks.h"
 #include "Basics/Exceptions.h"
+#include "Basics/ReadWriteLock.h"
+#include "Basics/ReadLocker.h"
+#include "Basics/WriteLocker.h"
 #include "Logger/Logger.h"
 #include "MMFiles/mmfiles-fulltext-handles.h"
 #include "MMFiles/mmfiles-fulltext-list.h"

@@ -86,12 +89,12 @@ typedef struct node_s {
 } node_t;

 /// @brief the actual fulltext index
-typedef struct {
+struct index__t {
   node_t* _root;  // root node of the index

   TRI_fulltext_handles_t* _handles;  // handles management instance

-  TRI_read_write_lock_t _lock;
+  arangodb::basics::ReadWriteLock _lock;

   size_t _memoryAllocated;  // total memory used by index
 #if TRI_FULLTEXT_DEBUG

@@ -103,7 +106,7 @@ typedef struct {

   uint32_t _nodeChunkSize;       // how many sub-nodes to allocate per chunk
   uint32_t _initialNodeHandles;  // how many handles to allocate per node
-} index__t;
+};

 static uint32_t NodeNumFollowers(const node_t* const);
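Replacing TRI_read_write_lock_t with arangodb::basics::ReadWriteLock is what lets the hunks below use the WRITE_LOCKER / READ_LOCKER / TRY_WRITE_LOCKER RAII guards instead of paired lock/unlock calls, so early returns can no longer leak a held lock. A sketch of the same pattern using the standard library (std::shared_mutex stands in for ReadWriteLock here):

  #include <mutex>
  #include <shared_mutex>

  static std::shared_mutex lock;  // stand-in for arangodb::basics::ReadWriteLock

  bool insertSomething(bool failEarly) {
    // The guard acquires the write lock; its destructor releases it on
    // *every* path out of the function, including the early return below.
    std::unique_lock<std::shared_mutex> guard(lock);
    if (failEarly) {
      return false;  // no explicit unlock needed, unlike the removed
                     // TRI_WriteUnlockReadWriteLock() calls
    }
    return true;
  }

  int main() { return insertSomething(false) ? 0 : 1; }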
@@ -1099,8 +1102,7 @@ static inline size_t CommonPrefixLength(std::string const& left,
 TRI_fts_index_t* TRI_CreateFtsIndex(uint32_t handleChunkSize,
                                     uint32_t nodeChunkSize,
                                     uint32_t initialNodeHandles) {
-  index__t* idx = static_cast<index__t*>(
-      TRI_Allocate(TRI_UNKNOWN_MEM_ZONE, sizeof(index__t), false));
+  index__t* idx = new index__t();

   if (idx == nullptr) {
     return nullptr;

@@ -1140,14 +1142,12 @@ TRI_fts_index_t* TRI_CreateFtsIndex(uint32_t handleChunkSize,
   idx->_memoryBase += sizeof(TRI_fulltext_handles_t);
 #endif

-  TRI_InitReadWriteLock(&idx->_lock);
-
   return (TRI_fts_index_t*)idx;
 }

 /// @brief free the fulltext index
 void TRI_FreeFtsIndex(TRI_fts_index_t* ftx) {
-  index__t* idx = (index__t*)ftx;
+  index__t* idx = static_cast<index__t*>(ftx);

   // free root node (this will recursively free all other nodes)
   FreeNode(idx, idx->_root);

@@ -1165,14 +1165,12 @@ void TRI_FreeFtsIndex(TRI_fts_index_t* ftx) {
   TRI_ASSERT(idx->_memoryAllocated == sizeof(index__t));
 #endif

-  TRI_DestroyReadWriteLock(&idx->_lock);
-
   // free index itself
-  TRI_Free(TRI_UNKNOWN_MEM_ZONE, idx);
+  delete idx;
 }

 void TRI_TruncateMMFilesFulltextIndex(TRI_fts_index_t* ftx) {
-  index__t* idx = (index__t*)ftx;
+  index__t* idx = static_cast<index__t*>(ftx);

   // free root node (this will recursively free all other nodes)
   FreeNode(idx, idx->_root);

@@ -1213,11 +1211,10 @@ void TRI_TruncateMMFilesFulltextIndex(TRI_fts_index_t* ftx) {
 /// @brief delete a document from the index
 void TRI_DeleteDocumentMMFilesFulltextIndex(TRI_fts_index_t* const ftx,
                                             const TRI_voc_rid_t document) {
-  index__t* idx = (index__t*)ftx;
+  index__t* idx = static_cast<index__t*>(ftx);

-  TRI_WriteLockReadWriteLock(&idx->_lock);
+  WRITE_LOCKER(guard, idx->_lock);
   TRI_DeleteDocumentHandleMMFilesFulltextIndex(idx->_handles, document);
-  TRI_WriteUnlockReadWriteLock(&idx->_lock);
 }

 /// @brief insert a list of words into the index
@@ -1231,7 +1228,7 @@ void TRI_DeleteDocumentMMFilesFulltextIndex(TRI_fts_index_t* const ftx,
 /// prefixes
 bool TRI_InsertWordsMMFilesFulltextIndex(TRI_fts_index_t* const ftx,
                                          const TRI_voc_rid_t document,
-                                         std::vector<std::string>& wordlist) {
+                                         std::set<std::string>& wordlist) {
   index__t* idx;
   TRI_fulltext_handle_t handle;
   node_t* paths[MAX_WORD_BYTES + 4];

@@ -1250,16 +1247,15 @@ bool TRI_InsertWordsMMFilesFulltextIndex(TRI_fts_index_t* const ftx,
   // for words with common prefixes (which will be adjacent in the sorted list
   // of words)
   // The default comparator (<) is exactly what we need here
-  std::sort(wordlist.begin(), wordlist.end());
+  //std::sort(wordlist.begin(), wordlist.end());

-  idx = (index__t*)ftx;
+  idx = static_cast<index__t*>(ftx);

-  TRI_WriteLockReadWriteLock(&idx->_lock);
+  WRITE_LOCKER(guard, idx->_lock);

   // get a new handle for the document
   handle = TRI_InsertHandleMMFilesFulltextIndex(idx->_handles, document);
   if (handle == 0) {
-    TRI_WriteUnlockReadWriteLock(&idx->_lock);
     return false;
   }

@@ -1268,19 +1264,15 @@ bool TRI_InsertWordsMMFilesFulltextIndex(TRI_fts_index_t* const ftx,
   // start for the 1st word inserted
   paths[0] = idx->_root;
   lastLength = 0;

-  size_t w = 0;
-  size_t numWords = wordlist.size();
-  while (w < numWords) {
+  std::string const* prev = nullptr;
+  for (std::string const& tmp : wordlist) {
     node_t* node;
     char const* p;
     size_t start;
     size_t i;

-    // LOG_TOPIC(DEBUG, arangodb::Logger::FIXME) << "checking word " << wordlist->_words[w];
-
-    if (w > 0) {
-      std::string const& tmp = wordlist[w];
-
+    if (prev != nullptr) {
       // check if current word has a shared/common prefix with the previous word
       // inserted
       // in case this is true, we can use an optimisation and do not need to

@@ -1288,70 +1280,66 @@ bool TRI_InsertWordsMMFilesFulltextIndex(TRI_fts_index_t* const ftx,
       // tree from the root again. instead, we just start at the node at the end
       // of the
       // shared/common prefix. this will save us a lot of tree lookups
-      start = CommonPrefixLength(wordlist[w - 1], tmp);
+      start = CommonPrefixLength(*prev, tmp);
       if (start > MAX_WORD_BYTES) {
         start = MAX_WORD_BYTES;
       }

       // check if current word is the same as the last word. we do not want to
       // insert the
       // same word multiple times for the same document
       if (start > 0 && start == lastLength &&
           start == tmp.size()) {
         // duplicate word, skip it and continue with next word
-        w++;
         continue;
       }
     } else {
       start = 0;
     }

+    prev = &tmp;
+
     // for words with common prefixes, use the most appropriate start node we
     // do not need to traverse the tree from the root again
     node = paths[start];
 #if TRI_FULLTEXT_DEBUG
     TRI_ASSERT(node != nullptr);
 #endif

     // now insert into the tree, starting at the next character after the common
     // prefix
-    std::string tmp = wordlist[w++].substr(start);
-    p = tmp.c_str();
+    //std::string suffix = tmp.substr(start);
+    p = tmp.c_str() + start;

     for (i = start; *p && i <= MAX_WORD_BYTES; ++i) {
       node_char_t c = (node_char_t) * (p++);

 #if TRI_FULLTEXT_DEBUG
       TRI_ASSERT(node != nullptr);
 #endif

       node = EnsureSubNode(idx, node, c);
       if (node == nullptr) {
-        TRI_WriteUnlockReadWriteLock(&idx->_lock);
         return false;
       }

 #if TRI_FULLTEXT_DEBUG
       TRI_ASSERT(node != nullptr);
 #endif

       paths[i + 1] = node;
     }

     if (!InsertHandle(idx, node, handle)) {
       // document was added at least once, mark it as deleted
       TRI_DeleteDocumentHandleMMFilesFulltextIndex(idx->_handles, document);
-      TRI_WriteUnlockReadWriteLock(&idx->_lock);
       return false;
     }

     // store length of word just inserted
     // we'll use that to compare with the next word for duplicate removal
     lastLength = i;
   }

-  TRI_WriteUnlockReadWriteLock(&idx->_lock);
-
   return true;
 }
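The rewritten insert loop keeps the prefix optimisation: because std::set iterates in sorted order, consecutive words usually share a prefix, and the trie descent can resume at the first character after it instead of starting from the root. A standalone sketch of the helper's behaviour (the real CommonPrefixLength implementation is not shown in this diff; this is a hypothetical reimplementation for illustration only):

  #include <algorithm>
  #include <cassert>
  #include <string>

  static size_t commonPrefixLength(std::string const& a, std::string const& b) {
    size_t const n = std::min(a.size(), b.size());
    size_t i = 0;
    while (i < n && a[i] == b[i]) {
      ++i;
    }
    return i;
  }

  int main() {
    // Sorted neighbours share "fooba": the trie walk for "foobaz" can resume
    // at depth 5 instead of descending from the root again.
    assert(commonPrefixLength("foobar", "foobaz") == 5);
    return 0;
  }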
@@ -1393,9 +1381,9 @@ TRI_fulltext_result_t* TRI_QueryMMFilesFulltextIndex(TRI_fts_index_t* const ftx,

   auto maxResults = query->_maxResults;

-  idx = (index__t*)ftx;
+  idx = static_cast<index__t*>(ftx);

-  TRI_ReadLockReadWriteLock(&idx->_lock);
+  READ_LOCKER(guard, idx->_lock);

   // initial result is empty
   result = nullptr;

@@ -1463,22 +1451,20 @@ TRI_fulltext_result_t* TRI_QueryMMFilesFulltextIndex(TRI_fts_index_t* const ftx,

   if (result == nullptr) {
     // if we haven't found anything...
-    TRI_ReadUnlockReadWriteLock(&idx->_lock);
     return TRI_CreateResultMMFilesFulltextIndex(0);
   }

   // now convert the handle list into a result (this will also filter out
   // deleted documents)
   TRI_fulltext_result_t* r = MakeListResult(idx, result, maxResults);
-  TRI_ReadUnlockReadWriteLock(&idx->_lock);

   return r;
 }

 /// @brief dump index tree
 #if TRI_FULLTEXT_DEBUG
-void TRI_DumpTreeFtsIndex(const TRI_fts_index_t* const ftx) {
-  index__t* idx = (index__t*)ftx;
+void TRI_DumpTreeFtsIndex(TRI_fts_index_t* ftx) {
+  index__t* idx = static_cast<index__t*>(ftx);

   TRI_DumpHandleMMFilesFulltextIndex(idx->_handles);
   DumpNode(idx->_root, 0);

@@ -1487,8 +1473,8 @@ void TRI_DumpTreeFtsIndex(const TRI_fts_index_t* const ftx) {

 /// @brief dump index statistics
 #if TRI_FULLTEXT_DEBUG
-void TRI_DumpStatsFtsIndex(const TRI_fts_index_t* const ftx) {
-  index__t* idx = (index__t*)ftx;
+void TRI_DumpStatsFtsIndex(TRI_fts_index_t* ftx) {
+  index__t* idx = static_cast<index__t*>(ftx);
   TRI_fulltext_stats_t stats;

   stats = TRI_StatsMMFilesFulltextIndex(idx);

@@ -1513,14 +1499,12 @@ void TRI_DumpStatsFtsIndex(const TRI_fts_index_t* const ftx) {
 #endif

 /// @brief return stats about the index
-TRI_fulltext_stats_t TRI_StatsMMFilesFulltextIndex(const TRI_fts_index_t* const ftx) {
-  index__t* idx;
+TRI_fulltext_stats_t TRI_StatsMMFilesFulltextIndex(TRI_fts_index_t* ftx) {
+  index__t* idx = static_cast<index__t*>(ftx);
+
+  READ_LOCKER(guard, idx->_lock);

   TRI_fulltext_stats_t stats;

-  idx = (index__t*)ftx;
-
-  TRI_ReadLockReadWriteLock(&idx->_lock);
-
   stats._memoryTotal = TRI_MemoryMMFilesFulltextIndex(idx);
 #if TRI_FULLTEXT_DEBUG
   stats._memoryOwn = idx->_memoryAllocated;

@@ -1547,14 +1531,14 @@ TRI_fulltext_stats_t TRI_StatsMMFilesFulltextIndex(const TRI_fts_index_t* const
     stats._shouldCompact = false;
   }

-  TRI_ReadUnlockReadWriteLock(&idx->_lock);
-
   return stats;
 }

 /// @brief return the total memory used by the index
-size_t TRI_MemoryMMFilesFulltextIndex(const TRI_fts_index_t* const ftx) {
-  index__t* idx = (index__t*)ftx;
+size_t TRI_MemoryMMFilesFulltextIndex(TRI_fts_index_t* ftx) {
+  // no need to lock here, as we are called from under a lock already
+  index__t* idx = static_cast<index__t*>(ftx);

   if (idx->_handles != nullptr) {
     return idx->_memoryAllocated + TRI_MemoryHandleMMFilesFulltextIndex(idx->_handles);

@@ -1565,21 +1549,18 @@ size_t TRI_MemoryMMFilesFulltextIndex(const TRI_fts_index_t* const ftx) {

 /// @brief compact the fulltext index
 /// note: the caller must hold a lock on the index before called this
-bool TRI_CompactMMFilesFulltextIndex(TRI_fts_index_t* const ftx) {
-  index__t* idx;
-  TRI_fulltext_handles_t* clone;
-
-  idx = (index__t*)ftx;
+bool TRI_CompactMMFilesFulltextIndex(TRI_fts_index_t* ftx) {
+  index__t* idx = static_cast<index__t*>(ftx);

   // but don't block if the index is busy
   // try to acquire the write lock to clean up
-  if (!TRI_TryWriteLockReadWriteLock(&idx->_lock)) {
+  TRY_WRITE_LOCKER(guard, idx->_lock);
+  if (!guard.isLocked()) {
     return true;
   }

   if (!TRI_ShouldCompactHandleMMFilesFulltextIndex(idx->_handles)) {
     // not enough cleanup work to do
-    TRI_WriteUnlockReadWriteLock(&idx->_lock);
     return true;
   }

@@ -1587,9 +1568,8 @@ bool TRI_CompactMMFilesFulltextIndex(TRI_fts_index_t* const ftx) {
   // re-align the handle numbers consecutively, starting at 1.
   // this will also populate the _map property, which can be used to clean up
   // handles of existing nodes
-  clone = TRI_CompactHandleMMFilesFulltextIndex(idx->_handles);
+  TRI_fulltext_handles_t* clone = TRI_CompactHandleMMFilesFulltextIndex(idx->_handles);
   if (clone == nullptr) {
-    TRI_WriteUnlockReadWriteLock(&idx->_lock);
     return false;
   }

@@ -1604,7 +1584,6 @@ bool TRI_CompactMMFilesFulltextIndex(TRI_fts_index_t* const ftx) {

   // cleanup finished, now switch over
   idx->_handles = clone;
-  TRI_WriteUnlockReadWriteLock(&idx->_lock);

   return true;
 }
@@ -72,7 +72,7 @@ void TRI_DeleteDocumentMMFilesFulltextIndex(TRI_fts_index_t* const,
 /// @brief insert a list of words to the index
 bool TRI_InsertWordsMMFilesFulltextIndex(TRI_fts_index_t* const,
                                          const TRI_voc_rid_t,
-                                         std::vector<std::string>&);
+                                         std::set<std::string>&);

 /// @brief find all documents that contain a word (exact match)
 #if 0

@@ -95,21 +95,21 @@ struct TRI_fulltext_result_s* TRI_QueryMMFilesFulltextIndex(

 /// @brief dump index tree
 #if TRI_FULLTEXT_DEBUG
-void TRI_DumpTreeFtsIndex(const TRI_fts_index_t* const);
+void TRI_DumpTreeFtsIndex(TRI_fts_index_t*);
 #endif

 /// @brief dump index statistics
 #if TRI_FULLTEXT_DEBUG
-void TRI_DumpStatsFtsIndex(const TRI_fts_index_t* const);
+void TRI_DumpStatsFtsIndex(TRI_fts_index_t*);
 #endif

 /// @brief return stats about the index
-TRI_fulltext_stats_t TRI_StatsMMFilesFulltextIndex(const TRI_fts_index_t* const);
+TRI_fulltext_stats_t TRI_StatsMMFilesFulltextIndex(TRI_fts_index_t*);

 /// @brief return the total memory used by the index
-size_t TRI_MemoryMMFilesFulltextIndex(const TRI_fts_index_t* const);
+size_t TRI_MemoryMMFilesFulltextIndex(TRI_fts_index_t*);

 /// @brief compact the fulltext index
-bool TRI_CompactMMFilesFulltextIndex(TRI_fts_index_t* const);
+bool TRI_CompactMMFilesFulltextIndex(TRI_fts_index_t*);

 #endif
@@ -194,18 +194,6 @@ void RocksDBCollection::open(bool ignoreErrors) {
   }
 }

-/// @brief iterate all markers of a collection on load
-int RocksDBCollection::iterateMarkersOnLoad(
-    arangodb::transaction::Methods* trx) {
-  THROW_ARANGO_NOT_YET_IMPLEMENTED();
-  return 0;
-}
-
-bool RocksDBCollection::isFullyCollected() const {
-  THROW_ARANGO_NOT_YET_IMPLEMENTED();
-  return false;
-}
-
 void RocksDBCollection::prepareIndexes(
     arangodb::velocypack::Slice indexesSlice) {
   TRI_ASSERT(indexesSlice.isArray());

@@ -837,15 +825,6 @@ bool RocksDBCollection::readDocument(transaction::Methods* trx,
   return res.ok();
 }

-bool RocksDBCollection::readDocumentConditional(
-    transaction::Methods* trx, DocumentIdentifierToken const& token,
-    TRI_voc_tick_t maxTick, ManagedDocumentResult& result) {
-  // should not be called for RocksDB engine. TODO: move this out of general
-  // API!
-  THROW_ARANGO_NOT_YET_IMPLEMENTED();
-  return false;
-}
-
 int RocksDBCollection::insert(arangodb::transaction::Methods* trx,
                               arangodb::velocypack::Slice const slice,
                               arangodb::ManagedDocumentResult& mdr,

@@ -80,11 +80,6 @@ class RocksDBCollection final : public PhysicalCollection {
   size_t memory() const override;
   void open(bool ignoreErrors) override;

-  /// @brief iterate all markers of a collection on load
-  int iterateMarkersOnLoad(arangodb::transaction::Methods* trx) override;
-
-  bool isFullyCollected() const override;
-
   ////////////////////////////////////
   // -- SECTION Indexes --
   ///////////////////////////////////

@@ -133,11 +128,6 @@ class RocksDBCollection final : public PhysicalCollection {
                     DocumentIdentifierToken const& token,
                     ManagedDocumentResult& result) override;

-  bool readDocumentConditional(transaction::Methods* trx,
-                               DocumentIdentifierToken const& token,
-                               TRI_voc_tick_t maxTick,
-                               ManagedDocumentResult& result) override;
-
   int insert(arangodb::transaction::Methods* trx,
              arangodb::velocypack::Slice const newSlice,
              arangodb::ManagedDocumentResult& result, OperationOptions& options,
@@ -52,30 +52,7 @@ RocksDBCollectionExport::RocksDBCollectionExport(

 RocksDBCollectionExport::~RocksDBCollectionExport() {}

-void RocksDBCollectionExport::run(uint64_t maxWaitTime, size_t limit) {
-
-  // none of this should matter on rocksdb
-  // try to acquire the exclusive lock on the compaction
-  /*
-  StorageEngine* engine = EngineSelectorFeature::ENGINE;
-  engine->preventCompaction(_collection->vocbase(),
-                            [this](TRI_vocbase_t* vocbase) {
-                              // TODO: do something under compaction lock?
-                            });
-
-  {
-    static uint64_t const SleepTime = 10000;
-
-    uint64_t tries = 0;
-    uint64_t const maxTries = maxWaitTime / SleepTime;
-
-    while (++tries < maxTries) {
-      if (_collection->getPhysical()->isFullyCollected()) {
-        break;
-      }
-      usleep(SleepTime);
-    }
-  }*/
+void RocksDBCollectionExport::run(size_t limit) {

   {
     SingleCollectionTransaction trx(

@@ -110,10 +87,6 @@ void RocksDBCollectionExport::run(uint64_t maxWaitTime, size_t limit) {
         _vpack.emplace_back(VPackSlice(mmdr.vpack()));
         --limit;
       }
-      /*if (_collection->readDocumentConditional(&trx, token, 0, mmdr)) {
-        _vpack.emplace_back(VPackSlice(mmdr.vpack()));
-        --limit;
-      }*/
       return true;
     });

@@ -61,7 +61,7 @@ class RocksDBCollectionExport {
   ~RocksDBCollectionExport();

  public:
-  void run(uint64_t, size_t);
+  void run(size_t);

 private:
  std::unique_ptr<arangodb::CollectionGuard> _guard;
|
@ -527,8 +527,7 @@ std::string RocksDBEngine::collectionPath(TRI_vocbase_t const* vocbase,
|
|||
}
|
||||
|
||||
void RocksDBEngine::waitForSync(TRI_voc_tick_t tick) {
|
||||
// TODO: does anything need to be done here?
|
||||
// THROW_ARANGO_NOT_YET_IMPLEMENTED();
|
||||
// intentionally empty, not useful for this type of engine
|
||||
}
|
||||
|
||||
std::shared_ptr<arangodb::velocypack::Builder>
|
||||
|
@ -835,20 +834,6 @@ void RocksDBEngine::createIndex(TRI_vocbase_t* vocbase,
|
|||
TRI_idx_iid_t indexId,
|
||||
arangodb::velocypack::Slice const& data) {}
|
||||
|
||||
void RocksDBEngine::dropIndex(TRI_vocbase_t* vocbase,
|
||||
TRI_voc_cid_t collectionId, TRI_idx_iid_t iid) {
|
||||
// probably not required
|
||||
THROW_ARANGO_NOT_YET_IMPLEMENTED();
|
||||
}
|
||||
|
||||
void RocksDBEngine::dropIndexWalMarker(TRI_vocbase_t* vocbase,
|
||||
TRI_voc_cid_t collectionId,
|
||||
arangodb::velocypack::Slice const& data,
|
||||
bool writeMarker, int&) {
|
||||
// probably not required
|
||||
THROW_ARANGO_NOT_YET_IMPLEMENTED();
|
||||
}
|
||||
|
||||
void RocksDBEngine::unloadCollection(TRI_vocbase_t* vocbase,
|
||||
arangodb::LogicalCollection* collection) {
|
||||
// TODO: does anything else have to happen?
|
||||
|
@ -892,80 +877,10 @@ void RocksDBEngine::signalCleanup(TRI_vocbase_t*) {
|
|||
// nothing to do here
|
||||
}
|
||||
|
||||
// document operations
|
||||
// -------------------
|
||||
void RocksDBEngine::iterateDocuments(
|
||||
TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
|
||||
std::function<void(arangodb::velocypack::Slice const&)> const& cb) {
|
||||
THROW_ARANGO_NOT_YET_IMPLEMENTED();
|
||||
}
|
||||
|
||||
void RocksDBEngine::addDocumentRevision(
|
||||
TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
|
||||
arangodb::velocypack::Slice const& document) {
|
||||
THROW_ARANGO_NOT_YET_IMPLEMENTED();
|
||||
}
|
||||
|
||||
void RocksDBEngine::removeDocumentRevision(
|
||||
TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
|
||||
arangodb::velocypack::Slice const& document) {
|
||||
THROW_ARANGO_NOT_YET_IMPLEMENTED();
|
||||
}
|
||||
|
||||
/// @brief remove data of expired compaction blockers
|
||||
bool RocksDBEngine::cleanupCompactionBlockers(TRI_vocbase_t* vocbase) {
|
||||
THROW_ARANGO_NOT_YET_IMPLEMENTED();
|
||||
return true;
|
||||
}
|
||||
|
||||
/// @brief insert a compaction blocker
|
||||
int RocksDBEngine::insertCompactionBlocker(TRI_vocbase_t* vocbase, double ttl,
|
||||
TRI_voc_tick_t& id) {
|
||||
// THROW_ARANGO_NOT_YET_IMPLEMENTED();
|
||||
return TRI_ERROR_NO_ERROR;
|
||||
}
|
||||
|
||||
/// @brief touch an existing compaction blocker
|
||||
int RocksDBEngine::extendCompactionBlocker(TRI_vocbase_t* vocbase,
|
||||
TRI_voc_tick_t id, double ttl) {
|
||||
// THROW_ARANGO_NOT_YET_IMPLEMENTED();
|
||||
return TRI_ERROR_NO_ERROR;
|
||||
}
|
||||
|
||||
/// @brief remove an existing compaction blocker
|
||||
int RocksDBEngine::removeCompactionBlocker(TRI_vocbase_t* vocbase,
|
||||
TRI_voc_tick_t id) {
|
||||
THROW_ARANGO_NOT_YET_IMPLEMENTED();
|
||||
return true;
|
||||
}
|
||||
|
||||
/// @brief a callback function that is run while it is guaranteed that there
|
||||
/// is no compaction ongoing
|
||||
void RocksDBEngine::preventCompaction(
|
||||
TRI_vocbase_t* vocbase,
|
||||
std::function<void(TRI_vocbase_t*)> const& callback) {
|
||||
THROW_ARANGO_NOT_YET_IMPLEMENTED();
|
||||
}
|
||||
|
||||
/// @brief a callback function that is run there is no compaction ongoing
|
||||
bool RocksDBEngine::tryPreventCompaction(
|
||||
TRI_vocbase_t* vocbase, std::function<void(TRI_vocbase_t*)> const& callback,
|
||||
bool checkForActiveBlockers) {
|
||||
THROW_ARANGO_NOT_YET_IMPLEMENTED();
|
||||
return true;
|
||||
}
|
||||
|
||||
int RocksDBEngine::shutdownDatabase(TRI_vocbase_t* vocbase) {
|
||||
return TRI_ERROR_NO_ERROR;
|
||||
}
|
||||
|
||||
int RocksDBEngine::openCollection(TRI_vocbase_t* vocbase,
|
||||
LogicalCollection* collection,
|
||||
bool ignoreErrors) {
|
||||
THROW_ARANGO_NOT_YET_IMPLEMENTED();
|
||||
return 0;
|
||||
}
|
||||
|
||||
/// @brief Add engine-specific AQL functions.
|
||||
void RocksDBEngine::addAqlFunctions() {
|
||||
RocksDBAqlFunctions::registerResources();
|
||||
|
@@ -1007,16 +922,16 @@ std::pair<TRI_voc_tick_t, TRI_voc_cid_t> RocksDBEngine::mapObjectToCollection(
   return it->second;
 }

-bool RocksDBEngine::syncWal() {
+arangodb::Result RocksDBEngine::syncWal() {
 #ifdef _WIN32
   // SyncWAL always reports "not implemented" on Windows
-  return true;
+  return arangodb::Result();
 #else
   rocksdb::Status status = _db->GetBaseDB()->SyncWAL();
   if (!status.ok()) {
-    return false;
+    return rocksutils::convertStatus(status);
   }
-  return true;
+  return arangodb::Result();
 #endif
 }
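Changing syncWal() from bool to arangodb::Result keeps the RocksDB error details instead of flattening them to false; the conversion goes through rocksutils::convertStatus(). A sketch of the same idea with simplified stand-in types (not the real Result/Status classes):

  #include <string>

  struct Status {  // stand-in for rocksdb::Status
    bool ok_;
    std::string msg;
    bool ok() const { return ok_; }
  };
  struct Result {  // stand-in for arangodb::Result
    int code = 0;
    std::string message;
    bool ok() const { return code == 0; }
  };

  static Result convertStatus(Status const& s) {
    return Result{1, s.msg};  // keep the engine's error text
  }

  static Result syncWal(Status const& s) {
    if (!s.ok()) {
      return convertStatus(s);  // caller learns *why* the sync failed
    }
    return Result{};  // success, default-constructed Result
  }

  int main() { return syncWal(Status{true, ""}).ok() ? 0 : 1; }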
|
@ -172,11 +172,6 @@ class RocksDBEngine final : public StorageEngine {
|
|||
void createIndex(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId,
|
||||
TRI_idx_iid_t id,
|
||||
arangodb::velocypack::Slice const& data) override;
|
||||
void dropIndex(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId,
|
||||
TRI_idx_iid_t id) override;
|
||||
void dropIndexWalMarker(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId,
|
||||
arangodb::velocypack::Slice const& data,
|
||||
bool writeMarker, int&) override;
|
||||
void unloadCollection(TRI_vocbase_t* vocbase,
|
||||
arangodb::LogicalCollection* collection) override;
|
||||
void createView(TRI_vocbase_t* vocbase, TRI_voc_cid_t id,
|
||||
|
@ -192,50 +187,8 @@ class RocksDBEngine final : public StorageEngine {
|
|||
arangodb::LogicalView const*, bool doSync) override;
|
||||
void signalCleanup(TRI_vocbase_t* vocbase) override;
|
||||
|
||||
// document operations
|
||||
// -------------------
|
||||
void iterateDocuments(
|
||||
TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
|
||||
std::function<void(arangodb::velocypack::Slice const&)> const& cb)
|
||||
override;
|
||||
void addDocumentRevision(
|
||||
TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
|
||||
arangodb::velocypack::Slice const& document) override;
|
||||
void removeDocumentRevision(
|
||||
TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
|
||||
arangodb::velocypack::Slice const& document) override;
|
||||
|
||||
/// @brief remove data of expired compaction blockers
|
||||
bool cleanupCompactionBlockers(TRI_vocbase_t* vocbase) override;
|
||||
|
||||
/// @brief insert a compaction blocker
|
||||
int insertCompactionBlocker(TRI_vocbase_t* vocbase, double ttl,
|
||||
TRI_voc_tick_t& id) override;
|
||||
|
||||
/// @brief touch an existing compaction blocker
|
||||
int extendCompactionBlocker(TRI_vocbase_t* vocbase, TRI_voc_tick_t id,
|
||||
double ttl) override;
|
||||
|
||||
/// @brief remove an existing compaction blocker
|
||||
int removeCompactionBlocker(TRI_vocbase_t* vocbase,
|
||||
TRI_voc_tick_t id) override;
|
||||
|
||||
/// @brief a callback function that is run while it is guaranteed that there
|
||||
/// is no compaction ongoing
|
||||
void preventCompaction(
|
||||
TRI_vocbase_t* vocbase,
|
||||
std::function<void(TRI_vocbase_t*)> const& callback) override;
|
||||
|
||||
/// @brief a callback function that is run there is no compaction ongoing
|
||||
bool tryPreventCompaction(TRI_vocbase_t* vocbase,
|
||||
std::function<void(TRI_vocbase_t*)> const& callback,
|
||||
bool checkForActiveBlockers) override;
|
||||
|
||||
int shutdownDatabase(TRI_vocbase_t* vocbase) override;
|
||||
|
||||
int openCollection(TRI_vocbase_t* vocbase, LogicalCollection* collection,
|
||||
bool ignoreErrors) override;
|
||||
|
||||
/// @brief Add engine-specific AQL functions.
|
||||
void addAqlFunctions() override;
|
||||
|
||||
|
@ -281,7 +234,7 @@ class RocksDBEngine final : public StorageEngine {
|
|||
static std::string const FeatureName;
|
||||
RocksDBCounterManager* counterManager() const;
|
||||
RocksDBReplicationManager* replicationManager() const;
|
||||
bool syncWal();
|
||||
arangodb::Result syncWal();
|
||||
|
||||
private:
|
||||
/// single rocksdb database used in this storage engine
|
||||
|
|
|
@ -192,11 +192,10 @@ bool RocksDBFulltextIndex::matchesDefinition(VPackSlice const& info) const {
|
|||
int RocksDBFulltextIndex::insert(transaction::Methods* trx,
|
||||
TRI_voc_rid_t revisionId,
|
||||
VPackSlice const& doc, bool isRollback) {
|
||||
std::vector<std::string> words = wordlist(doc);
|
||||
std::set<std::string> words = wordlist(doc);
|
||||
if (words.empty()) {
|
||||
return TRI_ERROR_NO_ERROR;
|
||||
}
|
||||
|
||||
RocksDBTransactionState* state = rocksutils::toRocksTransactionState(trx);
|
||||
rocksdb::Transaction* rtrx = state->rocksTransaction();
|
||||
|
||||
|
@ -206,10 +205,8 @@ int RocksDBFulltextIndex::insert(transaction::Methods* trx,
|
|||
RocksDBValue value = RocksDBValue::IndexValue();
|
||||
|
||||
int res = TRI_ERROR_NO_ERROR;
|
||||
size_t const count = words.size();
|
||||
size_t i = 0;
|
||||
for (; i < count; ++i) {
|
||||
std::string const& word = words[i];
|
||||
// size_t const count = words.size();
|
||||
for (std::string const& word : words) {
|
||||
RocksDBKey key =
|
||||
RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), docKey);
|
||||
|
||||
|
@ -220,21 +217,21 @@ int RocksDBFulltextIndex::insert(transaction::Methods* trx,
|
|||
break;
|
||||
}
|
||||
}
|
||||
if (res != TRI_ERROR_NO_ERROR) {
|
||||
/*if (res != TRI_ERROR_NO_ERROR) {
|
||||
for (size_t j = 0; j < i; ++j) {
|
||||
std::string const& word = words[j];
|
||||
RocksDBKey key =
|
||||
RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), docKey);
|
||||
rtrx->Delete(key.string());
|
||||
}
|
||||
}
|
||||
}*/
|
||||
return res;
|
||||
}
|
||||
|
||||
int RocksDBFulltextIndex::insertRaw(rocksdb::WriteBatchWithIndex* batch,
|
||||
TRI_voc_rid_t,
|
||||
arangodb::velocypack::Slice const& doc) {
|
||||
std::vector<std::string> words = wordlist(doc);
|
||||
std::set<std::string> words = wordlist(doc);
|
||||
if (words.empty()) {
|
||||
return TRI_ERROR_NO_ERROR;
|
||||
}
|
||||
|
@@ -244,9 +241,7 @@ int RocksDBFulltextIndex::insertRaw(rocksdb::WriteBatchWithIndex* batch,
 StringRef docKey(doc.get(StaticStrings::KeyString));
 RocksDBValue value = RocksDBValue::IndexValue();
 
-size_t const count = words.size();
-for (size_t i = 0; i < count; ++i) {
-std::string const& word = words[i];
+for (std::string const& word : words) {
 RocksDBKey key =
 RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), docKey);
 batch->Put(key.string(), value.string());
@@ -258,7 +253,7 @@ int RocksDBFulltextIndex::insertRaw(rocksdb::WriteBatchWithIndex* batch,
 int RocksDBFulltextIndex::remove(transaction::Methods* trx,
 TRI_voc_rid_t revisionId,
 VPackSlice const& doc, bool isRollback) {
-std::vector<std::string> words = wordlist(doc);
+std::set<std::string> words = wordlist(doc);
 if (words.empty()) {
 // TODO: distinguish the cases "empty wordlist" and "out of memory"
 // LOG_TOPIC(WARN, arangodb::Logger::FIXME) << "could not build wordlist";
@@ -272,9 +267,7 @@ int RocksDBFulltextIndex::remove(transaction::Methods* trx,
 // unique indexes have a different key structure
 StringRef docKey(doc.get(StaticStrings::KeyString));
 int res = TRI_ERROR_NO_ERROR;
-size_t const count = words.size();
-for (size_t i = 0; i < count; ++i) {
-std::string const& word = words[i];
+for (std::string const& word : words) {
 RocksDBKey key =
 RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), docKey);
@@ -287,15 +280,14 @@ int RocksDBFulltextIndex::remove(transaction::Methods* trx,
 return res;
 }
 
-int RocksDBFulltextIndex::removeRaw(rocksdb::WriteBatchWithIndex* batch, TRI_voc_rid_t,
+int RocksDBFulltextIndex::removeRaw(rocksdb::WriteBatchWithIndex* batch,
+TRI_voc_rid_t,
 arangodb::velocypack::Slice const& doc) {
-std::vector<std::string> words = wordlist(doc);
+std::set<std::string> words = wordlist(doc);
 // now we are going to construct the value to insert into rocksdb
 // unique indexes have a different key structure
 StringRef docKey(doc.get(StaticStrings::KeyString));
-size_t const count = words.size();
-for (size_t i = 0; i < count; ++i) {
-std::string const& word = words[i];
+for (std::string const& word : words) {
 RocksDBKey key =
 RocksDBKey::FulltextIndexValue(_objectId, StringRef(word), docKey);
 batch->Delete(key.string());
@@ -315,9 +307,8 @@ int RocksDBFulltextIndex::cleanup() {
 
 /// @brief walk over the attribute. Also Extract sub-attributes and elements in
 /// list.
-static void ExtractWords(std::vector<std::string>& words,
-VPackSlice const value, size_t minWordLength,
-int level) {
+static void ExtractWords(std::set<std::string>& words, VPackSlice const value,
+size_t minWordLength, int level) {
 if (value.isString()) {
 // extract the string value for the indexed attribute
 std::string text = value.copyString();
@@ -340,8 +331,8 @@ static void ExtractWords(std::vector<std::string>& words,
 
 /// @brief callback function called by the fulltext index to determine the
 /// words to index for a specific document
-std::vector<std::string> RocksDBFulltextIndex::wordlist(VPackSlice const& doc) {
-std::vector<std::string> words;
+std::set<std::string> RocksDBFulltextIndex::wordlist(VPackSlice const& doc) {
+std::set<std::string> words;
 try {
 VPackSlice const value = doc.get(_attr);
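Editor's note: this hunk is the heart of the "Fixing fulltext deduplication" change: returning a std::set means a document containing the same word several times contributes that word to the index only once. A sketch of the effect, with a hypothetical whitespace tokenizer standing in for ExtractWords():

    #include <iostream>
    #include <set>
    #include <sstream>
    #include <string>

    // Hypothetical whitespace tokenizer standing in for ExtractWords():
    // inserting into a std::set silently drops repeated words.
    std::set<std::string> wordlist(std::string const& text) {
      std::set<std::string> words;
      std::istringstream in(text);
      std::string word;
      while (in >> word) {
        words.insert(word);  // duplicate inserts are no-ops
      }
      return words;
    }

    int main() {
      auto words = wordlist("die post die post der brief");
      for (auto const& w : words) {
        std::cout << w << "\n";  // brief, der, die, post -- each once
      }
    }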
@@ -131,7 +131,7 @@ class RocksDBFulltextIndex final : public RocksDBIndex {
 velocypack::Builder& builder);
 
 private:
-std::vector<std::string> wordlist(arangodb::velocypack::Slice const&);
+std::set<std::string> wordlist(arangodb::velocypack::Slice const&);
 
 /// @brief the indexed attribute (path)
 std::vector<std::string> _attr;
@@ -295,7 +295,7 @@ RocksDBGeoIndex::RocksDBGeoIndex(TRI_idx_iid_t iid,
 TRI_ASSERT(pair.first);
 numSlots = pair.second;
 }
 
 _geoIndex = GeoIndex_new(_objectId, numPots, numSlots);
 if (_geoIndex == nullptr) {
 THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY);
@@ -46,10 +46,10 @@ RocksDBIndex::RocksDBIndex(
 _cachePresent(false),
 _useCache(useCache) {
 if (_useCache) {
-LOG_TOPIC(ERR, Logger::FIXME) << "creating cache";
+//LOG_TOPIC(ERR, Logger::FIXME) << "creating cache";
 createCache();
 } else {
-LOG_TOPIC(ERR, Logger::FIXME) << "not creating cache";
+//LOG_TOPIC(ERR, Logger::FIXME) << "not creating cache";
 }
 }
@@ -87,8 +87,8 @@ void RocksDBIndex::toVelocyPackFigures(VPackBuilder& builder) const {
 }
 
 int RocksDBIndex::unload() {
-LOG_TOPIC(ERR, Logger::FIXME) << "unload cache";
 if (useCache()) {
+//LOG_TOPIC(ERR, Logger::FIXME) << "unload cache";
 disableCache();
 TRI_ASSERT(!_cachePresent);
 }
@@ -223,30 +223,13 @@ void RocksDBRestExportHandler::createCursor() {
 }
 
 VPackSlice options = optionsBuilder.slice();
 
-uint64_t waitTime = 0;
-bool flush = arangodb::basics::VelocyPackHelper::getBooleanValue(
-options, "flush", false);
-
-if (flush) {
-static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE)->syncWal();
-
-double flushWait =
-arangodb::basics::VelocyPackHelper::getNumericValue<double>(
-options, "flushWait", 10.0);
-
-waitTime = static_cast<uint64_t>(
-flushWait * 1000 *
-1000); // flushWait is specified in s, but we need ns
-}
-
 size_t limit = arangodb::basics::VelocyPackHelper::getNumericValue<size_t>(
 options, "limit", 0);
 
 // this may throw!
 auto collectionExport =
 std::make_unique<RocksDBCollectionExport>(_vocbase, name, _restrictions);
-collectionExport->run(waitTime, limit);
+collectionExport->run(limit);
 
 size_t batchSize =
 arangodb::basics::VelocyPackHelper::getNumericValue<size_t>(
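Editor's note: the removed flushWait logic appears to have compensated for a flush that was not guaranteed to have completed; with syncWal() reworked above to report completion via a Result, the export no longer threads a wait time through run(). A hedged before/after sketch, with hypothetical helper names:

    #include <chrono>
    #include <iostream>
    #include <thread>

    // Hypothetical stand-ins for illustration only.
    void scheduleWalFlush() { /* pretend: hands work to a background thread */ }
    bool syncWalBlocking() { /* pretend: returns once the WAL hit disk */ return true; }

    // Old style: the flush was fire-and-forget, so the handler slept for a
    // configurable "flushWait" grace period and hoped the flush had landed.
    void exportOldStyle(double flushWaitSeconds) {
      scheduleWalFlush();
      std::this_thread::sleep_for(std::chrono::duration<double>(flushWaitSeconds));
    }

    // New style: the sync blocks until the data is durable, so no wait time
    // needs to be passed around.
    void exportNewStyle() {
      if (!syncWalBlocking()) {
        std::cerr << "WAL sync failed\n";
      }
    }

    int main() {
      exportOldStyle(0.01);
      exportNewStyle();
    }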
@@ -366,11 +366,6 @@ void RocksDBRestReplicationHandler::handleCommandBatch() {
 // extract ttl
-// double expires =
-// VelocyPackHelper::getNumericValue<double>(input->slice(), "ttl", 0);
-
-// TRI_voc_tick_t id;
-// StorageEngine* engine = EngineSelectorFeature::ENGINE;
-// int res = engine->insertCompactionBlocker(_vocbase, expires, id);
 
 RocksDBReplicationContext* ctx = _manager->createContext();
 if (ctx == nullptr) {
 THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "unable to create replication context");
@@ -1639,9 +1634,6 @@ void RocksDBRestReplicationHandler::handleCommandSync() {
 config._verbose = verbose;
 config._useCollectionId = useCollectionId;
 
-// wait until all data in current logfile got synced
-static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE)->syncWal();
-
 InitialSyncer syncer(_vocbase, &config, restrictCollections, restrictType,
 verbose);
@@ -46,8 +46,10 @@ static void JS_FlushWal(v8::FunctionCallbackInfo<v8::Value> const& args) {
 TRI_V8_TRY_CATCH_BEGIN(isolate);
 v8::HandleScope scope(isolate);
 
-static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE)->syncWal();
-
+arangodb::Result ret = static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE)->syncWal();
+if (!ret.ok()) {
+THROW_ARANGO_EXCEPTION_MESSAGE(ret.errorNumber(), ret.errorMessage());
+}
 TRI_V8_RETURN_TRUE();
 TRI_V8_TRY_CATCH_END
 }
@@ -84,11 +84,6 @@ class PhysicalCollection {
 /// @brief opens an existing collection
 virtual void open(bool ignoreErrors) = 0;
 
-/// @brief iterate all markers of a collection on load
-virtual int iterateMarkersOnLoad(transaction::Methods* trx) = 0;
-
 virtual bool isFullyCollected() const = 0;
 
 void drop();
 
 ////////////////////////////////////
@@ -144,11 +139,6 @@ class PhysicalCollection {
 DocumentIdentifierToken const& token,
 ManagedDocumentResult& result) = 0;
 
-virtual bool readDocumentConditional(transaction::Methods* trx,
-DocumentIdentifierToken const& token,
-TRI_voc_tick_t maxTick,
-ManagedDocumentResult& result) = 0;
-
 virtual int insert(arangodb::transaction::Methods* trx,
 arangodb::velocypack::Slice const newSlice,
 arangodb::ManagedDocumentResult& result,
@@ -321,18 +321,6 @@ class StorageEngine : public application_features::ApplicationFeature {
 virtual void createIndex(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId,
 TRI_idx_iid_t id, arangodb::velocypack::Slice const& data) = 0;
 
-// asks the storage engine to drop the specified index and persist the deletion
-// info. Note that physical deletion of the index must not be carried out by this call,
-// as there may still be users of the index. It is recommended that this operation
-// only sets a deletion flag for the index but let's an async task perform
-// the actual deletion.
-// the WAL entry for index deletion will be written *after* the call
-// to "dropIndex" returns
-virtual void dropIndex(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId,
-TRI_idx_iid_t id) = 0;
-
 virtual void dropIndexWalMarker(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId,
 arangodb::velocypack::Slice const& data, bool useMarker, int&) = 0;
 // Returns the StorageEngine-specific implementation
 // of the IndexFactory. This is used to validate
 // information about indexes.
@@ -347,53 +335,8 @@ class StorageEngine : public application_features::ApplicationFeature {
 
 virtual void signalCleanup(TRI_vocbase_t* vocbase) = 0;
 
-// document operations
-// -------------------
-
-// iterate all documents of the underlying collection
-// this is called when a collection is openend, and all its documents need to be added to
-// indexes etc.
-virtual void iterateDocuments(TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
-std::function<void(arangodb::velocypack::Slice const&)> const& cb) = 0;
-
-// adds a document to the storage engine
-// this will be called by the WAL collector when surviving documents are being moved
-// into the storage engine's realm
-virtual void addDocumentRevision(TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
-arangodb::velocypack::Slice const& document) = 0;
-
-// removes a document from the storage engine
-// this will be called by the WAL collector when non-surviving documents are being removed
-// from the storage engine's realm
-virtual void removeDocumentRevision(TRI_voc_tick_t databaseId, TRI_voc_cid_t collectionId,
-arangodb::velocypack::Slice const& document) = 0;
-
-/// @brief remove data of expired compaction blockers
-virtual bool cleanupCompactionBlockers(TRI_vocbase_t* vocbase) = 0;
-
-/// @brief insert a compaction blocker
-virtual int insertCompactionBlocker(TRI_vocbase_t* vocbase, double ttl, TRI_voc_tick_t& id) = 0;
-
-/// @brief touch an existing compaction blocker
-virtual int extendCompactionBlocker(TRI_vocbase_t* vocbase, TRI_voc_tick_t id, double ttl) = 0;
-
-/// @brief remove an existing compaction blocker
-virtual int removeCompactionBlocker(TRI_vocbase_t* vocbase, TRI_voc_tick_t id) = 0;
-
-/// @brief a callback function that is run while it is guaranteed that there is no compaction ongoing
-virtual void preventCompaction(TRI_vocbase_t* vocbase,
-std::function<void(TRI_vocbase_t*)> const& callback) = 0;
-
-/// @brief a callback function that is run there is no compaction ongoing
-virtual bool tryPreventCompaction(TRI_vocbase_t* vocbase,
-std::function<void(TRI_vocbase_t*)> const& callback,
-bool checkForActiveBlockers) = 0;
-
 virtual int shutdownDatabase(TRI_vocbase_t* vocbase) = 0;
 
 virtual int openCollection(TRI_vocbase_t* vocbase, LogicalCollection* collection, bool ignoreErrors) = 0;
 
 // AQL functions
 // -------------
@@ -1412,7 +1412,7 @@ OperationResult transaction::Methods::insertLocal(
 res = workForOneDocument(value);
 }
 
-// wait for operation(s) to be synced to disk here
+// wait for operation(s) to be synced to disk here. On rocksdb maxTick == 0
 if (res == TRI_ERROR_NO_ERROR && options.waitForSync && maxTick > 0 &&
 isSingleOperationTransaction()) {
 EngineSelectorFeature::ENGINE->waitForSync(maxTick);

@@ -1723,7 +1723,7 @@ OperationResult transaction::Methods::modifyLocal(
 res = workForOneDocument(newValue, false);
 }
 
-// wait for operation(s) to be synced to disk here
+// wait for operation(s) to be synced to disk here. On rocksdb maxTick == 0
 if (res.ok() && options.waitForSync && maxTick > 0 &&
 isSingleOperationTransaction()) {
 EngineSelectorFeature::ENGINE->waitForSync(maxTick);

@@ -1968,7 +1968,7 @@ OperationResult transaction::Methods::removeLocal(
 res = workForOneDocument(value, false);
 }
 
-// wait for operation(s) to be synced to disk here
+// wait for operation(s) to be synced to disk here. On rocksdb maxTick == 0
 if (res == TRI_ERROR_NO_ERROR && options.waitForSync && maxTick > 0 &&
 isSingleOperationTransaction()) {
 EngineSelectorFeature::ENGINE->waitForSync(maxTick);
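Editor's note: the amended comment records why the existing maxTick > 0 guard matters: the RocksDB engine reports maxTick == 0, so the explicit waitForSync step is skipped there, while an engine that returns a real tick still waits. A small illustrative sketch (engine behavior and tick values are assumptions for illustration):

    #include <cstdint>
    #include <iostream>

    // Illustrative only: an engine that journals through a WAL hands back the
    // tick of the last write; an engine that syncs elsewhere hands back 0.
    uint64_t lastOperationTick(bool walBasedEngine) {
      return walBasedEngine ? 42 : 0;
    }

    void maybeWaitForSync(bool waitForSync, uint64_t maxTick) {
      // Only wait when the caller asked for it *and* the engine actually
      // produced a tick to wait on (maxTick == 0 means: nothing to wait for).
      if (waitForSync && maxTick > 0) {
        std::cout << "waiting for tick " << maxTick << " to be synced\n";
      } else {
        std::cout << "no explicit wait needed\n";
      }
    }

    int main() {
      maybeWaitForSync(true, lastOperationTick(true));   // waits
      maybeWaitForSync(true, lastOperationTick(false));  // skips
    }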
@@ -1160,12 +1160,6 @@ bool LogicalCollection::readDocument(transaction::Methods* trx,
 return getPhysical()->readDocument(trx, token, result);
 }
 
-bool LogicalCollection::readDocumentConditional(
-transaction::Methods* trx, DocumentIdentifierToken const& token,
-TRI_voc_tick_t maxTick, ManagedDocumentResult& result) {
-return getPhysical()->readDocumentConditional(trx, token, maxTick, result);
-}
-
 /// @brief a method to skip certain documents in AQL write operations,
 /// this is only used in the enterprise edition for smart graphs
 #ifndef USE_ENTERPRISE
@@ -277,11 +277,6 @@ class LogicalCollection {
 DocumentIdentifierToken const& token,
 ManagedDocumentResult& result);
 
-bool readDocumentConditional(transaction::Methods* trx,
-DocumentIdentifierToken const& token,
-TRI_voc_tick_t maxTick,
-ManagedDocumentResult& result);
-
 /// @brief Persist the connected physical collection.
 /// This should be called AFTER the collection is successfully
 /// created and only on Sinlge/DBServer
@@ -394,7 +394,7 @@ char* Utf8Helper::toupper(TRI_memory_zone_t* zone, char const* src,
 /// @brief Extract the words from a UTF-8 string.
 ////////////////////////////////////////////////////////////////////////////////
 
-bool Utf8Helper::getWords(std::vector<std::string>& words,
+bool Utf8Helper::getWords(std::set<std::string>& words,
 std::string const& text, size_t minimalLength,
 size_t maximalLength, bool lowerCase) {
 UErrorCode status = U_ZERO_ERROR;
@@ -458,21 +458,6 @@ bool Utf8Helper::getWords(std::vector<std::string>& words,
 return false;
 }
 
-// estimate an initial vector size. this is not accurate, but setting the
-// initial size to some
-// value in the correct order of magnitude will save a lot of vector
-// reallocations later
-size_t initialWordCount = textLength / (2 * (minimalLength + 1));
-if (initialWordCount < 32) {
-// alloc at least 32 pointers (= 256b)
-initialWordCount = 32;
-} else if (initialWordCount > 8192) {
-// alloc at most 8192 pointers (= 64kb)
-initialWordCount = 8192;
-}
-// Reserve initialWordCount additional words in the vector
-words.reserve(words.size() + initialWordCount);
-
 BreakIterator* wordIterator =
 BreakIterator::createWordInstance(locale, status);
 TRI_ASSERT(wordIterator != nullptr);
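Editor's note: the capacity-estimation block has to go away together with the container change, because std::set has no reserve(); it is node-based and allocates per element, so pre-sizing is neither possible nor useful. A minimal illustration:

    #include <set>
    #include <string>
    #include <vector>

    int main() {
      std::vector<std::string> v;
      v.reserve(8192);      // fine: a vector can pre-allocate contiguous storage

      std::set<std::string> s;
      // s.reserve(8192);   // does not compile: std::set has no reserve(),
                            // each node is allocated on insertion
      s.emplace("word");
      (void)v;
    }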
@@ -496,7 +481,7 @@ bool Utf8Helper::getWords(std::vector<std::string>& words,
 chunkLength, &utf8WordLength);
 if (utf8Word != nullptr) {
 std::string word(utf8Word, utf8WordLength);
-words.emplace_back(word);
+words.emplace(word);
 TRI_Free(TRI_UNKNOWN_MEM_ZONE, utf8Word);
 }
 }
@@ -136,7 +136,7 @@ class Utf8Helper {
 /// @brief returns the words of a UTF-8 string.
 //////////////////////////////////////////////////////////////////////////////
 
-bool getWords(std::vector<std::string>& words, std::string const& text,
+bool getWords(std::set<std::string>& words, std::string const& text,
 size_t minimalWordLength, size_t maximalWordLength,
 bool lowerCase);
@@ -4007,7 +4007,7 @@ static void JS_SplitWordlist(v8::FunctionCallbackInfo<v8::Value> const& args) {
 lowerCase = TRI_ObjectToBoolean(args[3]);
 }
 
-std::vector<std::string> wordList;
+std::set<std::string> wordList;
 
 if (!Utf8Helper::DefaultUtf8Helper.getWords(
 wordList, stringToTokenize, minLength, maxLength, lowerCase)) {
@@ -4017,11 +4017,11 @@ static void JS_SplitWordlist(v8::FunctionCallbackInfo<v8::Value> const& args) {
 v8::Handle<v8::Array> v8WordList =
 v8::Array::New(isolate, static_cast<int>(wordList.size()));
 
-size_t const n = static_cast<uint32_t>(wordList.size());
-
-for (uint32_t i = 0; i < n; i++) {
-v8::Handle<v8::String> oneWord = TRI_V8_STD_STRING(wordList[i]);
+uint32_t i = 0;
+for (std::string const& word : wordList) {
+v8::Handle<v8::String> oneWord = TRI_V8_STD_STRING(word);
 v8WordList->Set(i, oneWord);
+i++;
 }
 
 TRI_V8_RETURN(v8WordList);
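Editor's note: a set cannot be indexed, so the loop keeps an explicit counter next to a range-based for in order to fill the positional V8 array. The same pattern in plain C++, with a string vector standing in for the V8 array:

    #include <cstdint>
    #include <iostream>
    #include <set>
    #include <string>
    #include <vector>

    int main() {
      std::set<std::string> wordList = {"post", "der", "die"};

      // The destination wants positional writes, the source has no indices:
      // carry the index manually next to the range-for.
      std::vector<std::string> out(wordList.size());
      uint32_t i = 0;
      for (std::string const& word : wordList) {
        out[i] = word;
        i++;
      }

      for (auto const& w : out) {
        std::cout << w << "\n";  // der, die, post (set order)
      }
    }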
@@ -172,26 +172,28 @@ SECTION("tst_3") {
 SECTION("tst_4") {
 std::string testString = "Der Müller geht in die Post.";
 
-std::vector<std::string> words;
+std::set<std::string> words;
 arangodb::basics::Utf8Helper::DefaultUtf8Helper.getWords(words, testString, 3, UINT32_MAX, true);
 CHECK(!words.empty());
 
 CHECK((5UL) == words.size());
-CHECK(std::string("der") == words[0]);
-CHECK(std::string("müller") == words[1]);
-CHECK(std::string("geht") == words[2]);
-CHECK(std::string("die") == words[3]);
-CHECK(std::string("post") == words[4]);
+CHECK(words.find(std::string("der")) != words.end());
+CHECK(words.find(std::string("müller")) != words.end());
+CHECK(words.find(std::string("geht")) != words.end());
+CHECK(words.find(std::string("die")) != words.end());
+CHECK(words.find(std::string("post")) != words.end());
 
 words.clear();
 arangodb::basics::Utf8Helper::DefaultUtf8Helper.getWords(words, testString, 4, UINT32_MAX, true);
 CHECK(!words.empty());
 
 CHECK((3UL) == words.size());
-CHECK(std::string("müller") == words[0]);
-CHECK(std::string("geht") == words[1]);
-CHECK(std::string("post") == words[2]);
+CHECK(words.find(std::string("müller")) != words.end());
+CHECK(words.find(std::string("geht")) != words.end());
+CHECK(words.find(std::string("post")) != words.end());
+CHECK(words.find(std::string("der")) == words.end());
+CHECK(words.find(std::string("die")) == words.end());
 
 words.clear();
 arangodb::basics::Utf8Helper::DefaultUtf8Helper.getWords(words, "", 3, UINT32_MAX, true);
 CHECK(words.empty());
@@ -200,26 +202,28 @@ SECTION("tst_4") {
 SECTION("tst_5") {
 std::string testString = "Der Müller geht in die Post.";
 
-std::vector<std::string> words;
+std::set<std::string> words;
 arangodb::basics::Utf8Helper::DefaultUtf8Helper.getWords(words, testString, 3, UINT32_MAX, false);
 CHECK(!words.empty());
 
 CHECK((5UL) == words.size());
-CHECK(std::string("Der") == words[0]);
-CHECK(std::string("Müller") == words[1]);
-CHECK(std::string("geht") == words[2]);
-CHECK(std::string("die") == words[3]);
-CHECK(std::string("Post") == words[4]);
+CHECK(words.find(std::string("Der")) != words.end());
+CHECK(words.find(std::string("Müller")) != words.end());
+CHECK(words.find(std::string("geht")) != words.end());
+CHECK(words.find(std::string("die")) != words.end());
+CHECK(words.find(std::string("Post")) != words.end());
 
 words.clear();
 arangodb::basics::Utf8Helper::DefaultUtf8Helper.getWords(words, testString, 4, UINT32_MAX, false);
 CHECK(!words.empty());
 
 CHECK((3UL) == words.size());
-CHECK(std::string("Müller") == words[0]);
-CHECK(std::string("geht") == words[1]);
-CHECK(std::string("Post") == words[2]);
+CHECK(words.find(std::string("Müller")) != words.end());
+CHECK(words.find(std::string("geht")) != words.end());
+CHECK(words.find(std::string("Post")) != words.end());
+CHECK(words.find(std::string("der")) == words.end());
+CHECK(words.find(std::string("die")) == words.end());
 
 words.clear();
 arangodb::basics::Utf8Helper::DefaultUtf8Helper.getWords(words, "", 4, UINT32_MAX, false);
 CHECK(words.empty());
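Editor's note: with a std::set, the old positional assertions (words[0], words[1], ...) no longer compile and iteration order is lexicographic, so the tests switch to membership checks. The same assertion style in a plain, compilable form:

    #include <cassert>
    #include <set>
    #include <string>

    int main() {
      std::set<std::string> words = {"der", "geht", "müller", "post", "die"};

      // Membership instead of position: a set has no words[0].
      assert(words.find("müller") != words.end());   // present
      assert(words.find("brief") == words.end());    // absent
      assert(words.size() == 5);
    }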