mirror of https://gitee.com/bigwinds/arangodb
issue 520.1.1: decrease link commit interval to 1 second, add workaround for MMFiles not recovering document insertions via WAL (#8313)
This commit is contained in:
parent 4ae0cd42c3
commit b1e30937bb

@@ -31,7 +31,7 @@ Background:
 @RESTSTRUCT{commitIntervalMsec,post_api_view_props,integer,optional,uint64}
 Wait at least this many milliseconds between committing view data store
-changes and making documents visible to queries (default: 60000, to disable
+changes and making documents visible to queries (default: 1000, to disable
 use: 0).
 For the case where there are a lot of inserts/updates, a lower value, until
 commit, will cause the index not to account for them and memory usage would

@@ -32,7 +32,7 @@ Background:
 @RESTSTRUCT{commitIntervalMsec,post_api_view_props,integer,optional,uint64}
 Wait at least this many milliseconds between committing view data store
-changes and making documents visible to queries (default: 60000, to disable
+changes and making documents visible to queries (default: 1000, to disable
 use: 0).
 For the case where there are a lot of inserts/updates, a lower value, until
 commit, will cause the index not to account for them and memory usage would

@@ -31,7 +31,7 @@ Background:
 @RESTSTRUCT{commitIntervalMsec,post_api_view_props,integer,optional,uint64}
 Wait at least this many milliseconds between committing view data store
-changes and making documents visible to queries (default: 60000, to disable
+changes and making documents visible to queries (default: 1000, to disable
 use: 0).
 For the case where there are a lot of inserts/updates, a lower value, until
 commit, will cause the index not to account for them and memory usage would

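As context for the default change above (60000 → 1000 milliseconds): commitIntervalMsec bounds how long newly inserted documents can stay invisible to queries. Below is a minimal sketch of how such an interval option is typically consumed by a background task; the names are invented for illustration and this is not ArangoDB's actual scheduler code.

#include <atomic>
#include <chrono>
#include <cstdint>
#include <thread>

// Hypothetical stand-in for making pending view changes visible to queries.
void commitViewDataStore() {}

// Sketch: run commits at least `intervalMsec` apart; 0 disables them.
void commitLoop(std::atomic<bool> const& stop, std::uint64_t intervalMsec) {
  if (intervalMsec == 0) {
    return; // a zero interval disables periodic commits entirely
  }

  while (!stop.load()) {
    std::this_thread::sleep_for(std::chrono::milliseconds(intervalMsec));
    commitViewDataStore(); // documents inserted since the last commit become searchable
  }
}

Lowering the default from 60000 to 1000 means documents become searchable within roughly a second of insertion, at the cost of more frequent commits.
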
@@ -24,16 +24,17 @@
 #include "store/mmap_directory.hpp"
 #include "store/store_utils.hpp"
 
-#include "Aql/QueryCache.h"
-#include "Basics/LocalTaskQueue.h"
-#include "Basics/StaticStrings.h"
-#include "Cluster/ClusterInfo.h"
 #include "IResearchCommon.h"
 #include "IResearchFeature.h"
 #include "IResearchLinkHelper.h"
 #include "IResearchPrimaryKeyFilter.h"
 #include "IResearchView.h"
 #include "IResearchViewCoordinator.h"
+#include "Aql/QueryCache.h"
+#include "Basics/LocalTaskQueue.h"
+#include "Basics/StaticStrings.h"
+#include "Cluster/ClusterInfo.h"
+#include "MMFiles/MMFilesCollection.h"
 #include "RestServer/DatabaseFeature.h"
 #include "RestServer/DatabasePathFeature.h"
 #include "StorageEngine/EngineSelectorFeature.h"

@@ -376,6 +377,14 @@ void IResearchLink::batchInsert( // insert documents
   switch (_dataStore._recovery) {
    case RecoveryState::BEFORE_CHECKPOINT:
     return; // ignore all insertions before 'checkpoint'
    case RecoveryState::AFTER_CHECKPOINT:
+    // FIXME TODO find a better way to force MMFiles WAL recovery
+    // workaround MMFilesWalRecoverState not replaying document insertion
+    // markers, but instead reinserting all documents into the index just before
+    // the end of recovery
+    if (!dynamic_cast<arangodb::MMFilesCollection*>(collection().getPhysical())) {
+      break; // skip for non-MMFiles (fallthough for MMFiles)
+    }
    case RecoveryState::DURING_CHECKPOINT:
     for (auto const& doc: batch) {
      ctx->remove(doc.first);

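The added block above relies on deliberate case fallthrough: for MMFiles collections, insertions replayed after the checkpoint fall through into the DURING_CHECKPOINT branch, which removes any existing index entry before reinserting, so the bulk reinsert MMFiles performs at the end of recovery stays idempotent. A self-contained sketch of that control flow, with simplified, invented types:

#include <iostream>

enum class RecoveryState { BEFORE_CHECKPOINT, DURING_CHECKPOINT, AFTER_CHECKPOINT };

// Invented stand-ins for the storage-engine check and the index operations.
bool isMMFilesCollection() { return true; } // assumption: MMFiles engine in use
void removeEntry(int docId) { std::cout << "remove " << docId << '\n'; }
void insertEntry(int docId) { std::cout << "insert " << docId << '\n'; }

void applyInsert(RecoveryState state, int docId) {
  switch (state) {
    case RecoveryState::BEFORE_CHECKPOINT:
      return; // ignore all insertions before the checkpoint (as in the hunk)
    case RecoveryState::AFTER_CHECKPOINT:
      if (!isMMFilesCollection()) {
        break; // non-MMFiles engines replay markers correctly: plain insert
      }
      [[fallthrough]]; // MMFiles reinserts everything at the end of recovery
    case RecoveryState::DURING_CHECKPOINT:
      removeEntry(docId); // drop a possibly stale entry so reinsertion is idempotent
  }
  insertEntry(docId);
}

int main() { applyInsert(RecoveryState::AFTER_CHECKPOINT, 42); }
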
@@ -425,18 +434,13 @@ bool IResearchLink::canBeDropped() const {
   return true; // valid for a link to be dropped from an iResearch view
 }
 
-arangodb::Result IResearchLink::cleanup() {
+////////////////////////////////////////////////////////////////////////////////
+/// @note assumes that '_asyncSelf' is read-locked (for use with async tasks)
+////////////////////////////////////////////////////////////////////////////////
+arangodb::Result IResearchLink::cleanupUnsafe() {
   char runId = 0; // value not used
 
-  SCOPED_LOCK(_asyncSelf->mutex()); // '_dataStore' can be asynchronously modified
-
-  if (!*_asyncSelf) {
-    return arangodb::Result( // result
-      TRI_ERROR_ARANGO_INDEX_HANDLE_BAD, // the current link is no longer valid (checked after ReadLock aquisition)
-      std::string("failed to lock arangosearch link while cleaning up arangosearch link '") + std::to_string(id()) + "' run id '" + std::to_string(size_t(&runId)) + "'"
-    );
-  }
-
+  // NOTE: assumes that '_asyncSelf' is read-locked (for use with async tasks)
   TRI_ASSERT(_dataStore); // must be valid if _asyncSelf->get() is valid
 
   try {

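The cleanup() → cleanupUnsafe() rename (and likewise commitUnsafe() and consolidateUnsafe() below) follows a common split: the public method acquires the lock and checks validity, while the *Unsafe variant assumes the caller already holds '_asyncSelf', so the asynchronous maintenance tasks that keep it locked for their whole run can call the Unsafe variants directly without re-locking, as the initDataStore hunks further down show. A generic sketch of the pattern, with invented names:

#include <cassert>
#include <mutex>

class Link {
 public:
  // Public entry point: take the lock, verify the link is alive, delegate.
  bool cleanup() {
    std::lock_guard<std::mutex> guard(_mutex);
    if (!_valid) {
      return false; // link already dropped
    }
    return cleanupUnsafe();
  }

 private:
  // Assumes _mutex is already held (e.g. by a long-running async task).
  bool cleanupUnsafe() {
    assert(_valid); // only reachable while locked and alive
    // ... perform the actual filesystem cleanup here ...
    return true;
  }

  std::mutex _mutex;
  bool _valid = true;
};
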
@@ -465,14 +469,21 @@ arangodb::Result IResearchLink::commit() {
 
   if (!*_asyncSelf) {
     return arangodb::Result(
-      TRI_ERROR_ARANGO_INDEX_HANDLE_BAD, // the current link is no longer
-                                         // valid (checked after ReadLock
-                                         // aquisition)
-      std::string("failed to lock arangosearch link while commiting "
-                  "arangosearch link '") +
-          std::to_string(id()) + "'");
+      TRI_ERROR_ARANGO_INDEX_HANDLE_BAD, // the current link is no longer valid (checked after ReadLock aquisition)
+      std::string("failed to lock arangosearch link while commiting arangosearch link '") + std::to_string(id()) + "'"
+    );
   }
 
+  return commitUnsafe();
+}
+
+////////////////////////////////////////////////////////////////////////////////
+/// @note assumes that '_asyncSelf' is read-locked (for use with async tasks)
+////////////////////////////////////////////////////////////////////////////////
+arangodb::Result IResearchLink::commitUnsafe() {
+  char runId = 0; // value not used
+
+  // NOTE: assumes that '_asyncSelf' is read-locked (for use with async tasks)
   TRI_ASSERT(_dataStore); // must be valid if _asyncSelf->get() is valid
 
   try {

@@ -484,9 +495,7 @@ arangodb::Result IResearchLink::commit() {
     if (!reader) {
       // nothing more to do
       LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)
-        << "failed to update snapshot after commit, reuse the existing "
-           "snapshot for arangosearch link '"
-        << id() << "'";
+        << "failed to update snapshot after commit, run id '" << size_t(&runId) << "', reuse the existing snapshot for arangosearch link '" << id() << "'";
 
       return arangodb::Result();
     }

@@ -522,7 +531,7 @@ arangodb::Result IResearchLink::commit() {
     if (!out) { // create checkpoint
       return arangodb::Result( // result
         TRI_ERROR_CANNOT_WRITE_FILE, // code
-        std::string("failed to write checkpoint file for arangosearch link '") + std::to_string(id()) + "', ignoring commit success, path: " + checkpointFile
+        std::string("failed to write checkpoint file for arangosearch link '") + std::to_string(id()) + "', run id '" + std::to_string(size_t(&runId)) + "', ignoring commit success, path: " + checkpointFile
       );
     }
 
@@ -532,14 +541,14 @@ arangodb::Result IResearchLink::commit() {
 
     return arangodb::Result( // result
       TRI_ERROR_ARANGO_IO_ERROR, // code
-      std::string("caught exception while writing checkpoint file for arangosearch link '") + std::to_string(id()) + "': " + e.what()
+      std::string("caught exception while writing checkpoint file for arangosearch link '") + std::to_string(id()) + "' run id '" + std::to_string(size_t(&runId)) + "': " + e.what()
     );
   } catch (...) {
     _dataStore._directory->remove(checkpointFile); // try to remove failed file
 
     return arangodb::Result( // result
       TRI_ERROR_ARANGO_IO_ERROR, // code
-      std::string("caught exception while writing checkpoint file for arangosearch link '") + std::to_string(id()) + "'"
+      std::string("caught exception while writing checkpoint file for arangosearch link '") + std::to_string(id()) + "' run id '" + std::to_string(size_t(&runId)) + "'"
     );
   }
 
@@ -555,24 +564,27 @@ arangodb::Result IResearchLink::commit() {
   } catch (arangodb::basics::Exception const& e) {
     return arangodb::Result( // result
       e.code(), // code
-      std::string("caught exception while committing arangosearch link '") + std::to_string(id()) + "': " + e.what()
+      std::string("caught exception while committing arangosearch link '") + std::to_string(id()) + "' run id '" + std::to_string(size_t(&runId)) + "': " + e.what()
     );
   } catch (std::exception const& e) {
     return arangodb::Result( // result
       TRI_ERROR_INTERNAL, // code
-      std::string("caught exception while committing arangosearch link '") + std::to_string(id()) + "': " + e.what()
+      std::string("caught exception while committing arangosearch link '") + std::to_string(id()) + "' run id '" + std::to_string(size_t(&runId)) + "': " + e.what()
     );
   } catch (...) {
     return arangodb::Result( // result
       TRI_ERROR_INTERNAL, // code
-      std::string("caught exception while committing arangosearch link '") + std::to_string(id()) + "'"
+      std::string("caught exception while committing arangosearch link '") + std::to_string(id()) + "' run id '" + std::to_string(size_t(&runId)) + "'"
     );
   }
 
   return arangodb::Result();
 }
 
-arangodb::Result IResearchLink::consolidate( // consolidate segments
+////////////////////////////////////////////////////////////////////////////////
+/// @note assumes that '_asyncSelf' is read-locked (for use with async tasks)
+////////////////////////////////////////////////////////////////////////////////
+arangodb::Result IResearchLink::consolidateUnsafe( // consolidate segments
   IResearchViewMeta::ConsolidationPolicy const& policy, // policy to apply
   irs::merge_writer::flush_progress_t const& progress // policy progress to use
 ) {

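The `run id '<number>'` fragments threaded through these messages come from a small trick visible at the top of each function: a local `char runId = 0;` exists only so that its stack address, cast to size_t, tags every log line of one invocation, letting overlapping runs of the same function be told apart. A standalone sketch:

#include <cstddef>
#include <iostream>
#include <string>

std::string doWork() {
  char runId = 0; // value unused; the address identifies this invocation
  // Concurrent calls have distinct stack frames, hence distinct addresses.
  return std::string("run id '") + std::to_string(size_t(&runId)) + "'";
}

int main() {
  std::cout << doWork() << '\n'; // prints e.g. run id '140732816222343'
}

Note the address only distinguishes runs that are alive at the same time; strictly sequential calls may reuse the same stack slot and print the same id.
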
@@ -585,15 +597,7 @@ arangodb::Result IResearchLink::consolidate( // consolidate segments
     );
   }
 
-  SCOPED_LOCK(_asyncSelf->mutex()); // '_dataStore' can be asynchronously modified
-
-  if (!*_asyncSelf) {
-    return arangodb::Result( // result
-      TRI_ERROR_ARANGO_INDEX_HANDLE_BAD, // the current link is no longer valid (checked after ReadLock aquisition)
-      std::string("failed to lock arangosearch link while executing consolidation policy '") + policy.properties().toString() + "' on arangosearch link '" + std::to_string(id()) + "' run id '" + std::to_string(size_t(&runId)) + "'"
-    );
-  }
-
+  // NOTE: assumes that '_asyncSelf' is read-locked (for use with async tasks)
   TRI_ASSERT(_dataStore); // must be valid if _asyncSelf->get() is valid
 
   try {

@@ -1173,7 +1177,7 @@ arangodb::Result IResearchLink::initDataStore(InitCallback const& initCallback)
     state._last = std::chrono::system_clock::now(); // remember last task start time
     timeoutMsec = state._commitIntervalMsec;
 
-    auto res = commit(); // run commit
+    auto res = commitUnsafe(); // run commit ('_asyncSelf' locked by async task)
 
     if (!res.ok()) {
       LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)

@@ -1181,7 +1185,7 @@ arangodb::Result IResearchLink::initDataStore(InitCallback const& initCallback)
     } else if (state._cleanupIntervalStep // if enabled
                && state._cleanupIntervalCount++ > state._cleanupIntervalStep) {
       state._cleanupIntervalCount = 0; // reset counter
-      res = cleanup(); // run cleanup
+      res = cleanupUnsafe(); // run cleanup ('_asyncSelf' locked by async task)
 
       if (!res.ok()) {
         LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)

@@ -1240,7 +1244,8 @@ arangodb::Result IResearchLink::initDataStore(InitCallback const& initCallback)
     state._last = std::chrono::system_clock::now(); // remember last task start time
     timeoutMsec = state._consolidationIntervalMsec;
 
-    auto res = consolidate(state._consolidationPolicy, state._progress); // run consolidation
+    auto res = // consolidate
+      consolidateUnsafe(state._consolidationPolicy, state._progress); // run consolidation ('_asyncSelf' locked by async task)
 
     if (!res.ok()) {
       LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)

@@ -1248,7 +1253,7 @@ arangodb::Result IResearchLink::initDataStore(InitCallback const& initCallback)
     } else if (state._cleanupIntervalStep // if enabled
                && state._cleanupIntervalCount++ > state._cleanupIntervalStep) {
       state._cleanupIntervalCount = 0; // reset counter
-      res = cleanup(); // run cleanup
+      res = cleanupUnsafe(); // run cleanup ('_asyncSelf' locked by async task)
 
       if (!res.ok()) {
         LOG_TOPIC(WARN, arangodb::iresearch::TOPIC)

@@ -106,7 +106,7 @@ class IResearchLink {
   arangodb::LogicalCollection& collection() const noexcept; // arangodb::Index override
 
   //////////////////////////////////////////////////////////////////////////////
-  /// @brief mark the current data store state as te latest valid state
+  /// @brief mark the current data store state as the latest valid state
   //////////////////////////////////////////////////////////////////////////////
   arangodb::Result commit();
 
@@ -270,13 +270,21 @@ class IResearchLink {
 
   //////////////////////////////////////////////////////////////////////////////
   /// @brief run filesystem cleanup on the data store
+  /// @note assumes that '_asyncSelf' is read-locked (for use with async tasks)
   //////////////////////////////////////////////////////////////////////////////
-  arangodb::Result cleanup();
+  arangodb::Result cleanupUnsafe();
+
+  //////////////////////////////////////////////////////////////////////////////
+  /// @brief mark the current data store state as the latest valid state
+  /// @note assumes that '_asyncSelf' is read-locked (for use with async tasks)
+  //////////////////////////////////////////////////////////////////////////////
+  arangodb::Result commitUnsafe();
 
   //////////////////////////////////////////////////////////////////////////////
   /// @brief run segment consolidation on the data store
+  /// @note assumes that '_asyncSelf' is read-locked (for use with async tasks)
   //////////////////////////////////////////////////////////////////////////////
-  arangodb::Result consolidate( // consolidate segments
+  arangodb::Result consolidateUnsafe( // consolidate segments
     IResearchViewMeta::ConsolidationPolicy const& policy, // policy to apply
     irs::merge_writer::flush_progress_t const& progress // policy progress to use
   );

@@ -190,7 +190,7 @@ IResearchViewMeta::Mask::Mask(bool mask /*=false*/) noexcept
 
 IResearchViewMeta::IResearchViewMeta()
   : _cleanupIntervalStep(10),
-    _commitIntervalMsec(60 * 1000),
+    _commitIntervalMsec(1000),
     _consolidationIntervalMsec(60 * 1000),
     _locale(std::locale::classic()),
     _version(LATEST_VERSION),

@@ -197,7 +197,7 @@ class MMFilesFlushMarker final: public arangodb::MMFilesWalMarker {
 
     auto* data = reinterpret_cast<uint8_t const*>(&marker);
     auto* ptr = data + sizeof(MMFilesMarker);
-    auto* end = ptr + marker.getSize();
+    auto* end = ptr + marker.getSize() - sizeof(MMFilesMarker);
 
     if (sizeof(TRI_voc_tick_t) > size_t(end - ptr)) {
       THROW_ARANGO_EXCEPTION(arangodb::Result( // exception

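The one-line change above corrects a bounds computation: `ptr` already points past the fixed MMFilesMarker header, while getSize() evidently covers the whole marker including that header, so `ptr + marker.getSize()` placed `end` sizeof(MMFilesMarker) bytes past the real payload. A simplified illustration of the arithmetic, with an invented stand-in struct:

#include <cstdint>
#include <cstdio>

struct Marker {               // invented stand-in for MMFilesMarker
  uint32_t size;              // total marker size, fixed header included
  uint32_t type;
  uint32_t getSize() const { return size; }
};

int main() {
  Marker marker{};
  marker.size = sizeof(Marker) + 24; // header plus a 24-byte payload

  // A reader positions its payload pointer just past the header, so the
  // payload length is getSize() minus the header size, not getSize() itself.
  size_t wrong = marker.getSize();                    // 32: overshoots by 8
  size_t correct = marker.getSize() - sizeof(Marker); // 24: actual payload
  std::printf("wrong=%zu correct=%zu\n", wrong, correct);
}
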
@@ -550,7 +550,9 @@ namespace arangodb {
 std::atomic<bool> FlushFeature::_isRunning(false);
 
 FlushFeature::FlushFeature(application_features::ApplicationServer& server)
-    : ApplicationFeature(server, "Flush"), _flushInterval(1000000) {
+    : ApplicationFeature(server, "Flush"),
+      _flushInterval(1000000),
+      _stopped(false) {
   setOptional(true);
   startsAfter("BasicsPhase");
 
@@ -604,7 +606,7 @@ std::shared_ptr<FlushFeature::FlushSubscription> FlushFeature::registerFlushSubs
   );
   std::lock_guard<std::mutex> lock(_flushSubscriptionsMutex);
 
-  if (!_isRunning.load()) {
+  if (_stopped) {
     LOG_TOPIC(ERR, Logger::FLUSH)
       << "FlushFeature not running";
 
@@ -642,7 +644,7 @@ std::shared_ptr<FlushFeature::FlushSubscription> FlushFeature::registerFlushSubs
   );
   std::lock_guard<std::mutex> lock(_flushSubscriptionsMutex);
 
-  if (!_isRunning.load()) {
+  if (_stopped) {
     LOG_TOPIC(ERR, Logger::FLUSH)
       << "FlushFeature not running";
 
@@ -806,6 +808,7 @@ void FlushFeature::stop() {
       // release any remaining flush subscriptions so that they may get deallocated ASAP
       // subscriptions could survive after FlushFeature::stop(), e.g. DatabaseFeature::unprepare()
       _flushSubscriptions.clear();
+      _stopped = true;
     }
   }
 }

@@ -112,6 +112,7 @@ class FlushFeature final : public application_features::ApplicationFeature {
   basics::ReadWriteLock _threadLock;
   std::unordered_set<std::shared_ptr<FlushSubscriptionBase>> _flushSubscriptions;
   std::mutex _flushSubscriptionsMutex;
+  bool _stopped;
 };
 
 } // namespace arangodb

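The FlushFeature hunks replace the `!_isRunning.load()` check with a dedicated `_stopped` flag set in stop(): subscriptions may legitimately be registered before the feature has started, so "not running" was too strict a predicate; only registration after shutdown should be refused. A reduced sketch of that lifecycle, with invented names, guarded by the same mutex that protects the subscription set:

#include <iostream>
#include <memory>
#include <mutex>
#include <unordered_set>

class FlushFeature {
 public:
  using Subscription = std::shared_ptr<int>; // placeholder subscription type

  bool registerSubscription(Subscription const& s) {
    std::lock_guard<std::mutex> lock(_mutex);
    if (_stopped) { // reject only after stop(), not merely "not started yet"
      std::cerr << "FlushFeature not running\n";
      return false;
    }
    _subscriptions.insert(s);
    return true;
  }

  void stop() {
    std::lock_guard<std::mutex> lock(_mutex);
    _subscriptions.clear(); // release subscriptions so they deallocate ASAP
    _stopped = true;        // any later registration is an error
  }

 private:
  std::mutex _mutex;
  std::unordered_set<Subscription> _subscriptions;
  bool _stopped = false;
};
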
File diff suppressed because one or more lines are too long
@@ -358,7 +358,7 @@ SECTION("test_flush_marker") {
   arangodb::encoding::storeNumber(&buf[sizeof(::MMFilesMarker)], TRI_voc_tick_t(1), sizeof(TRI_voc_tick_t));
   buf.append(json->slice().begin(), json->slice().byteSize());
   auto* marker = reinterpret_cast<MMFilesMarker*>(&buf[0]);
-  marker->setSize(buf.size() - sizeof(::MMFilesMarker));
+  marker->setSize(buf.size());
   marker->setType(::MMFilesMarkerType::TRI_DF_MARKER_VPACK_FLUSH_SYNC);
   arangodb::MMFilesWalRecoverState state(false);
   CHECK((0 == state.errorCount));

@@ -374,7 +374,7 @@ SECTION("test_flush_marker") {
   arangodb::encoding::storeNumber(&buf[sizeof(::MMFilesMarker)], TRI_voc_tick_t(1), sizeof(TRI_voc_tick_t));
   buf.append(json->slice().begin(), json->slice().byteSize());
   auto* marker = reinterpret_cast<MMFilesMarker*>(&buf[0]);
-  marker->setSize(buf.size() - sizeof(::MMFilesMarker));
+  marker->setSize(buf.size());
   marker->setType(::MMFilesMarkerType::TRI_DF_MARKER_VPACK_FLUSH_SYNC);
   arangodb::MMFilesWalRecoverState state(false);
   CHECK((0 == state.errorCount));

@@ -390,7 +390,7 @@ SECTION("test_flush_marker") {
   arangodb::encoding::storeNumber(&buf[sizeof(::MMFilesMarker)], TRI_voc_tick_t(1), sizeof(TRI_voc_tick_t));
   buf.append(json->slice().begin(), json->slice().byteSize());
   auto* marker = reinterpret_cast<MMFilesMarker*>(&buf[0]);
-  marker->setSize(buf.size() - sizeof(::MMFilesMarker));
+  marker->setSize(buf.size());
   marker->setType(::MMFilesMarkerType::TRI_DF_MARKER_VPACK_FLUSH_SYNC);
   arangodb::MMFilesWalRecoverState state(false);
   CHECK((0 == state.errorCount));

@@ -406,7 +406,7 @@ SECTION("test_flush_marker") {
   arangodb::encoding::storeNumber(&buf[sizeof(::MMFilesMarker)], TRI_voc_tick_t(1), sizeof(TRI_voc_tick_t));
   buf.append(json->slice().begin(), json->slice().byteSize());
   auto* marker = reinterpret_cast<MMFilesMarker*>(&buf[0]);
-  marker->setSize(buf.size() - sizeof(::MMFilesMarker));
+  marker->setSize(buf.size());
   marker->setType(::MMFilesMarkerType::TRI_DF_MARKER_VPACK_FLUSH_SYNC);
   arangodb::MMFilesWalRecoverState state(false);
   CHECK((0 == state.errorCount));

@@ -422,7 +422,7 @@ SECTION("test_flush_marker") {
   arangodb::encoding::storeNumber(&buf[sizeof(::MMFilesMarker)], TRI_voc_tick_t(1), sizeof(TRI_voc_tick_t));
   buf.append(json->slice().begin(), json->slice().byteSize());
   auto* marker = reinterpret_cast<MMFilesMarker*>(&buf[0]);
-  marker->setSize(buf.size() - sizeof(::MMFilesMarker));
+  marker->setSize(buf.size());
   marker->setType(::MMFilesMarkerType::TRI_DF_MARKER_VPACK_FLUSH_SYNC);
   arangodb::MMFilesWalRecoverState state(false);
   CHECK((0 == state.errorCount));

@@ -438,7 +438,7 @@ SECTION("test_flush_marker") {
   arangodb::encoding::storeNumber(&buf[sizeof(::MMFilesMarker)], TRI_voc_tick_t(1), sizeof(TRI_voc_tick_t));
   buf.append(json->slice().begin(), json->slice().byteSize());
   auto* marker = reinterpret_cast<MMFilesMarker*>(&buf[0]);
-  marker->setSize(buf.size() - sizeof(::MMFilesMarker));
+  marker->setSize(buf.size());
   marker->setType(::MMFilesMarkerType::TRI_DF_MARKER_VPACK_FLUSH_SYNC);
   arangodb::MMFilesWalRecoverState state(false);
   CHECK((0 == state.errorCount));

@@ -464,7 +464,7 @@ SECTION("test_flush_marker") {
   arangodb::encoding::storeNumber(&buf[sizeof(::MMFilesMarker)], TRI_voc_tick_t(1), sizeof(TRI_voc_tick_t));
   buf.append(json->slice().begin(), json->slice().byteSize());
   auto* marker = reinterpret_cast<MMFilesMarker*>(&buf[0]);
-  marker->setSize(buf.size() - sizeof(::MMFilesMarker));
+  marker->setSize(buf.size());
   marker->setType(::MMFilesMarkerType::TRI_DF_MARKER_VPACK_FLUSH_SYNC);
   arangodb::MMFilesWalRecoverState state(false);
   CHECK((0 == state.errorCount));

@@ -480,7 +480,7 @@ SECTION("test_flush_marker") {
   arangodb::encoding::storeNumber(&buf[sizeof(::MMFilesMarker)], TRI_voc_tick_t(1), sizeof(TRI_voc_tick_t));
   buf.append(json->slice().begin(), json->slice().byteSize());
   auto* marker = reinterpret_cast<MMFilesMarker*>(&buf[0]);
-  marker->setSize(buf.size() - sizeof(::MMFilesMarker));
+  marker->setSize(buf.size());
   marker->setType(::MMFilesMarkerType::TRI_DF_MARKER_VPACK_FLUSH_SYNC);
   arangodb::MMFilesWalRecoverState state(false);
   CHECK((0 == state.errorCount));

@ -69,7 +69,7 @@ SECTION("test_defaults") {
|
|||
|
||||
CHECK((true == metaState._collections.empty()));
|
||||
CHECK(true == (10 == meta._cleanupIntervalStep));
|
||||
CHECK((true == (60 * 1000 == meta._commitIntervalMsec)));
|
||||
CHECK((true == (1000 == meta._commitIntervalMsec)));
|
||||
CHECK(true == (60 * 1000 == meta._consolidationIntervalMsec));
|
||||
CHECK((std::string("bytes_accum") == meta._consolidationPolicy.properties().get("type").copyString()));
|
||||
CHECK((false == !meta._consolidationPolicy.policy()));
|
||||
|
@ -132,7 +132,7 @@ SECTION("test_readDefaults") {
|
|||
CHECK((true == metaState.init(json->slice(), tmpString)));
|
||||
CHECK((true == metaState._collections.empty()));
|
||||
CHECK(10 == meta._cleanupIntervalStep);
|
||||
CHECK((60 * 1000 == meta._commitIntervalMsec));
|
||||
CHECK((1000 == meta._commitIntervalMsec));
|
||||
CHECK(60 * 1000 == meta._consolidationIntervalMsec);
|
||||
CHECK((std::string("bytes_accum") == meta._consolidationPolicy.properties().get("type").copyString()));
|
||||
CHECK((false == !meta._consolidationPolicy.policy()));
|
||||
|
@ -301,7 +301,7 @@ SECTION("test_writeDefaults") {
|
|||
tmpSlice = slice.get("cleanupIntervalStep");
|
||||
CHECK((true == tmpSlice.isNumber<size_t>() && 10 == tmpSlice.getNumber<size_t>()));
|
||||
tmpSlice = slice.get("commitIntervalMsec");
|
||||
CHECK((true == tmpSlice.isNumber<size_t>() && 60000 == tmpSlice.getNumber<size_t>()));
|
||||
CHECK((true == tmpSlice.isNumber<size_t>() && 1000 == tmpSlice.getNumber<size_t>()));
|
||||
tmpSlice = slice.get("consolidationIntervalMsec");
|
||||
CHECK((true == tmpSlice.isNumber<size_t>() && 60000 == tmpSlice.getNumber<size_t>()));
|
||||
tmpSlice = slice.get("consolidationPolicy");
|
||||
|
|
|
@ -166,7 +166,7 @@ TEST_CASE("FlushFeature", "[serverfeature][serverfeature-flush]") {
|
|||
TRI_voc_tick_t(1), sizeof(TRI_voc_tick_t));
|
||||
buf.append(json->slice().begin(), json->slice().byteSize());
|
||||
auto* marker = reinterpret_cast<MMFilesMarker*>(&buf[0]);
|
||||
marker->setSize(buf.size() - sizeof(::MMFilesMarker));
|
||||
marker->setSize(buf.size());
|
||||
marker->setType(::MMFilesMarkerType::TRI_DF_MARKER_VPACK_FLUSH_SYNC);
|
||||
arangodb::MMFilesWalRecoverState state(false);
|
||||
CHECK((0 == state.errorCount));
|
||||
|
@ -206,7 +206,7 @@ TEST_CASE("FlushFeature", "[serverfeature][serverfeature-flush]") {
|
|||
TRI_voc_tick_t(1), sizeof(TRI_voc_tick_t));
|
||||
buf.append(json->slice().begin(), json->slice().byteSize());
|
||||
auto* marker = reinterpret_cast<MMFilesMarker*>(&buf[0]);
|
||||
marker->setSize(buf.size() - sizeof(::MMFilesMarker));
|
||||
marker->setSize(buf.size());
|
||||
marker->setType(::MMFilesMarkerType::TRI_DF_MARKER_VPACK_FLUSH_SYNC);
|
||||
arangodb::MMFilesWalRecoverState state(false);
|
||||
CHECK((0 == state.errorCount));
|
||||
|
@ -246,7 +246,7 @@ TEST_CASE("FlushFeature", "[serverfeature][serverfeature-flush]") {
|
|||
TRI_voc_tick_t(1), sizeof(TRI_voc_tick_t));
|
||||
buf.append(json->slice().begin(), json->slice().byteSize());
|
||||
auto* marker = reinterpret_cast<MMFilesMarker*>(&buf[0]);
|
||||
marker->setSize(buf.size() - sizeof(::MMFilesMarker));
|
||||
marker->setSize(buf.size());
|
||||
marker->setType(::MMFilesMarkerType::TRI_DF_MARKER_VPACK_FLUSH_SYNC);
|
||||
arangodb::MMFilesWalRecoverState state(false);
|
||||
CHECK((0 == state.errorCount));
|
||||
|
@ -287,7 +287,7 @@ TEST_CASE("FlushFeature", "[serverfeature][serverfeature-flush]") {
|
|||
TRI_voc_tick_t(1), sizeof(TRI_voc_tick_t));
|
||||
buf.append(json->slice().begin(), json->slice().byteSize());
|
||||
auto* marker = reinterpret_cast<MMFilesMarker*>(&buf[0]);
|
||||
marker->setSize(buf.size() - sizeof(::MMFilesMarker));
|
||||
marker->setSize(buf.size());
|
||||
marker->setType(::MMFilesMarkerType::TRI_DF_MARKER_VPACK_FLUSH_SYNC);
|
||||
arangodb::MMFilesWalRecoverState state(false);
|
||||
CHECK((0 == state.errorCount));
|
||||
|
@ -329,7 +329,7 @@ TEST_CASE("FlushFeature", "[serverfeature][serverfeature-flush]") {
|
|||
TRI_voc_tick_t(42), sizeof(TRI_voc_tick_t));
|
||||
buf.append(json->slice().begin(), json->slice().byteSize());
|
||||
auto* marker = reinterpret_cast<MMFilesMarker*>(&buf[0]);
|
||||
marker->setSize(buf.size() - sizeof(::MMFilesMarker));
|
||||
marker->setSize(buf.size());
|
||||
marker->setType(::MMFilesMarkerType::TRI_DF_MARKER_VPACK_FLUSH_SYNC);
|
||||
arangodb::MMFilesWalRecoverState state(false);
|
||||
CHECK((0 == state.errorCount));
|
||||
|
@ -371,7 +371,7 @@ TEST_CASE("FlushFeature", "[serverfeature][serverfeature-flush]") {
|
|||
TRI_voc_tick_t(1), sizeof(TRI_voc_tick_t));
|
||||
buf.append(json->slice().begin(), json->slice().byteSize());
|
||||
auto* marker = reinterpret_cast<MMFilesMarker*>(&buf[0]);
|
||||
marker->setSize(buf.size() - sizeof(::MMFilesMarker));
|
||||
marker->setSize(buf.size());
|
||||
marker->setType(::MMFilesMarkerType::TRI_DF_MARKER_VPACK_FLUSH_SYNC);
|
||||
arangodb::MMFilesWalRecoverState state(false);
|
||||
CHECK((0 == state.errorCount));
|
||||
|
@ -413,7 +413,7 @@ TEST_CASE("FlushFeature", "[serverfeature][serverfeature-flush]") {
|
|||
TRI_voc_tick_t(1), sizeof(TRI_voc_tick_t));
|
||||
buf.append(json->slice().begin(), json->slice().byteSize());
|
||||
auto* marker = reinterpret_cast<MMFilesMarker*>(&buf[0]);
|
||||
marker->setSize(buf.size() - sizeof(::MMFilesMarker));
|
||||
marker->setSize(buf.size());
|
||||
marker->setType(::MMFilesMarkerType::TRI_DF_MARKER_VPACK_FLUSH_SYNC);
|
||||
arangodb::MMFilesWalRecoverState state(false);
|
||||
CHECK((0 == state.errorCount));
|
||||
|
|
|
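These repeated test hunks are the mirror image of the MMFilesFlushMarker fix above: with getSize() treated as the total marker size (header included), the tests stamp the full buffer size into the marker instead of subtracting sizeof(::MMFilesMarker). A compact sketch of how such a test buffer is assembled under that convention, using invented stand-ins rather than the real ArangoDB types:

#include <cstdint>
#include <string>

struct Marker {                // invented stand-in for ::MMFilesMarker
  uint32_t size;               // total size, header included
  uint32_t type;
  void setSize(uint32_t s) { size = s; }
  void setType(uint32_t t) { type = t; }
};

int main() {
  std::string payload = "{\"value\":42}";      // stand-in for the vpack body

  std::string buf(sizeof(Marker), '\0');       // reserve space for the header
  uint64_t tick = 1;                           // stand-in for TRI_voc_tick_t
  buf.append(reinterpret_cast<char const*>(&tick), sizeof(tick));
  buf.append(payload);

  // Mirrors the tests' own cast of the raw buffer; acceptable for a sketch.
  auto* marker = reinterpret_cast<Marker*>(&buf[0]);
  marker->setSize(static_cast<uint32_t>(buf.size())); // full size, header included
  marker->setType(1);
}
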
@@ -209,7 +209,7 @@ function IResearchFeatureDDLTestSuite () {
       properties = view.properties();
       assertTrue(Object === properties.constructor);
       assertEqual(10, properties.cleanupIntervalStep);
-      assertEqual(60000, properties.commitIntervalMsec);
+      assertEqual(1000, properties.commitIntervalMsec);
       assertEqual(60000, properties.consolidationIntervalMsec);
       assertTrue(Object === properties.consolidationPolicy.constructor);
       assertEqual(2, Object.keys(properties.consolidationPolicy).length);

@@ -240,7 +240,7 @@ function IResearchFeatureDDLTestSuite () {
       properties = view.properties();
       assertTrue(Object === properties.constructor);
       assertEqual(20, properties.cleanupIntervalStep);
-      assertEqual(60000, properties.commitIntervalMsec);
+      assertEqual(1000, properties.commitIntervalMsec);
       assertEqual(60000, properties.consolidationIntervalMsec);
       assertTrue(Object === properties.consolidationPolicy.constructor);
       assertEqual(2, Object.keys(properties.consolidationPolicy).length);

@@ -679,7 +679,7 @@ function IResearchFeatureDDLTestSuite () {
       properties = view.properties();
       assertTrue(Object === properties.constructor);
      assertEqual(10, properties.cleanupIntervalStep);
-      assertEqual(60000, properties.commitIntervalMsec);
+      assertEqual(1000, properties.commitIntervalMsec);
       assertEqual(60000, properties.consolidationIntervalMsec);
       assertTrue(Object === properties.consolidationPolicy.constructor);
       assertEqual(2, Object.keys(properties.consolidationPolicy).length);

@@ -811,7 +811,8 @@ function IResearchFeatureDDLTestSuite () {
 
       assertEqual(db._views().length, 0);
       assertEqual(db[viewName], undefined);
-    }
+    },
+
   };
 }
 
@@ -209,7 +209,7 @@ function IResearchFeatureDDLTestSuite () {
       properties = view.properties();
       assertTrue(Object === properties.constructor);
       assertEqual(10, properties.cleanupIntervalStep);
-      assertEqual(60000, properties.commitIntervalMsec);
+      assertEqual(1000, properties.commitIntervalMsec);
       assertEqual(60000, properties.consolidationIntervalMsec);
       assertTrue(Object === properties.consolidationPolicy.constructor);
       assertEqual(2, Object.keys(properties.consolidationPolicy).length);

@@ -240,7 +240,7 @@ function IResearchFeatureDDLTestSuite () {
       properties = view.properties();
       assertTrue(Object === properties.constructor);
       assertEqual(20, properties.cleanupIntervalStep);
-      assertEqual(60000, properties.commitIntervalMsec);
+      assertEqual(1000, properties.commitIntervalMsec);
       assertEqual(60000, properties.consolidationIntervalMsec);
       assertTrue(Object === properties.consolidationPolicy.constructor);
       assertEqual(2, Object.keys(properties.consolidationPolicy).length);

@@ -679,7 +679,7 @@ function IResearchFeatureDDLTestSuite () {
       properties = view.properties();
       assertTrue(Object === properties.constructor);
       assertEqual(10, properties.cleanupIntervalStep);
-      assertEqual(60000, properties.commitIntervalMsec);
+      assertEqual(1000, properties.commitIntervalMsec);
       assertEqual(60000, properties.consolidationIntervalMsec);
       assertTrue(Object === properties.consolidationPolicy.constructor);
       assertEqual(2, Object.keys(properties.consolidationPolicy).length);