1
0
Fork 0

issue 555.1.1: try loading previous data store snapshots if checkpoint file unavailable, enhance error message, address some warnings (#8975)

This commit is contained in:
Vasiliy 2019-05-13 18:33:44 +03:00 committed by Andrey Abramov
parent 404bcc8b0c
commit 12bd4c785b
10 changed files with 369 additions and 178 deletions

View File

@ -173,13 +173,14 @@ std::tuple<ExecutionState, EnumerateCollectionStats, size_t> EnumerateCollection
std::tie(_state, _input) = _fetcher.fetchRow();
if (_state == ExecutionState::WAITING) {
return {_state, stats, 0};
return std::make_tuple(_state, stats, 0); // tuple, cannot use initializer list due to build failure
}
if (!_input) {
TRI_ASSERT(_state == ExecutionState::DONE);
return {_state, stats, 0};
return std::make_tuple(_state, stats, 0); // tuple, cannot use initializer list due to build failure
}
_cursor->reset();
_cursorHasMore = _cursor->hasMore();
}
@ -192,10 +193,10 @@ std::tuple<ExecutionState, EnumerateCollectionStats, size_t> EnumerateCollection
stats.incrScanned(actuallySkipped);
if (_state == ExecutionState::DONE && !_cursorHasMore) {
return {ExecutionState::DONE, stats, actuallySkipped};
return std::make_tuple(ExecutionState::DONE, stats, actuallySkipped); // tuple, cannot use initializer list due to build failure
}
return {ExecutionState::HASMORE, stats, actuallySkipped};
return std::make_tuple(ExecutionState::HASMORE, stats, actuallySkipped); // tuple, cannot use initializer list due to build failure
}
void EnumerateCollectionExecutor::initializeCursor() {

View File

@ -229,7 +229,7 @@ struct ExecuteSkipVariant<SkipVariants::FETCHER> {
static std::tuple<ExecutionState, typename Executor::Stats, size_t> executeSkip(
Executor& executor, typename Executor::Fetcher& fetcher, size_t toSkip) {
auto res = fetcher.skipRows(toSkip);
return {res.first, typename Executor::Stats{}, res.second};
return std::make_tuple(res.first, typename Executor::Stats{}, res.second); // tuple, cannot use initializer list due to build failure
}
};
@ -250,7 +250,7 @@ struct ExecuteSkipVariant<SkipVariants::DEFAULT> {
// this function should never be executed
TRI_ASSERT(false);
// Make MSVC happy:
return {ExecutionState::DONE, {}, 0};
return std::make_tuple(ExecutionState::DONE, typename Executor::Stats{}, 0); // tuple, cannot use initializer list due to build failure
}
};

View File

@ -294,17 +294,17 @@ IResearchViewExecutorBase<Impl, Traits>::skipRows(size_t toSkip) {
if (!_inputRow.isInitialized()) {
if (_upstreamState == ExecutionState::DONE) {
// There will be no more rows, stop fetching.
return {ExecutionState::DONE, stats, 0};
return std::make_tuple(ExecutionState::DONE, stats, 0); // tuple, cannot use initializer list due to build failure
}
std::tie(_upstreamState, _inputRow) = _fetcher.fetchRow();
if (_upstreamState == ExecutionState::WAITING) {
return {_upstreamState, stats, 0};
return std::make_tuple(_upstreamState, stats, 0); // tuple, cannot use initializer list due to build failure
}
if (!_inputRow.isInitialized()) {
return {ExecutionState::DONE, stats, 0};
return std::make_tuple(ExecutionState::DONE, stats, 0); // tuple, cannot use initializer list due to build failure
}
// reset must be called exactly after we've got a new and valid input row.
@ -321,7 +321,7 @@ IResearchViewExecutorBase<Impl, Traits>::skipRows(size_t toSkip) {
_inputRow = InputAqlItemRow{CreateInvalidInputRowHint{}};
}
return {ExecutionState::HASMORE, stats, skipped};
return std::make_tuple(ExecutionState::HASMORE, stats, skipped); // tuple, cannot use initializer list due to build failure
}
template <typename Impl, typename Traits>

View File

@ -537,21 +537,25 @@ std::tuple<ExecutionState, IndexExecutor::Stats, size_t> IndexExecutor::skipRows
if (!_input) {
if (_state == ExecutionState::DONE) {
size_t skipped = _skipped;
_skipped = 0;
return {_state, stats, skipped};
return std::make_tuple(_state, stats, skipped); // tuple, cannot use initializer list due to build failure
}
std::tie(_state, _input) = _fetcher.fetchRow();
if (_state == ExecutionState::WAITING) {
return {_state, stats, 0};
return std::make_tuple(_state, stats, 0); // tuple, cannot use initializer list due to build failure
}
if (!_input) {
TRI_ASSERT(_state == ExecutionState::DONE);
size_t skipped = _skipped;
_skipped = 0;
return {_state, stats, skipped};
return std::make_tuple(_state, stats, skipped); // tuple, cannot use initializer list due to build failure
}
initIndexes(_input);
@ -577,10 +581,12 @@ std::tuple<ExecutionState, IndexExecutor::Stats, size_t> IndexExecutor::skipRows
}
size_t skipped = _skipped;
_skipped = 0;
if (_state == ExecutionState::DONE && !_input) {
return {ExecutionState::DONE, stats, skipped};
} else {
return {ExecutionState::HASMORE, stats, skipped};
return std::make_tuple(ExecutionState::DONE, stats, skipped); // tuple, cannot use initializer list due to build failure
}
return std::make_tuple(ExecutionState::HASMORE, stats, skipped); // tuple, cannot use initializer list due to build failure
}

View File

@ -50,6 +50,12 @@
namespace {
////////////////////////////////////////////////////////////////////////////////
/// @brief the suffix appended to the index_meta filename to generate the
/// backup filename to be used for renaming
////////////////////////////////////////////////////////////////////////////////
const irs::string_ref IRESEARCH_BACKUP_SUFFIX(".backup");
////////////////////////////////////////////////////////////////////////////////
/// @brief the suffix appended to the index_meta filename to generate the
/// corresponding checkpoint file
@ -1003,7 +1009,7 @@ arangodb::Result IResearchLink::initDataStore(InitCallback const& initCallback,
try {
recovery_reader = irs::directory_reader::open(*(_dataStore._directory));
} catch (irs::index_not_found const&) {
// ingore
// ignore
}
}
@ -1011,19 +1017,54 @@ arangodb::Result IResearchLink::initDataStore(InitCallback const& initCallback,
// '.checkpoint' file for the last state of the data store
// if it's missing then probably the WAL tail was lost
if (recovery_reader) {
auto& checkpoint = recovery_reader.meta().filename;
auto checkpointFile = checkpoint + std::string(IRESEARCH_CHECKPOINT_SUFFIX);
auto ref = irs::directory_utils::reference( // create a reference
irs::index_file_refs::ref_t ref;
// find the latest segment state with a checkpoint file
for(;;) {
auto& filename = recovery_reader.meta().filename; // segment state filename
auto checkpointFile = // checkpoint filename
filename + std::string(IRESEARCH_CHECKPOINT_SUFFIX);
ref = irs::directory_utils::reference( // create a reference
*(_dataStore._directory), checkpointFile, false // args
);
if (!ref) {
if (ref) {
break; // found checkpoint file for latest state
}
auto src = _dataStore._path;
auto& srcFilename = filename;
auto dst = src;
auto dstFilename = filename + std::string(IRESEARCH_BACKUP_SUFFIX);
src /= srcFilename;
dst /= dstFilename;
// move segment state file without a matching checkpoint out of the way
if (!src.rename(dst)) {
return arangodb::Result( // result
TRI_ERROR_ARANGO_ILLEGAL_STATE, // code
std::string("failed to rename the latest data store state file for arangosearch link '") + std::to_string(id()) + "', source '" + srcFilename + "' destination '" + dstFilename + "' in path: " + _dataStore._path.utf8()
);
}
try {
recovery_reader.reset(); // unset to allow for checking for success below
recovery_reader = irs::directory_reader::open(*(_dataStore._directory)); // retry opening
} catch (irs::index_not_found const&) {
// ignore
}
if (!recovery_reader) {
return arangodb::Result( // result
TRI_ERROR_ARANGO_ILLEGAL_STATE, // code
std::string("failed to find checkpoint file matching the latest data store state for arangosearch link '") + std::to_string(id()) + "', expecting file '" + checkpointFile + "' in path: " + _dataStore._path.utf8()
);
}
}
auto& checkpointFile = *ref; // ref non-null ensured by above loop
auto in = _dataStore._directory->open( // open checkpoint file
checkpointFile, irs::IOAdvice::NORMAL // args, use 'NORMAL' since the file could be empty
);

View File

@ -103,20 +103,29 @@ arangodb::Result canUseAnalyzers( // validate
return arangodb::Result();
}
bool createLink( // create link
arangodb::Result createLink( // create link
arangodb::LogicalCollection& collection, // link collection
arangodb::LogicalView const& view, // link view
arangodb::velocypack::Slice definition // link definition
) {
try {
bool isNew = false;
auto link = collection.createIndex(definition, isNew);
LOG_TOPIC_IF("2c861", DEBUG, arangodb::iresearch::TOPIC, link)
<< "added link '" << link->id() << "'";
return link && isNew;
if (!(link && isNew)) {
return arangodb::Result( // result
TRI_ERROR_INTERNAL, // code
std::string("failed to create link between arangosearch view '") + view.name() + "' and collection '" + collection.name() + "'"
);
}
} catch (arangodb::basics::Exception const& e) {
return arangodb::Result(e.code(), e.what());
}
return arangodb::Result();
}
bool createLink( // create link
arangodb::Result createLink( // create link
arangodb::LogicalCollection& collection, // link collection
arangodb::iresearch::IResearchViewCoordinator const& view, // link view
arangodb::velocypack::Slice definition // link definition
@ -141,7 +150,10 @@ bool createLink( // create link
);
if (!arangodb::iresearch::mergeSliceSkipKeys(builder, definition, acceptor)) {
return false;
return arangodb::Result( // result
TRI_ERROR_INTERNAL, // code
std::string("failed to generate definition while creating link between arangosearch view '") + view.name() + "' and collection '" + collection.name() + "'"
);
}
builder.close();
@ -150,20 +162,27 @@ bool createLink( // create link
return arangodb::methods::Indexes::ensureIndex( // ensure index
&collection, builder.slice(), true, tmp // args
).ok();
);
}
template<typename ViewType>
bool dropLink( // drop link
arangodb::Result dropLink( // drop link
arangodb::LogicalCollection& collection, // link collection
arangodb::iresearch::IResearchLink const& link // link to drop
) {
// don't need to create an extra transaction inside arangodb::methods::Indexes::drop(...)
return collection.dropIndex(link.id());
if (!collection.dropIndex(link.id())) {
return arangodb::Result( // result
TRI_ERROR_INTERNAL, // code
std::string("failed to drop link '") + std::to_string(link.id()) + "' from collection '" + collection.name() + "'"
);
}
return arangodb::Result();
}
template<>
bool dropLink<arangodb::iresearch::IResearchViewCoordinator>( // drop link
arangodb::Result dropLink<arangodb::iresearch::IResearchViewCoordinator>( // drop link
arangodb::LogicalCollection& collection, // link collection
arangodb::iresearch::IResearchLink const& link // link to drop
) {
@ -176,7 +195,7 @@ bool dropLink<arangodb::iresearch::IResearchViewCoordinator>( // drop link
);
builder.close();
return arangodb::methods::Indexes::drop(&collection, builder.slice()).ok();
return arangodb::methods::Indexes::drop(&collection, builder.slice());
}
template <typename ViewType>
@ -195,17 +214,15 @@ arangodb::Result modifyLinks( // modify links
struct State {
std::shared_ptr<arangodb::LogicalCollection> _collection;
size_t _collectionsToLockOffset; // std::numeric_limits<size_t>::max() ==
// removal only
size_t _collectionsToLockOffset; // std::numeric_limits<size_t>::max() == removal only
std::shared_ptr<arangodb::iresearch::IResearchLink> _link;
size_t _linkDefinitionsOffset;
arangodb::Result _result; // operation result
bool _stale = false; // request came from the stale list
bool _valid = true;
explicit State(size_t collectionsToLockOffset)
: State(collectionsToLockOffset, std::numeric_limits<size_t>::max()) {}
State(size_t collectionsToLockOffset, size_t linkDefinitionsOffset)
: _collectionsToLockOffset(collectionsToLockOffset),
_linkDefinitionsOffset(linkDefinitionsOffset) {}
: _collectionsToLockOffset(collectionsToLockOffset), _linkDefinitionsOffset(linkDefinitionsOffset) {}
};
std::vector<std::string> collectionsToLock;
std::vector<std::pair<arangodb::velocypack::Builder, arangodb::iresearch::IResearchLinkMeta>> linkDefinitions;
@ -215,12 +232,10 @@ arangodb::Result modifyLinks( // modify links
auto collection = linksItr.key();
if (!collection.isString()) {
return arangodb::Result(TRI_ERROR_BAD_PARAMETER,
std::string(
"error parsing link parameters from json for "
"arangosearch view '") +
view.name() + "' offset '" +
arangodb::basics::StringUtils::itoa(linksItr.index()) + '"');
return arangodb::Result( // result
TRI_ERROR_BAD_PARAMETER, // code
std::string("error parsing link parameters from json for arangosearch view '") + view.name() + "' offset '" + arangodb::basics::StringUtils::itoa(linksItr.index()) + '"'
);
}
auto link = linksItr.value();
@ -465,19 +480,20 @@ arangodb::Result modifyLinks( // modify links
// execute removals
for (auto& state : linkModifications) {
if (state._link) { // link removal or recreate request
LOG_TOPIC("9da74", DEBUG, arangodb::iresearch::TOPIC)
<< "removed link '" << state._link->id() << "'";
state._valid = dropLink<ViewType>(*(state._collection), *(state._link));
state._result = dropLink<ViewType>(*(state._collection), *(state._link));
modified.emplace(state._collection->id());
}
}
// execute additions
for (auto& state : linkModifications) {
if (state._valid && state._linkDefinitionsOffset < linkDefinitions.size()) {
state._valid =
createLink(*(state._collection), view,
linkDefinitions[state._linkDefinitionsOffset].first.slice());
for (auto& state: linkModifications) {
if (state._result.ok() // valid state (unmodified or after removal)
&& state._linkDefinitionsOffset < linkDefinitions.size()) {
state._result = createLink( // create link
*(state._collection), // collection
view, // view
linkDefinitions[state._linkDefinitionsOffset].first.slice() // definition
);
modified.emplace(state._collection->id());
}
}
@ -485,9 +501,12 @@ arangodb::Result modifyLinks( // modify links
std::string error;
// validate success
for (auto& state : linkModifications) {
if (!state._valid) {
error.append(error.empty() ? "" : ", ").append(collectionsToLock[state._collectionsToLockOffset]);
for (auto& state: linkModifications) {
if (!state._result.ok()) {
error.append(error.empty() ? "" : ", ") // separator
.append(collectionsToLock[state._collectionsToLockOffset]) // collection name
.append(": ").append(std::to_string(state._result.errorNumber())) // error code
.append(" ").append(state._result.errorMessage()); // error message
}
}
@ -495,11 +514,10 @@ arangodb::Result modifyLinks( // modify links
return arangodb::Result();
}
return arangodb::Result(
TRI_ERROR_ARANGO_ILLEGAL_STATE,
std::string("failed to update links while updating arangosearch view '") +
view.name() +
"', retry same request or examine errors for collections: " + error);
return arangodb::Result( // result
TRI_ERROR_ARANGO_ILLEGAL_STATE, // code
std::string("failed to update links while updating arangosearch view '") + view.name() + "', retry same request or examine errors for collections: " + error
);
}
} // namespace

View File

@ -561,11 +561,11 @@ void JS_List(v8::FunctionCallbackInfo<v8::Value> const& args) {
for (size_t i = 0, count = result.size(); i < count; ++i) {
auto analyzer = WrapAnalyzer(isolate, result[i]);
if (analyzer.IsEmpty()) {
if (analyzer.IsEmpty() || i > std::numeric_limits<uint32_t>::max()) {
TRI_V8_THROW_EXCEPTION_MEMORY();
}
v8Result->Set(i, analyzer);
v8Result->Set(static_cast<uint32_t>(i), analyzer); // cast safe because of check above
}
TRI_V8_RETURN(v8Result);

View File

@ -366,7 +366,7 @@ SECTION("test_flush_marker") {
arangodb::encoding::storeNumber(&buf[sizeof(::MMFilesMarker)], TRI_voc_tick_t(1), sizeof(TRI_voc_tick_t));
buf.append(json->slice().begin(), json->slice().byteSize());
auto* marker = reinterpret_cast<MMFilesMarker*>(&buf[0]);
marker->setSize(buf.size());
marker->setSize(static_cast<uint32_t>(buf.size()));
marker->setType(::MMFilesMarkerType::TRI_DF_MARKER_VPACK_FLUSH_SYNC);
arangodb::MMFilesWalRecoverState state(false);
CHECK((0 == state.errorCount));
@ -382,7 +382,7 @@ SECTION("test_flush_marker") {
arangodb::encoding::storeNumber(&buf[sizeof(::MMFilesMarker)], TRI_voc_tick_t(1), sizeof(TRI_voc_tick_t));
buf.append(json->slice().begin(), json->slice().byteSize());
auto* marker = reinterpret_cast<MMFilesMarker*>(&buf[0]);
marker->setSize(buf.size());
marker->setSize(static_cast<uint32_t>(buf.size()));
marker->setType(::MMFilesMarkerType::TRI_DF_MARKER_VPACK_FLUSH_SYNC);
arangodb::MMFilesWalRecoverState state(false);
CHECK((0 == state.errorCount));
@ -398,7 +398,7 @@ SECTION("test_flush_marker") {
arangodb::encoding::storeNumber(&buf[sizeof(::MMFilesMarker)], TRI_voc_tick_t(1), sizeof(TRI_voc_tick_t));
buf.append(json->slice().begin(), json->slice().byteSize());
auto* marker = reinterpret_cast<MMFilesMarker*>(&buf[0]);
marker->setSize(buf.size());
marker->setSize(static_cast<uint32_t>(buf.size()));
marker->setType(::MMFilesMarkerType::TRI_DF_MARKER_VPACK_FLUSH_SYNC);
arangodb::MMFilesWalRecoverState state(false);
CHECK((0 == state.errorCount));
@ -414,7 +414,7 @@ SECTION("test_flush_marker") {
arangodb::encoding::storeNumber(&buf[sizeof(::MMFilesMarker)], TRI_voc_tick_t(1), sizeof(TRI_voc_tick_t));
buf.append(json->slice().begin(), json->slice().byteSize());
auto* marker = reinterpret_cast<MMFilesMarker*>(&buf[0]);
marker->setSize(buf.size());
marker->setSize(static_cast<uint32_t>(buf.size()));
marker->setType(::MMFilesMarkerType::TRI_DF_MARKER_VPACK_FLUSH_SYNC);
arangodb::MMFilesWalRecoverState state(false);
CHECK((0 == state.errorCount));
@ -430,7 +430,7 @@ SECTION("test_flush_marker") {
arangodb::encoding::storeNumber(&buf[sizeof(::MMFilesMarker)], TRI_voc_tick_t(1), sizeof(TRI_voc_tick_t));
buf.append(json->slice().begin(), json->slice().byteSize());
auto* marker = reinterpret_cast<MMFilesMarker*>(&buf[0]);
marker->setSize(buf.size());
marker->setSize(static_cast<uint32_t>(buf.size()));
marker->setType(::MMFilesMarkerType::TRI_DF_MARKER_VPACK_FLUSH_SYNC);
arangodb::MMFilesWalRecoverState state(false);
CHECK((0 == state.errorCount));
@ -446,7 +446,7 @@ SECTION("test_flush_marker") {
arangodb::encoding::storeNumber(&buf[sizeof(::MMFilesMarker)], TRI_voc_tick_t(1), sizeof(TRI_voc_tick_t));
buf.append(json->slice().begin(), json->slice().byteSize());
auto* marker = reinterpret_cast<MMFilesMarker*>(&buf[0]);
marker->setSize(buf.size());
marker->setSize(static_cast<uint32_t>(buf.size()));
marker->setType(::MMFilesMarkerType::TRI_DF_MARKER_VPACK_FLUSH_SYNC);
arangodb::MMFilesWalRecoverState state(false);
CHECK((0 == state.errorCount));
@ -472,7 +472,7 @@ SECTION("test_flush_marker") {
arangodb::encoding::storeNumber(&buf[sizeof(::MMFilesMarker)], TRI_voc_tick_t(1), sizeof(TRI_voc_tick_t));
buf.append(json->slice().begin(), json->slice().byteSize());
auto* marker = reinterpret_cast<MMFilesMarker*>(&buf[0]);
marker->setSize(buf.size());
marker->setSize(static_cast<uint32_t>(buf.size()));
marker->setType(::MMFilesMarkerType::TRI_DF_MARKER_VPACK_FLUSH_SYNC);
arangodb::MMFilesWalRecoverState state(false);
CHECK((0 == state.errorCount));
@ -488,7 +488,7 @@ SECTION("test_flush_marker") {
arangodb::encoding::storeNumber(&buf[sizeof(::MMFilesMarker)], TRI_voc_tick_t(1), sizeof(TRI_voc_tick_t));
buf.append(json->slice().begin(), json->slice().byteSize());
auto* marker = reinterpret_cast<MMFilesMarker*>(&buf[0]);
marker->setSize(buf.size());
marker->setSize(static_cast<uint32_t>(buf.size()));
marker->setType(::MMFilesMarkerType::TRI_DF_MARKER_VPACK_FLUSH_SYNC);
arangodb::MMFilesWalRecoverState state(false);
CHECK((0 == state.errorCount));
@ -496,18 +496,118 @@ SECTION("test_flush_marker") {
CHECK((0 == state.errorCount));
}
// open existing without checkpoint file
// will commit 'link' and set RecoveryState to DONE
{
auto linkJson1 = arangodb::velocypack::Parser::fromJson("{ \"id\": 43, \"includeAllFields\": true, \"type\": \"arangosearch\", \"view\": \"testView\" }");
StorageEngineMock::inRecoveryResult = false;
dbFeature->recoveryDone();
}
// commit failed write WAL
{
auto before = StorageEngineMock::flushSubscriptionResult;
auto restore = irs::make_finally([&before]()->void { StorageEngineMock::flushSubscriptionResult = before; });
StorageEngineMock::flushSubscriptionResult = arangodb::Result(TRI_ERROR_INTERNAL);
arangodb::transaction::Methods trx(
arangodb::transaction::StandaloneContext::Create(*vocbase),
EMPTY,
EMPTY,
EMPTY,
arangodb::transaction::Options()
);
CHECK((trx.begin().ok()));
CHECK((link->insert(trx, arangodb::LocalDocumentId(1), doc0->slice(), arangodb::Index::OperationMode::normal).ok()));
CHECK((trx.commit().ok()));
CHECK((!link->commit().ok()));
}
// commit failed write checkpoint
{
arangodb::transaction::Methods trx(
arangodb::transaction::StandaloneContext::Create(*vocbase),
EMPTY,
EMPTY,
EMPTY,
arangodb::transaction::Options()
);
CHECK((trx.begin().ok()));
CHECK((link->insert(trx, arangodb::LocalDocumentId(2), doc1->slice(), arangodb::Index::OperationMode::normal).ok()));
CHECK((trx.commit().ok()));
irs::utf8_path path;
path /= s.testFilesystemPath;
path /= "databases";
path /= std::string("database-") + std::to_string(vocbase->id());
path /= std::string("arangosearch-") + std::to_string(logicalCollection->id()) + "_" + std::to_string(link->id());
path /= "segments_3.checkpoint";
CHECK((path.mkdir())); //create a directory by same name as the checkpoint file to force error
CHECK((!link->commit().ok()));
}
// commit success
{
arangodb::transaction::Methods trx(
arangodb::transaction::StandaloneContext::Create(*vocbase),
EMPTY,
EMPTY,
EMPTY,
arangodb::transaction::Options()
);
CHECK((trx.begin().ok()));
CHECK((link->insert(trx, arangodb::LocalDocumentId(3), doc2->slice(), arangodb::Index::OperationMode::normal).ok()));
CHECK((trx.commit().ok()));
CHECK((link->commit().ok()));
}
}
SECTION("test_flush_marker_reopen") {
static std::vector<std::string> const EMPTY;
auto doc0 = arangodb::velocypack::Parser::fromJson("{ \"abc\": \"def\" }");
auto before = StorageEngineMock::inRecoveryResult;
StorageEngineMock::inRecoveryResult = true;
auto restore = irs::make_finally([&before]()->void { StorageEngineMock::inRecoveryResult = before; });
auto* dbFeature = arangodb::application_features::ApplicationServer::lookupFeature<arangodb::DatabaseFeature>("Database");
REQUIRE((dbFeature));
TRI_vocbase_t* vocbase;
REQUIRE((TRI_ERROR_NO_ERROR == dbFeature->createDatabase(1, "testDatabase", vocbase)));
auto collectionJson = arangodb::velocypack::Parser::fromJson("{ \"name\": \"testCollection\", \"id\": 100 }");
auto logicalCollection = vocbase->createCollection(collectionJson->slice());
REQUIRE((false == !logicalCollection));
bool created;
// open existing without any checkpoint files
{
auto linkJson1 = arangodb::velocypack::Parser::fromJson("{ \"id\": 42, \"includeAllFields\": true, \"type\": \"arangosearch\", \"view\": \"testView\" }");
// initial population of link
{
auto before = StorageEngineMock::inRecoveryResult;
StorageEngineMock::inRecoveryResult = false;
auto restore = irs::make_finally([&before]()->void { StorageEngineMock::inRecoveryResult = before; });
std::shared_ptr<arangodb::Index> index1;
CHECK((arangodb::iresearch::IResearchMMFilesLink::factory().instantiate(index1, *logicalCollection, linkJson1->slice(), 43, false).ok()));
CHECK((arangodb::iresearch::IResearchMMFilesLink::factory().instantiate(index1, *logicalCollection, linkJson1->slice(), 42, false).ok()));
CHECK((false == !index1));
auto link1 = std::dynamic_pointer_cast<arangodb::iresearch::IResearchLink>(index1);
CHECK((false == !link1));
// remove initial 'checkpoint' file
{
irs::utf8_path path;
path /= s.testFilesystemPath;
path /= "databases";
path /= std::string("database-") + std::to_string(vocbase->id());
path /= std::string("arangosearch-") + std::to_string(logicalCollection->id()) + "_42";
irs::fs_directory dir(path.utf8());
auto reader = irs::directory_reader::open(dir);
path /= reader.meta().filename + ".checkpoint";
bool exists;
CHECK((path.exists_file(exists) && exists));
CHECK((path.remove()));
}
// insert doc0
{
arangodb::transaction::Methods trx(
arangodb::transaction::StandaloneContext::Create(*vocbase),
EMPTY,
@ -518,12 +618,61 @@ SECTION("test_flush_marker") {
CHECK((trx.begin().ok()));
CHECK((link1->insert(trx, arangodb::LocalDocumentId(1), doc0->slice(), arangodb::Index::OperationMode::normal).ok()));
CHECK((trx.commit().ok()));
CHECK((link1->commit().ok()));
}
// remove initial 'checkpoint' file
{
irs::utf8_path path;
path /= s.testFilesystemPath;
path /= "databases";
path /= std::string("database-") + std::to_string(vocbase->id());
path /= std::string("arangosearch-") + std::to_string(logicalCollection->id()) + "_42";
irs::fs_directory dir(path.utf8());
auto reader = irs::directory_reader::open(dir);
path /= reader.meta().filename + ".checkpoint";
bool exists;
CHECK((path.exists_file(exists) && exists));
CHECK((path.remove())); // remove post-commit 'checkpoint' file
}
}
auto index1 = logicalCollection->createIndex(linkJson1->slice(), created);
CHECK((true == !index1));
}
// open existing without last checkpoint file
{
auto linkJson1 = arangodb::velocypack::Parser::fromJson("{ \"id\": 43, \"includeAllFields\": true, \"type\": \"arangosearch\", \"view\": \"testView\" }");
// initial population of link
{
auto before = StorageEngineMock::inRecoveryResult;
StorageEngineMock::inRecoveryResult = false;
auto restore = irs::make_finally([&before]()->void { StorageEngineMock::inRecoveryResult = before; });
dbFeature->recoveryDone(); // will commit 'link1' (it will also commit 'link' and set RecoveryState to DONE)
std::shared_ptr<arangodb::Index> index1;
CHECK((arangodb::iresearch::IResearchMMFilesLink::factory().instantiate(index1, *logicalCollection, linkJson1->slice(), 43, false).ok()));
CHECK((false == !index1));
auto link1 = std::dynamic_pointer_cast<arangodb::iresearch::IResearchLink>(index1);
CHECK((false == !link1));
// insert doc0
{
arangodb::transaction::Methods trx(
arangodb::transaction::StandaloneContext::Create(*vocbase),
EMPTY,
EMPTY,
EMPTY,
arangodb::transaction::Options()
);
CHECK((trx.begin().ok()));
CHECK((link1->insert(trx, arangodb::LocalDocumentId(1), doc0->slice(), arangodb::Index::OperationMode::normal).ok()));
CHECK((trx.commit().ok()));
CHECK((link1->commit().ok()));
}
// remove initial 'checkpoint' file
{
irs::utf8_path path;
path /= s.testFilesystemPath;
path /= "databases";
@ -534,9 +683,34 @@ SECTION("test_flush_marker") {
path /= reader.meta().filename + ".checkpoint";
bool exists;
CHECK((path.exists_file(exists) && exists));
CHECK((path.remove()));
CHECK((path.remove())); // remove post-commit 'checkpoint' file
}
}
auto index1 = logicalCollection->createIndex(linkJson1->slice(), created);
CHECK((true == !index1));
CHECK((false == !index1)); // link creation success in recovery
// first and only marker
{
auto json = arangodb::velocypack::Parser::fromJson("{ \"type\": \"arangosearch\", \"data\": { \"cid\": 100, \"iid\": 45, \"value\": \"segments_1\" } }");
std::basic_string<uint8_t> buf;
buf.resize(sizeof(::MMFilesMarker) + sizeof(TRI_voc_tick_t)); // reserve space for header
arangodb::encoding::storeNumber(&buf[sizeof(::MMFilesMarker)], TRI_voc_tick_t(1), sizeof(TRI_voc_tick_t));
buf.append(json->slice().begin(), json->slice().byteSize());
auto* marker = reinterpret_cast<MMFilesMarker*>(&buf[0]);
marker->setSize(static_cast<uint32_t>(buf.size()));
marker->setType(::MMFilesMarkerType::TRI_DF_MARKER_VPACK_FLUSH_SYNC);
arangodb::MMFilesWalRecoverState state(false);
CHECK((0 == state.errorCount));
CHECK((arangodb::MMFilesWalRecoverState::ReplayMarker(marker, &state, nullptr)));
CHECK((0 == state.errorCount));
}
auto before = StorageEngineMock::inRecoveryResult;
StorageEngineMock::inRecoveryResult = false;
auto restore = irs::make_finally([&before]()->void { StorageEngineMock::inRecoveryResult = before; });
dbFeature->recoveryDone(); // will commit 'link1' (it will also commit 'link' and set RecoveryState to DONE)
logicalCollection->dropIndex(index1->id());
}
// open existing with checkpoint file unmatched by marker (missing WAL tail)
@ -622,7 +796,7 @@ SECTION("test_flush_marker") {
arangodb::encoding::storeNumber(&buf[sizeof(::MMFilesMarker)], TRI_voc_tick_t(1), sizeof(TRI_voc_tick_t));
buf.append(json->slice().begin(), json->slice().byteSize());
auto* marker = reinterpret_cast<MMFilesMarker*>(&buf[0]);
marker->setSize(buf.size());
marker->setSize(static_cast<uint32_t>(buf.size()));
marker->setType(::MMFilesMarkerType::TRI_DF_MARKER_VPACK_FLUSH_SYNC);
arangodb::MMFilesWalRecoverState state(false);
CHECK((0 == state.errorCount));
@ -632,6 +806,7 @@ SECTION("test_flush_marker") {
StorageEngineMock::inRecoveryResult = false;
dbFeature->recoveryDone(); // will commit 'link1' (it will also commit 'link' and set RecoveryState to DONE)
logicalCollection->dropIndex(index1->id());
}
// open existing with checkpoint file matching second marker (i.e. DURING_CHECKPOINT then again DURING_CHECKPOINT)
@ -677,7 +852,7 @@ SECTION("test_flush_marker") {
arangodb::encoding::storeNumber(&buf[sizeof(::MMFilesMarker)], TRI_voc_tick_t(1), sizeof(TRI_voc_tick_t));
buf.append(json->slice().begin(), json->slice().byteSize());
auto* marker = reinterpret_cast<MMFilesMarker*>(&buf[0]);
marker->setSize(buf.size());
marker->setSize(static_cast<uint32_t>(buf.size()));
marker->setType(::MMFilesMarkerType::TRI_DF_MARKER_VPACK_FLUSH_SYNC);
arangodb::MMFilesWalRecoverState state(false);
CHECK((0 == state.errorCount));
@ -693,7 +868,7 @@ SECTION("test_flush_marker") {
arangodb::encoding::storeNumber(&buf[sizeof(::MMFilesMarker)], TRI_voc_tick_t(1), sizeof(TRI_voc_tick_t));
buf.append(json->slice().begin(), json->slice().byteSize());
auto* marker = reinterpret_cast<MMFilesMarker*>(&buf[0]);
marker->setSize(buf.size());
marker->setSize(static_cast<uint32_t>(buf.size()));
marker->setType(::MMFilesMarkerType::TRI_DF_MARKER_VPACK_FLUSH_SYNC);
arangodb::MMFilesWalRecoverState state(false);
CHECK((0 == state.errorCount));
@ -703,6 +878,7 @@ SECTION("test_flush_marker") {
StorageEngineMock::inRecoveryResult = false;
dbFeature->recoveryDone(); // will commit 'link1' (it will also commit 'link' and set RecoveryState to DONE)
logicalCollection->dropIndex(index1->id());
}
// open existing with checkpoint file matching third marker (ensure initial recovery state changes, i.e. DURING_CHECKPOINT then BEFORE_CHECKPOINT then DURING_CHECKPOINT))
@ -748,7 +924,7 @@ SECTION("test_flush_marker") {
arangodb::encoding::storeNumber(&buf[sizeof(::MMFilesMarker)], TRI_voc_tick_t(1), sizeof(TRI_voc_tick_t));
buf.append(json->slice().begin(), json->slice().byteSize());
auto* marker = reinterpret_cast<MMFilesMarker*>(&buf[0]);
marker->setSize(buf.size());
marker->setSize(static_cast<uint32_t>(buf.size()));
marker->setType(::MMFilesMarkerType::TRI_DF_MARKER_VPACK_FLUSH_SYNC);
arangodb::MMFilesWalRecoverState state(false);
CHECK((0 == state.errorCount));
@ -764,7 +940,7 @@ SECTION("test_flush_marker") {
arangodb::encoding::storeNumber(&buf[sizeof(::MMFilesMarker)], TRI_voc_tick_t(1), sizeof(TRI_voc_tick_t));
buf.append(json->slice().begin(), json->slice().byteSize());
auto* marker = reinterpret_cast<MMFilesMarker*>(&buf[0]);
marker->setSize(buf.size());
marker->setSize(static_cast<uint32_t>(buf.size()));
marker->setType(::MMFilesMarkerType::TRI_DF_MARKER_VPACK_FLUSH_SYNC);
arangodb::MMFilesWalRecoverState state(false);
CHECK((0 == state.errorCount));
@ -780,7 +956,7 @@ SECTION("test_flush_marker") {
arangodb::encoding::storeNumber(&buf[sizeof(::MMFilesMarker)], TRI_voc_tick_t(1), sizeof(TRI_voc_tick_t));
buf.append(json->slice().begin(), json->slice().byteSize());
auto* marker = reinterpret_cast<MMFilesMarker*>(&buf[0]);
marker->setSize(buf.size());
marker->setSize(static_cast<uint32_t>(buf.size()));
marker->setType(::MMFilesMarkerType::TRI_DF_MARKER_VPACK_FLUSH_SYNC);
arangodb::MMFilesWalRecoverState state(false);
CHECK((0 == state.errorCount));
@ -790,62 +966,7 @@ SECTION("test_flush_marker") {
StorageEngineMock::inRecoveryResult = false;
dbFeature->recoveryDone(); // will commit 'link1' (it will also commit 'link' and set RecoveryState to DONE)
}
// commit failed write WAL
{
auto before = StorageEngineMock::flushSubscriptionResult;
auto restore = irs::make_finally([&before]()->void { StorageEngineMock::flushSubscriptionResult = before; });
StorageEngineMock::flushSubscriptionResult = arangodb::Result(TRI_ERROR_INTERNAL);
arangodb::transaction::Methods trx(
arangodb::transaction::StandaloneContext::Create(*vocbase),
EMPTY,
EMPTY,
EMPTY,
arangodb::transaction::Options()
);
CHECK((trx.begin().ok()));
CHECK((link->insert(trx, arangodb::LocalDocumentId(1), doc0->slice(), arangodb::Index::OperationMode::normal).ok()));
CHECK((trx.commit().ok()));
CHECK((!link->commit().ok()));
}
// commit failed write checkpoint
{
arangodb::transaction::Methods trx(
arangodb::transaction::StandaloneContext::Create(*vocbase),
EMPTY,
EMPTY,
EMPTY,
arangodb::transaction::Options()
);
CHECK((trx.begin().ok()));
CHECK((link->insert(trx, arangodb::LocalDocumentId(2), doc1->slice(), arangodb::Index::OperationMode::normal).ok()));
CHECK((trx.commit().ok()));
irs::utf8_path path;
path /= s.testFilesystemPath;
path /= "databases";
path /= std::string("database-") + std::to_string(vocbase->id());
path /= std::string("arangosearch-") + std::to_string(logicalCollection->id()) + "_" + std::to_string(link->id());
path /= "segments_3.checkpoint";
CHECK((path.mkdir())); // create a directory with the same name as the checkpoint file to force an error
CHECK((!link->commit().ok()));
}
// commit success
{
arangodb::transaction::Methods trx(
arangodb::transaction::StandaloneContext::Create(*vocbase),
EMPTY,
EMPTY,
EMPTY,
arangodb::transaction::Options()
);
CHECK((trx.begin().ok()));
CHECK((link->insert(trx, arangodb::LocalDocumentId(3), doc2->slice(), arangodb::Index::OperationMode::normal).ok()));
CHECK((trx.commit().ok()));
CHECK((link->commit().ok()));
logicalCollection->dropIndex(index1->id());
}
}
@ -1242,6 +1363,10 @@ SECTION("test_write") {
CHECK_THROWS((reader.reopen()));
}
////////////////////////////////////////////////////////////////////////////////
/// @brief generate tests
////////////////////////////////////////////////////////////////////////////////
}
// -----------------------------------------------------------------------------

View File

@ -161,7 +161,7 @@ TEST_CASE("FlushFeature", "[serverfeature][serverfeature-flush]") {
TRI_voc_tick_t(1), sizeof(TRI_voc_tick_t));
buf.append(json->slice().begin(), json->slice().byteSize());
auto* marker = reinterpret_cast<MMFilesMarker*>(&buf[0]);
marker->setSize(buf.size());
marker->setSize(static_cast<uint32_t>(buf.size()));
marker->setType(::MMFilesMarkerType::TRI_DF_MARKER_VPACK_FLUSH_SYNC);
arangodb::MMFilesWalRecoverState state(false);
CHECK((0 == state.errorCount));
@ -201,7 +201,7 @@ TEST_CASE("FlushFeature", "[serverfeature][serverfeature-flush]") {
TRI_voc_tick_t(1), sizeof(TRI_voc_tick_t));
buf.append(json->slice().begin(), json->slice().byteSize());
auto* marker = reinterpret_cast<MMFilesMarker*>(&buf[0]);
marker->setSize(buf.size());
marker->setSize(static_cast<uint32_t>(buf.size()));
marker->setType(::MMFilesMarkerType::TRI_DF_MARKER_VPACK_FLUSH_SYNC);
arangodb::MMFilesWalRecoverState state(false);
CHECK((0 == state.errorCount));
@ -241,7 +241,7 @@ TEST_CASE("FlushFeature", "[serverfeature][serverfeature-flush]") {
TRI_voc_tick_t(1), sizeof(TRI_voc_tick_t));
buf.append(json->slice().begin(), json->slice().byteSize());
auto* marker = reinterpret_cast<MMFilesMarker*>(&buf[0]);
marker->setSize(buf.size());
marker->setSize(static_cast<uint32_t>(buf.size()));
marker->setType(::MMFilesMarkerType::TRI_DF_MARKER_VPACK_FLUSH_SYNC);
arangodb::MMFilesWalRecoverState state(false);
CHECK((0 == state.errorCount));
@ -282,7 +282,7 @@ TEST_CASE("FlushFeature", "[serverfeature][serverfeature-flush]") {
TRI_voc_tick_t(1), sizeof(TRI_voc_tick_t));
buf.append(json->slice().begin(), json->slice().byteSize());
auto* marker = reinterpret_cast<MMFilesMarker*>(&buf[0]);
marker->setSize(buf.size());
marker->setSize(static_cast<uint32_t>(buf.size()));
marker->setType(::MMFilesMarkerType::TRI_DF_MARKER_VPACK_FLUSH_SYNC);
arangodb::MMFilesWalRecoverState state(false);
CHECK((0 == state.errorCount));
@ -324,7 +324,7 @@ TEST_CASE("FlushFeature", "[serverfeature][serverfeature-flush]") {
TRI_voc_tick_t(42), sizeof(TRI_voc_tick_t));
buf.append(json->slice().begin(), json->slice().byteSize());
auto* marker = reinterpret_cast<MMFilesMarker*>(&buf[0]);
marker->setSize(buf.size());
marker->setSize(static_cast<uint32_t>(buf.size()));
marker->setType(::MMFilesMarkerType::TRI_DF_MARKER_VPACK_FLUSH_SYNC);
arangodb::MMFilesWalRecoverState state(false);
CHECK((0 == state.errorCount));
@ -366,7 +366,7 @@ TEST_CASE("FlushFeature", "[serverfeature][serverfeature-flush]") {
TRI_voc_tick_t(1), sizeof(TRI_voc_tick_t));
buf.append(json->slice().begin(), json->slice().byteSize());
auto* marker = reinterpret_cast<MMFilesMarker*>(&buf[0]);
marker->setSize(buf.size());
marker->setSize(static_cast<uint32_t>(buf.size()));
marker->setType(::MMFilesMarkerType::TRI_DF_MARKER_VPACK_FLUSH_SYNC);
arangodb::MMFilesWalRecoverState state(false);
CHECK((0 == state.errorCount));
@ -408,7 +408,7 @@ TEST_CASE("FlushFeature", "[serverfeature][serverfeature-flush]") {
TRI_voc_tick_t(1), sizeof(TRI_voc_tick_t));
buf.append(json->slice().begin(), json->slice().byteSize());
auto* marker = reinterpret_cast<MMFilesMarker*>(&buf[0]);
marker->setSize(buf.size());
marker->setSize(static_cast<uint32_t>(buf.size()));
marker->setType(::MMFilesMarkerType::TRI_DF_MARKER_VPACK_FLUSH_SYNC);
arangodb::MMFilesWalRecoverState state(false);
CHECK((0 == state.errorCount));

View File

@ -77,7 +77,7 @@ function recoverySuite () {
view.properties(meta);
fail();
} catch (e) {
assertEqual(e.errorNum, errors.ERROR_ARANGO_INDEX_CREATION_FAILED.code);
assertEqual(e.errorNum, errors.ERROR_ARANGO_ILLEGAL_STATE.code);
}
var links = view.properties().links;