mirror of https://gitee.com/bigwinds/arangodb
Bug fix/issue #9612 (#9764)

* Fixed ViewExecutionNode retrieval with deleted documents present in the view
* Ported the solution from the 3.4 branch
* Changed the index store in the collection from a vector to a set, so that reversible indexes always execute last
* Fixed a re-enter hang
* Index storage fix
* Made index order deterministic
* Fixed the Mac build
* Added tests for index reversal
* Code cleanup
* Removed some redundant copy-constructor calls
* Applied review comments
* Updated CHANGELOG
This commit is contained in:
parent
35e2ebc729
commit
2c72655dd5
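
The central change in this commit: each collection's index list moves from a std::vector to an ordered std::set, so that indexes needing explicit reversal (ArangoSearch links) always sort last and are therefore executed last, and index order becomes deterministic. A minimal standalone sketch of that ordering idea, using a hypothetical Idx type (the real comparator is PhysicalCollection::IndexOrder, shown in the diff below):

#include <cstdint>
#include <iostream>
#include <memory>
#include <set>

// Hypothetical stand-in for arangodb::Index, just enough to show the ordering.
struct Idx {
  uint64_t id;
  bool isPrimary;
  bool needsReversal;  // e.g. an ArangoSearch link that storage rollback cannot undo
};

struct IdxOrder {
  bool operator()(std::shared_ptr<Idx> const& l, std::shared_ptr<Idx> const& r) const {
    if (l->isPrimary != r->isPrimary) return l->isPrimary;              // primary index first
    if (l->needsReversal != r->needsReversal) return r->needsReversal;  // reversible indexes last
    return l->id < r->id;  // unique ids make the order deterministic
  }
};

int main() {
  std::set<std::shared_ptr<Idx>, IdxOrder> indexes;
  indexes.insert(std::make_shared<Idx>(Idx{3, false, true}));   // view link
  indexes.insert(std::make_shared<Idx>(Idx{0, true, false}));   // primary index
  indexes.insert(std::make_shared<Idx>(Idx{2, false, false}));  // hash index
  for (auto const& i : indexes) std::cout << i->id << '\n';     // prints 0, 2, 3
}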
@@ -1,6 +1,8 @@
 v3.5.1 (XXXX-XX-XX)
 -------------------
 
+* Fixed issue #9612: fix ArangoSearch views getting out of sync with collection.
+
 * Fix an issue with potential spurious wakeups in the internal scheduler code.
 
 * Changes the _idle_ timeout of stream transactions to 10 seconds and the total
@@ -278,23 +278,27 @@ template <typename Impl, typename Traits>
 bool IResearchViewExecutorBase<Impl, Traits>::next(ReadContext& ctx) {
   auto& impl = static_cast<Impl&>(*this);
 
-  if (_indexReadBuffer.empty()) {
-    impl.fillBuffer(ctx);
-  }
+  while (true) {
+    if (_indexReadBuffer.empty()) {
+      impl.fillBuffer(ctx);
+    }
 
-  if (_indexReadBuffer.empty()) {
-    return false;
-  }
+    if (_indexReadBuffer.empty()) {
+      return false;
+    }
 
-  IndexReadBufferEntry bufferEntry = _indexReadBuffer.pop_front();
+    IndexReadBufferEntry bufferEntry = _indexReadBuffer.pop_front();
 
-  if (impl.writeRow(ctx, bufferEntry)) {
-    // we read and wrote a document, return true. we don't know if there are more.
-    return true;  // do not change iterator if already reached limit
-  }
-
-  // no documents found, we're exhausted.
-  return false;
+    if (impl.writeRow(ctx, bufferEntry)) {
+      break;
+    } else {
+      // to get correct stats we should continue looking for
+      // other documents inside this one call
+      LOG_TOPIC("550cd", TRACE, arangodb::iresearch::TOPIC)
+          << "failed to write row in node executor";
+    }
+  }
+  return true;
 }
 
 template<typename Impl, typename Traits>
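
The executor hunk above is the user-visible fix for issue #9612: when writeRow() rejects a buffered entry (for example a document that was deleted from the collection but is still present in the view), the old code gave up after the first attempt, while the new code keeps draining and refilling the buffer until a row is actually written or the buffer stays empty after a refill. A reduced sketch of that drain loop, with hypothetical fill/tryWrite callbacks standing in for fillBuffer()/writeRow():

#include <deque>

// Reduced model of IResearchViewExecutorBase::next(): keep consuming buffered
// entries until one is actually written; only report exhaustion when a refill
// still leaves the buffer empty.
template <typename Fill, typename TryWrite>
bool nextRow(std::deque<int>& buffer, Fill fill, TryWrite tryWrite) {
  while (true) {
    if (buffer.empty()) {
      fill(buffer);  // attempt to refill from the underlying view snapshot
    }
    if (buffer.empty()) {
      return false;  // truly exhausted
    }
    int entry = buffer.front();
    buffer.pop_front();
    if (tryWrite(entry)) {
      return true;  // a row was produced
    }
    // entry was stale (e.g. a deleted document); loop and try the next one
  }
}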
@@ -203,7 +203,7 @@ Result ClusterCollection::updateProperties(VPackSlice const& slice, bool doSync)
   TRI_ASSERT(_info.isClosed());
 
   READ_LOCKER(guard, _indexesLock);
-  for (std::shared_ptr<Index>& idx : _indexes) {
+  for (auto& idx : _indexes) {
     static_cast<ClusterIndex*>(idx.get())->updateProperties(_info.slice());
   }
@@ -323,11 +323,12 @@ void ClusterCollection::prepareIndexes(arangodb::velocypack::Slice indexesSlice)
     addIndex(std::move(idx));
   }
 
-  if (_indexes[0]->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX ||
+  auto it = _indexes.cbegin();
+  if ((*it)->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX ||
       (_logicalCollection.type() == TRI_COL_TYPE_EDGE &&
-       (_indexes[1]->type() != Index::IndexType::TRI_IDX_TYPE_EDGE_INDEX ||
+       ((*++it)->type() != Index::IndexType::TRI_IDX_TYPE_EDGE_INDEX ||
         (_indexes.size() >= 3 && _engineType == ClusterEngineType::RocksDBEngine &&
-         _indexes[2]->type() != Index::IndexType::TRI_IDX_TYPE_EDGE_INDEX)))) {
+         (*++it)->type() != Index::IndexType::TRI_IDX_TYPE_EDGE_INDEX)))) {
     std::string msg =
         "got invalid indexes for collection '" + _logicalCollection.name() + "'";
@@ -382,16 +383,14 @@ bool ClusterCollection::dropIndex(TRI_idx_iid_t iid) {
     return true;
   }
 
-  size_t i = 0;
   WRITE_LOCKER(guard, _indexesLock);
-  for (std::shared_ptr<Index> index : _indexes) {
-    if (iid == index->id()) {
-      _indexes.erase(_indexes.begin() + i);
+  for (auto it : _indexes) {
+    if (iid == it->id()) {
+      _indexes.erase(it);
       events::DropIndex(_logicalCollection.vocbase().name(), _logicalCollection.name(),
                         std::to_string(iid), TRI_ERROR_NO_ERROR);
       return true;
     }
-    ++i;
   }
 
   // We tried to remove an index that does not exist
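
A note on the loop above: std::set::erase invalidates only iterators and references to the erased element, and the function returns immediately after erasing, so the range-for never advances past the invalidated position. A sketch of the same pattern with a hypothetical dropById helper:

#include <memory>
#include <set>

// Erasing inside a range-for is safe here only because we return right away;
// continuing the iteration after the erase would be undefined behaviour.
template <typename IndexSet, typename IdType>
bool dropById(IndexSet& indexes, IdType id) {
  for (auto const& idx : indexes) {
    if (idx->id() == id) {
      indexes.erase(idx);  // erase by key; only this element is invalidated
      return true;         // must not keep iterating past this point
    }
  }
  return false;
}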
@@ -495,7 +494,7 @@ void ClusterCollection::addIndex(std::shared_ptr<arangodb::Index> idx) {
       return;
     }
   }
-  _indexes.emplace_back(idx);
+  _indexes.emplace(idx);
 }
 
 }  // namespace arangodb
@@ -76,6 +76,8 @@ class IResearchMMFilesLink final : public arangodb::MMFilesIndex, public IResear
   bool isSorted() const override { return IResearchLink::isSorted(); }
 
   bool isHidden() const override { return IResearchLink::isHidden(); }
 
+  bool needsReversal() const override { return true; }
+
   void load() override { IResearchLink::load(); }
@@ -68,6 +68,8 @@ class IResearchRocksDBLink final : public arangodb::RocksDBIndex, public IResear
   bool isSorted() const override { return IResearchLink::isSorted(); }
 
   bool isHidden() const override { return IResearchLink::isHidden(); }
 
+  bool needsReversal() const override { return true; }
+
   void load() override { IResearchLink::load(); }
@@ -224,6 +224,9 @@ class Index {
   /// @brief whether or not any attribute is expanded
   inline bool hasExpansion() const { return _useExpansion; }
 
+  /// @brief if index needs explicit reversal and wouldn`t be reverted by storage rollback
+  virtual bool needsReversal() const { return false; }
+
   /// @brief whether or not the index covers all the attributes passed in
   virtual bool covers(std::unordered_set<std::string> const& attributes) const;
@@ -249,7 +252,7 @@ class Index {
   static IndexType type(char const* type, size_t len);
 
   static IndexType type(std::string const& type);
 
  public:
   virtual char const* typeName() const = 0;
@@ -21,6 +21,7 @@
 /// @author Jan Steemann
 ////////////////////////////////////////////////////////////////////////////////
 
+#include "MMFilesCollection.h"
 #include "ApplicationFeatures/ApplicationServer.h"
 #include "Aql/PlanCache.h"
 #include "Basics/Exceptions.h"
@@ -50,7 +51,6 @@
 #include "MMFiles/MMFilesLogfileManager.h"
 #include "MMFiles/MMFilesPrimaryIndex.h"
 #include "MMFiles/MMFilesTransactionState.h"
-#include "MMFilesCollection.h"
 #include "RestServer/DatabaseFeature.h"
 #include "Scheduler/Scheduler.h"
 #include "Scheduler/SchedulerFeature.h"
@@ -169,7 +169,7 @@ Result persistLocalDocumentIdIterator(MMFilesMarker const* marker, void* data,
     case TRI_DF_MARKER_VPACK_DOCUMENT: {
       auto transactionId = MMFilesDatafileHelper::TransactionId(marker);
 
       VPackSlice const slice(reinterpret_cast<uint8_t const*>(marker) +
                              MMFilesDatafileHelper::VPackOffset(TRI_DF_MARKER_VPACK_DOCUMENT));
       uint8_t const* vpack = slice.begin();
@@ -335,7 +335,7 @@ int MMFilesCollection::OpenIteratorHandleDocumentMarker(MMFilesMarker const* mar
   transaction::Methods* trx = state->_trx;
   TRI_ASSERT(trx != nullptr);
 
   VPackSlice const slice(reinterpret_cast<uint8_t const*>(marker) +
                          MMFilesDatafileHelper::VPackOffset(TRI_DF_MARKER_VPACK_DOCUMENT));
   uint8_t const* vpack = slice.begin();
@@ -376,8 +376,7 @@ int MMFilesCollection::OpenIteratorHandleDocumentMarker(MMFilesMarker const* mar
 
   // no primary index lock required here because we are the only ones reading
   // from the index ATM
-  MMFilesSimpleIndexElement* found =
-      state->_primaryIndex->lookupKeyRef(trx, keySlice);
+  MMFilesSimpleIndexElement* found = state->_primaryIndex->lookupKeyRef(trx, keySlice);
 
   // it is a new entry
   if (found == nullptr || !found->isSet()) {
@@ -454,7 +453,7 @@ int MMFilesCollection::OpenIteratorHandleDeletionMarker(MMFilesMarker const* mar
   TRI_ASSERT(physical != nullptr);
   transaction::Methods* trx = state->_trx;
 
   VPackSlice const slice(reinterpret_cast<uint8_t const*>(marker) +
                          MMFilesDatafileHelper::VPackOffset(TRI_DF_MARKER_VPACK_REMOVE));
 
   VPackSlice keySlice;
@@ -480,8 +479,7 @@ int MMFilesCollection::OpenIteratorHandleDeletionMarker(MMFilesMarker const* mar
 
   // no primary index lock required here because we are the only ones reading
   // from the index ATM
-  MMFilesSimpleIndexElement found =
-      state->_primaryIndex->lookupKey(trx, keySlice);
+  MMFilesSimpleIndexElement found = state->_primaryIndex->lookupKey(trx, keySlice);
 
   // it is a new entry, so we missed the create
   if (!found) {
@@ -643,7 +641,7 @@ MMFilesCollection::MMFilesCollection(LogicalCollection& logical,
   _path = mmfiles._path;
   _doCompact = mmfiles._doCompact;
   _maxTick = mmfiles._maxTick;
 
   TRI_ASSERT(!ServerState::instance()->isCoordinator());
   setCompactionStatus("compaction not yet started");
   // not copied
@@ -677,7 +675,8 @@ bool MMFilesCollection::isVolatile() const { return _isVolatile; }
 
 /// @brief closes an open collection
 int MMFilesCollection::close() {
-  LOG_TOPIC("2408b", DEBUG, Logger::ENGINES) << "closing '" << _logicalCollection.name() << "'";
+  LOG_TOPIC("2408b", DEBUG, Logger::ENGINES)
+      << "closing '" << _logicalCollection.name() << "'";
   if (!_logicalCollection.deleted() && !_logicalCollection.vocbase().isDropped()) {
     auto primIdx = primaryIndex();
     auto idxSize = primIdx->size();
@@ -941,10 +940,12 @@ int MMFilesCollection::reserveJournalSpace(TRI_voc_tick_t tick, uint32_t size,
     df.release();
     TRI_ASSERT(_journals.size() == 1);
   } catch (basics::Exception const& ex) {
-    LOG_TOPIC("dfb34", ERR, Logger::COLLECTOR) << "cannot select journal: " << ex.what();
+    LOG_TOPIC("dfb34", ERR, Logger::COLLECTOR)
+        << "cannot select journal: " << ex.what();
     return ex.code();
   } catch (std::exception const& ex) {
-    LOG_TOPIC("79da1", ERR, Logger::COLLECTOR) << "cannot select journal: " << ex.what();
+    LOG_TOPIC("79da1", ERR, Logger::COLLECTOR)
+        << "cannot select journal: " << ex.what();
     return TRI_ERROR_INTERNAL;
   } catch (...) {
     LOG_TOPIC("167e7", ERR, Logger::COLLECTOR)
@@ -1197,9 +1198,9 @@ MMFilesDatafile* MMFilesCollection::createDatafile(TRI_voc_fid_t fid, uint32_t j
     std::string oldName = datafile->getName();
     std::string jname("journal-" + std::to_string(datafile->fid()) + ".db");
     std::string filename = arangodb::basics::FileUtils::buildFilename(path(), jname);
 
     LOG_TOPIC("3e87e", TRACE, arangodb::Logger::DATAFILES)
         << "renaming journal '" << datafile->getName() << "' to '" << filename << "'";
 
     int res = datafile->rename(filename);
@@ -1670,24 +1671,24 @@ int MMFilesCollection::fillAllIndexes(transaction::Methods& trx) {
 
 /// @brief Fill the given list of Indexes
 int MMFilesCollection::fillIndexes(transaction::Methods& trx,
-                                   std::vector<std::shared_ptr<arangodb::Index>> const& indexes,
+                                   PhysicalCollection::IndexContainerType const& indexes,
                                    bool skipPersistent) {
   // distribute the work to index threads plus this thread
   TRI_ASSERT(!ServerState::instance()->isCoordinator());
   size_t const n = indexes.size();
 
-  if (n == 0 || (n == 1 && indexes[0].get()->type() == Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX)) {
+  if (n == 0 || (n == 1 && (*indexes.begin())->type() ==
+                               Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX)) {
     return TRI_ERROR_NO_ERROR;
   }
 
   bool rolledBack = false;
   auto rollbackAll = [&]() -> void {
-    for (size_t i = 0; i < n; i++) {
-      auto idx = indexes[i].get();
+    for (auto& idx : indexes) {
       if (idx->type() == Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX) {
         continue;
       }
-      MMFilesIndex* midx = static_cast<MMFilesIndex*>(idx);
+      MMFilesIndex* midx = static_cast<MMFilesIndex*>(idx.get());
       if (midx->isPersistent()) {
         continue;
       }
@@ -1714,8 +1715,7 @@ int MMFilesCollection::fillIndexes(transaction::Methods& trx,
   // give the index a size hint
   auto primaryIdx = primaryIndex();
   auto nrUsed = primaryIdx->size();
-  for (size_t i = 0; i < n; i++) {
-    auto idx = indexes[i];
+  for (auto& idx : indexes) {
     if (idx->type() == Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX) {
       continue;
     }
@@ -1739,8 +1739,7 @@ int MMFilesCollection::fillIndexes(transaction::Methods& trx,
   documents.reserve(blockSize);
 
   auto insertInAllIndexes = [&]() -> void {
-    for (size_t i = 0; i < n; ++i) {
-      auto idx = indexes[i];
+    for (auto& idx : indexes) {
       if (idx->type() == Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX) {
         continue;
       }
@@ -1966,7 +1965,9 @@ int MMFilesCollection::iterateMarkersOnLoad(transaction::Methods* trx) {
   // pick up persistent id flag from state
   _hasAllPersistentLocalIds.store(openState._hasAllPersistentLocalIds);
   auto engine = static_cast<MMFilesEngine*>(EngineSelectorFeature::ENGINE);
-  LOG_TOPIC_IF("40333", WARN, arangodb::Logger::ENGINES, !openState._hasAllPersistentLocalIds && !engine->upgrading() && !engine->inRecovery())
+  LOG_TOPIC_IF("40333", WARN, arangodb::Logger::ENGINES,
+               !openState._hasAllPersistentLocalIds && !engine->upgrading() &&
+                   !engine->inRecovery())
       << "collection '" << _logicalCollection.name() << "' does not have all "
       << "persistent LocalDocumentIds; cannot be linked to an arangosearch "
          "view";
@@ -2133,9 +2134,10 @@ void MMFilesCollection::prepareIndexes(VPackSlice indexesSlice) {
   {
     READ_LOCKER(guard, _indexesLock);
     TRI_ASSERT(!_indexes.empty());
-    if (_indexes[0]->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX ||
+    auto it = _indexes.cbegin();
+    if ((*it)->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX ||
         (TRI_COL_TYPE_EDGE == _logicalCollection.type() &&
-         (_indexes.size() < 2 || _indexes[1]->type() != Index::IndexType::TRI_IDX_TYPE_EDGE_INDEX))) {
+         (_indexes.size() < 2 || (*++it)->type() != Index::IndexType::TRI_IDX_TYPE_EDGE_INDEX))) {
 #ifdef ARANGODB_ENABLE_MAINTAINER_MODE
       for (auto const& it : _indexes) {
         LOG_TOPIC("5e00b", ERR, arangodb::Logger::ENGINES) << "- " << it.get();
@@ -2174,17 +2176,14 @@ void MMFilesCollection::prepareIndexes(VPackSlice indexesSlice) {
   TRI_ASSERT(!_indexes.empty());
 }
 
-std::shared_ptr<Index> MMFilesCollection::createIndex(
-    arangodb::velocypack::Slice const& info,
-    bool restore,
-    bool& created) {
+std::shared_ptr<Index> MMFilesCollection::createIndex(arangodb::velocypack::Slice const& info,
+                                                      bool restore, bool& created) {
   transaction::StandaloneContext ctx(_logicalCollection.vocbase());
 
   SingleCollectionTransaction trx(
-      std::shared_ptr<transaction::Context>(
-          std::shared_ptr<transaction::Context>(),
-          &ctx),  // aliasing ctor
-      _logicalCollection, AccessMode::Type::EXCLUSIVE);
+      std::shared_ptr<transaction::Context>(std::shared_ptr<transaction::Context>(),
+                                            &ctx),  // aliasing ctor
+      _logicalCollection, AccessMode::Type::EXCLUSIVE);
 
   Result res = trx.begin();
@@ -2201,11 +2200,9 @@ std::shared_ptr<Index> MMFilesCollection::createIndex(
   return idx;
 }
 
-std::shared_ptr<Index> MMFilesCollection::createIndex(
-    transaction::Methods& trx,
-    velocypack::Slice const& info,
-    bool restore,
-    bool& created) {
+std::shared_ptr<Index> MMFilesCollection::createIndex(transaction::Methods& trx,
+                                                      velocypack::Slice const& info,
+                                                      bool restore, bool& created) {
   // prevent concurrent dropping
   // TRI_ASSERT(trx->isLocked(&_logicalCollection, AccessMode::Type::READ));
   TRI_ASSERT(!ServerState::instance()->isCoordinator());
@@ -2237,11 +2234,12 @@ std::shared_ptr<Index> MMFilesCollection::createIndex(
 
   bool generateKey = !restore;  // Restore is not allowed to generate an id
   try {
-    idx = engine->indexFactory().prepareIndexFromSlice(info, generateKey, _logicalCollection, false);
+    idx = engine->indexFactory().prepareIndexFromSlice(info, generateKey,
+                                                       _logicalCollection, false);
   } catch (std::exception const& ex) {
     THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_ARANGO_INDEX_CREATION_FAILED, ex.what());
   }
 
   TRI_ASSERT(idx != nullptr);
 
   if (!restore) {
|
|||
<< "' but found conflicting index '" << builder.slice().toJson() << "'";
|
||||
#endif
|
||||
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_ARANGO_DUPLICATE_IDENTIFIER,
|
||||
"duplicate value for `" +
|
||||
arangodb::StaticStrings::IndexId +
|
||||
"duplicate value for `" + arangodb::StaticStrings::IndexId +
|
||||
"` or `" +
|
||||
arangodb::StaticStrings::IndexName +
|
||||
"`");
|
||||
arangodb::StaticStrings::IndexName + "`");
|
||||
}
|
||||
|
||||
TRI_ASSERT(idx->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
|
||||
|
@ -2306,8 +2302,8 @@ int MMFilesCollection::saveIndex(transaction::Methods& trx, std::shared_ptr<Inde
|
|||
TRI_ASSERT(!ServerState::instance()->isCoordinator());
|
||||
// we cannot persist PrimaryIndex
|
||||
TRI_ASSERT(idx->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
|
||||
std::vector<std::shared_ptr<arangodb::Index>> indexListLocal;
|
||||
indexListLocal.emplace_back(idx);
|
||||
IndexContainerType indexListLocal;
|
||||
indexListLocal.emplace(idx);
|
||||
|
||||
int res = fillIndexes(trx, indexListLocal, false);
|
||||
|
||||
|
@@ -2327,11 +2323,13 @@ int MMFilesCollection::saveIndex(transaction::Methods& trx, std::shared_ptr<Inde
         << "cannot save index definition: " << ex.what();
     return TRI_ERROR_INTERNAL;
   } catch (...) {
-    LOG_TOPIC("a05ba", ERR, arangodb::Logger::ENGINES) << "cannot save index definition";
+    LOG_TOPIC("a05ba", ERR, arangodb::Logger::ENGINES)
+        << "cannot save index definition";
     return TRI_ERROR_INTERNAL;
   }
   if (builder == nullptr) {
-    LOG_TOPIC("63a71", ERR, arangodb::Logger::ENGINES) << "cannot save index definition";
+    LOG_TOPIC("63a71", ERR, arangodb::Logger::ENGINES)
+        << "cannot save index definition";
     return TRI_ERROR_OUT_OF_MEMORY;
   }
@@ -2379,7 +2377,7 @@ bool MMFilesCollection::addIndex(std::shared_ptr<arangodb::Index> idx) {
 
   TRI_UpdateTickServer(static_cast<TRI_voc_tick_t>(id));
 
-  _indexes.emplace_back(idx);
+  _indexes.emplace(idx);
   if (idx->type() == Index::TRI_IDX_TYPE_PRIMARY_INDEX) {
     TRI_ASSERT(idx->id() == 0);
     _primaryIndex = static_cast<MMFilesPrimaryIndex*>(idx.get());
|
|||
bool MMFilesCollection::removeIndex(TRI_idx_iid_t iid) {
|
||||
WRITE_LOCKER(guard, _indexesLock);
|
||||
|
||||
size_t const n = _indexes.size();
|
||||
|
||||
for (size_t i = 0; i < n; ++i) {
|
||||
auto idx = _indexes[i];
|
||||
|
||||
for (auto& idx : _indexes) {
|
||||
if (!idx->canBeDropped()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
if (idx->id() == iid) {
|
||||
// found!
|
||||
idx->drop();
|
||||
|
||||
_indexes.erase(_indexes.begin() + i);
|
||||
|
||||
// update statistics
|
||||
MMFilesIndex* midx = static_cast<MMFilesIndex*>(idx.get());
|
||||
if (midx->isPersistent()) {
|
||||
--_persistentIndexes;
|
||||
}
|
||||
|
||||
_indexes.erase(idx);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
@@ -2821,14 +2811,14 @@ Result MMFilesCollection::truncate(transaction::Methods& trx, OperationOptions&
   }
 
   READ_LOCKER(guard, _indexesLock);
-  auto indexes = _indexes;
-  size_t const n = indexes.size();
-
   TRI_voc_tick_t tick = TRI_NewTickServer();
-  for (size_t i = 1; i < n; ++i) {
-    auto idx = indexes[i];
-    TRI_ASSERT(idx->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
-    idx->afterTruncate(tick);
+  if (_indexes.size() > 1) {
+    auto idx = _indexes.begin();
+    ++idx;  // skip primary index
+    for (; idx != _indexes.end(); ++idx) {
+      TRI_ASSERT((*idx)->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
+      (*idx)->afterTruncate(tick);
+    }
   }
 
   return Result();
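
Because the ordered container always sorts the primary index first, "all secondary indexes" reduces to iterating from std::next(begin()) to end(), which is exactly what the truncate hunk above does. A sketch of that idiom as a hypothetical forEachSecondary helper:

// Assumes the set's comparator places the primary index first, as
// PhysicalCollection::IndexOrder does.
template <typename IndexSet, typename Fn>
void forEachSecondary(IndexSet const& indexes, Fn fn) {
  if (indexes.size() > 1) {
    auto it = indexes.begin();
    ++it;  // skip primary index, guaranteed to sort first
    for (; it != indexes.end(); ++it) {
      fn(**it);  // visit each secondary index
    }
  }
}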
@@ -2859,8 +2849,7 @@ LocalDocumentId MMFilesCollection::reuseOrCreateLocalDocumentId(OperationOptions
 
 Result MMFilesCollection::insert(arangodb::transaction::Methods* trx, VPackSlice const slice,
                                  arangodb::ManagedDocumentResult& resultMdr,
-                                 OperationOptions& options,
-                                 bool lock, KeyLockInfo* keyLockInfo,
+                                 OperationOptions& options, bool lock, KeyLockInfo* keyLockInfo,
                                  std::function<void()> const& callbackDuringLock) {
   LocalDocumentId const documentId = reuseOrCreateLocalDocumentId(options);
   auto isEdgeCollection = (TRI_COL_TYPE_EDGE == _logicalCollection.type());
|
|||
if (options.returnNew) {
|
||||
resultMdr.setManaged(doc.begin());
|
||||
TRI_ASSERT(resultMdr.revisionId() == revisionId);
|
||||
} else if (!options.silent) { // need to pass up revision and key
|
||||
} else if (!options.silent) { // need to pass up revision and key
|
||||
transaction::BuilderLeaser keyBuilder(trx);
|
||||
keyBuilder->openObject(/*unindexed*/true);
|
||||
keyBuilder->add(StaticStrings::KeyString, transaction::helpers::extractKeyFromDocument(doc));
|
||||
keyBuilder->openObject(/*unindexed*/ true);
|
||||
keyBuilder->add(StaticStrings::KeyString,
|
||||
transaction::helpers::extractKeyFromDocument(doc));
|
||||
keyBuilder->close();
|
||||
resultMdr.setManaged()->assign(reinterpret_cast<char const*>(keyBuilder->start()),
|
||||
keyBuilder->size());
|
||||
|
@@ -3262,34 +3252,31 @@ Result MMFilesCollection::insertSecondaryIndexes(arangodb::transaction::Methods&
   Result result;
 
   READ_LOCKER(guard, _indexesLock);
-  auto indexes = _indexes;
-  size_t const n = indexes.size();
-
-  for (size_t i = 1; i < n; ++i) {
-    auto idx = indexes[i];
-    TRI_ASSERT(idx->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
-
-    MMFilesIndex* midx = static_cast<MMFilesIndex*>(idx.get());
-    if (!useSecondary && !midx->isPersistent()) {
-      continue;
-    }
-
-    Result res = midx->insert(trx, documentId, doc, mode);
-
-    // in case of no-memory, return immediately
-    if (res.errorNumber() == TRI_ERROR_OUT_OF_MEMORY) {
-      return res;
-    }
-    if (!res.ok()) {
-      if (res.errorNumber() == TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED ||
-          result.ok()) {
-        // "prefer" unique constraint violated
-        result = res;
+  if (_indexes.size() > 1) {
+    auto idx = _indexes.begin();
+    idx++;  // skip primary index
+    for (; idx != _indexes.end(); ++idx) {
+      TRI_ASSERT((*idx)->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
+      MMFilesIndex* midx = static_cast<MMFilesIndex*>(idx->get());
+      if (!useSecondary && !midx->isPersistent()) {
+        continue;
+      }
+
+      Result res = midx->insert(trx, documentId, doc, mode);
+
+      // in case of no-memory, return immediately
+      if (res.errorNumber() == TRI_ERROR_OUT_OF_MEMORY) {
+        return res;
+      }
+      if (!res.ok()) {
+        if (res.errorNumber() == TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED ||
+            result.ok()) {
+          // "prefer" unique constraint violated
+          result = res;
+        }
       }
     }
   }
 
   return result;
 }
@@ -3311,26 +3298,22 @@ Result MMFilesCollection::deleteSecondaryIndexes(transaction::Methods& trx,
   Result result;
 
   READ_LOCKER(guard, _indexesLock);
-  auto indexes = _indexes;
-  size_t const n = indexes.size();
-
-  for (size_t i = 1; i < n; ++i) {
-    auto idx = indexes[i];
-    TRI_ASSERT(idx->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
-
-    MMFilesIndex* midx = static_cast<MMFilesIndex*>(idx.get());
-    if (!useSecondary && !midx->isPersistent()) {
-      continue;
-    }
-
-    Result res = midx->remove(trx, documentId, doc, mode);
-
-    if (res.fail()) {
-      // an error occurred
-      result = res;
+  if (_indexes.size() > 1) {
+    auto idx = _indexes.begin();
+    ++idx;  // skip primary index
+    for (; idx != _indexes.end(); ++idx) {
+      TRI_ASSERT((*idx)->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX);
+      MMFilesIndex* midx = static_cast<MMFilesIndex*>(idx->get());
+      if (!useSecondary && !midx->isPersistent()) {
+        continue;
+      }
+      Result res = midx->remove(trx, documentId, doc, mode);
+      if (res.fail()) {
+        // an error occurred
+        result = res;
+      }
     }
   }
 
   return result;
 }
@@ -3413,8 +3396,8 @@ Result MMFilesCollection::insertDocument(arangodb::transaction::Methods& trx,
 
 Result MMFilesCollection::update(arangodb::transaction::Methods* trx,
                                  VPackSlice const newSlice, ManagedDocumentResult& resultMdr,
-                                 OperationOptions& options,
-                                 bool lock, ManagedDocumentResult& previousMdr) {
+                                 OperationOptions& options, bool lock,
+                                 ManagedDocumentResult& previousMdr) {
   VPackSlice key = newSlice.get(StaticStrings::KeyString);
   if (key.isNone()) {
     return Result(TRI_ERROR_ARANGO_DOCUMENT_HANDLE_BAD);
@@ -3493,7 +3476,8 @@ Result MMFilesCollection::update(arangodb::transaction::Methods* trx,
     if (arangodb::shardKeysChanged(_logicalCollection, oldDoc, builder->slice(), true)) {
       return Result(TRI_ERROR_CLUSTER_MUST_NOT_CHANGE_SHARDING_ATTRIBUTES);
     }
-    if (arangodb::smartJoinAttributeChanged(_logicalCollection, oldDoc, builder->slice(), true)) {
+    if (arangodb::smartJoinAttributeChanged(_logicalCollection, oldDoc,
+                                            builder->slice(), true)) {
       return Result(TRI_ERROR_CLUSTER_MUST_NOT_CHANGE_SMART_JOIN_ATTRIBUTE);
     }
   }
@@ -3621,7 +3605,8 @@ Result MMFilesCollection::replace(transaction::Methods* trx, VPackSlice const ne
     if (arangodb::shardKeysChanged(_logicalCollection, oldDoc, builder->slice(), false)) {
      return Result(TRI_ERROR_CLUSTER_MUST_NOT_CHANGE_SHARDING_ATTRIBUTES);
     }
-    if (arangodb::smartJoinAttributeChanged(_logicalCollection, oldDoc, builder->slice(), false)) {
+    if (arangodb::smartJoinAttributeChanged(_logicalCollection, oldDoc,
+                                            builder->slice(), false)) {
      return Result(TRI_ERROR_CLUSTER_MUST_NOT_CHANGE_SMART_JOIN_ATTRIBUTE);
     }
   }
@@ -3675,10 +3660,9 @@ Result MMFilesCollection::replace(transaction::Methods* trx, VPackSlice const ne
 }
 
 Result MMFilesCollection::remove(transaction::Methods& trx, velocypack::Slice slice,
-                                 ManagedDocumentResult& previousMdr, OperationOptions& options,
-                                 bool lock, KeyLockInfo* keyLockInfo,
+                                 ManagedDocumentResult& previousMdr,
+                                 OperationOptions& options, bool lock, KeyLockInfo* keyLockInfo,
                                  std::function<void()> const& callbackDuringLock) {
-
   LocalDocumentId const documentId = LocalDocumentId::create();
   transaction::BuilderLeaser builder(&trx);
   TRI_voc_rid_t revisionId;
@@ -3976,8 +3960,7 @@ Result MMFilesCollection::removeFastPath(transaction::Methods& trx, TRI_voc_rid_
 /// @brief looks up a document by key, low level worker
 /// the caller must make sure the read lock on the collection is held
 /// the key must be a string slice, no revision check is performed
-LocalDocumentId MMFilesCollection::lookupDocument(transaction::Methods* trx,
-                                                  VPackSlice key,
+LocalDocumentId MMFilesCollection::lookupDocument(transaction::Methods* trx, VPackSlice key,
                                                   ManagedDocumentResult& result) {
   TRI_ASSERT(key.isString());
   MMFilesSimpleIndexElement element = primaryIndex()->lookupKey(trx, key, result);
@@ -358,7 +358,7 @@ class MMFilesCollection final : public PhysicalCollection {
 
   /// @brief Fill indexes used in recovery
   int fillIndexes(transaction::Methods& trx,
-                  std::vector<std::shared_ptr<Index>> const& indexes,
+                  PhysicalCollection::IndexContainerType const& indexes,
                   bool skipPersistent = true);
 
   int openWorker(bool ignoreErrors);
@@ -465,6 +465,12 @@ Result MMFilesHashIndex::insert(transaction::Methods& trx, LocalDocumentId const
 /// @brief removes an entry from the hash array part of the hash index
 Result MMFilesHashIndex::remove(transaction::Methods& trx, LocalDocumentId const& documentId,
                                 velocypack::Slice const& doc, Index::OperationMode mode) {
+  TRI_IF_FAILURE("BreakHashIndexRemove") {
+    if (type() == arangodb::Index::IndexType::TRI_IDX_TYPE_HASH_INDEX) {
+      // intentionally break index removal
+      return Result(TRI_ERROR_INTERNAL, "BreakHashIndexRemove failure point triggered");
+    }
+  }
   Result res;
   std::vector<MMFilesHashIndexElement*> elements;
   int r = fillElement<MMFilesHashIndexElement>(elements, documentId, doc);
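
The TRI_IF_FAILURE block added above (and its twin in RocksDBVPackIndex::remove further down) gives the new index-reversal tests a deterministic way to make a hash-index removal fail. A rough illustration of how such a failure point can be wired up; the names here are illustrative, not ArangoDB's actual implementation:

// In maintainer builds the macro checks a registry of activated failure
// points; in release builds it compiles away entirely.
#ifdef MAINTAINER_MODE
bool failurePointActive(char const* name);  // assumed test-support lookup
#define IF_FAILURE(name) if (failurePointActive(name))
#else
#define IF_FAILURE(name) if (false)
#endif

int removeEntry() {
  IF_FAILURE("BreakHashIndexRemove") {
    return -1;  // intentionally break removal so tests can observe the rollback path
  }
  return 0;  // normal removal path
}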
@@ -274,7 +274,7 @@ void RocksDBCollection::prepareIndexes(arangodb::velocypack::Slice indexesSlice)
 
     if (idx) {
       TRI_UpdateTickServer(static_cast<TRI_voc_tick_t>(id));
-      _indexes.emplace_back(idx);
+      _indexes.emplace(idx);
       if (idx->type() == Index::TRI_IDX_TYPE_PRIMARY_INDEX) {
         TRI_ASSERT(idx->id() == 0);
         _primaryIndex = static_cast<RocksDBPrimaryIndex*>(idx.get());
@@ -282,11 +282,12 @@ void RocksDBCollection::prepareIndexes(arangodb::velocypack::Slice indexesSlice)
     }
   }
 
-  if (_indexes[0]->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX ||
+  auto it = _indexes.cbegin();
+  if ((*it)->type() != Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX ||
       (TRI_COL_TYPE_EDGE == _logicalCollection.type() &&
       (_indexes.size() < 3 ||
-        (_indexes[1]->type() != Index::IndexType::TRI_IDX_TYPE_EDGE_INDEX ||
-         _indexes[2]->type() != Index::IndexType::TRI_IDX_TYPE_EDGE_INDEX)))) {
+        ((*++it)->type() != Index::IndexType::TRI_IDX_TYPE_EDGE_INDEX ||
+         (*++it)->type() != Index::IndexType::TRI_IDX_TYPE_EDGE_INDEX)))) {
     std::string msg =
         "got invalid indexes for collection '" + _logicalCollection.name() + "'";
     LOG_TOPIC("0ef34", ERR, arangodb::Logger::ENGINES) << msg;
@@ -428,7 +429,7 @@ std::shared_ptr<Index> RocksDBCollection::createIndex(VPackSlice const& info,
   const bool inBackground =
       basics::VelocyPackHelper::getBooleanValue(info, StaticStrings::IndexInBackground, false);
   if (inBackground) {  // allow concurrent inserts into index
-    _indexes.emplace_back(buildIdx);
+    _indexes.emplace(buildIdx);
     res = buildIdx->fillIndexBackground(locker);
   } else {
     res = buildIdx->fillIndexForeground();
@@ -441,14 +442,15 @@ std::shared_ptr<Index> RocksDBCollection::createIndex(VPackSlice const& info,
   // Step 5. register in index list
   WRITE_LOCKER(guard, _indexesLock);
   if (inBackground) {  // swap in actual index
-    for (size_t i = 0; i < _indexes.size(); i++) {
-      if (_indexes[i]->id() == buildIdx->id()) {
-        _indexes[i] = idx;
+    for (auto& it : _indexes) {
+      if (it->id() == buildIdx->id()) {
+        _indexes.erase(it);
+        _indexes.emplace(idx);
         break;
       }
     }
   } else {
-    _indexes.push_back(idx);
+    _indexes.emplace(idx);
   }
   guard.unlock();
 #if USE_PLAN_CACHE
@@ -506,15 +508,13 @@ bool RocksDBCollection::dropIndex(TRI_idx_iid_t iid) {
 
   std::shared_ptr<arangodb::Index> toRemove;
   {
-    size_t i = 0;
     WRITE_LOCKER(guard, _indexesLock);
-    for (std::shared_ptr<Index>& idx : _indexes) {
-      if (iid == idx->id()) {
-        toRemove = std::move(idx);
-        _indexes.erase(_indexes.begin() + i);
+    for (auto& it : _indexes) {
+      if (iid == it->id()) {
+        toRemove = it;
+        _indexes.erase(it);
         break;
       }
-      ++i;
     }
   }
@@ -1268,14 +1268,18 @@ void RocksDBCollection::figuresSpecific(std::shared_ptr<arangodb::velocypack::Bu
 
 namespace {
 template<typename F>
-void reverseIdxOps(std::vector<std::shared_ptr<Index>> const& vector,
-                   std::vector<std::shared_ptr<Index>>::const_iterator& it,
+void reverseIdxOps(PhysicalCollection::IndexContainerType const& indexes,
+                   PhysicalCollection::IndexContainerType::const_iterator& it,
                    F&& op) {
-  while (it != vector.begin()) {
+  while (it != indexes.begin()) {
     it--;
     auto* rIdx = static_cast<RocksDBIndex*>(it->get());
-    if (rIdx->type() == Index::TRI_IDX_TYPE_IRESEARCH_LINK) {
-      std::forward<F>(op)(rIdx);
+    if (rIdx->needsReversal()) {
+      if (std::forward<F>(op)(rIdx).fail()) {
+        // best effort for reverse failed. Let`s trigger full rollback
+        // or we will end up with inconsistent storage and indexes
+        THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "Failed to reverse index operation.");
+      }
     }
   }
 }
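
reverseIdxOps() above implements the rollback walk: starting from the index whose operation failed, it steps backwards over the indexes already applied and asks each one that needs explicit reversal to undo its operation, escalating to an exception (full rollback) if an undo itself fails. A compact standalone sketch of the walk-back over hypothetical undo callbacks:

#include <cstddef>
#include <functional>
#include <stdexcept>
#include <vector>

// undoOps[0..failedAt) were applied before the failure, so undo them in
// reverse order; a failed undo would leave storage and indexes inconsistent,
// which is escalated as an exception to trigger a full rollback.
inline void reverseApplied(std::vector<std::function<bool()>> const& undoOps,
                           std::size_t failedAt) {
  for (std::size_t i = failedAt; i-- > 0;) {
    if (!undoOps[i]()) {
      throw std::runtime_error("failed to reverse index operation");
    }
  }
}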
@@ -1318,13 +1322,11 @@ Result RocksDBCollection::insertDocument(arangodb::transaction::Methods* trx,
   for (auto it = _indexes.begin(); it != _indexes.end(); it++) {
     RocksDBIndex* rIdx = static_cast<RocksDBIndex*>(it->get());
     res = rIdx->insert(*trx, mthds, documentId, doc, options.indexOperationMode);
-
-    needReversal = needReversal || rIdx->type() == Index::TRI_IDX_TYPE_IRESEARCH_LINK;
-
+    needReversal = needReversal || rIdx->needsReversal();
     if (res.fail()) {
       if (needReversal && !state->isSingleOperation()) {
-        ::reverseIdxOps(_indexes, it, [mthds, trx, &documentId, &doc, &options](RocksDBIndex* rid) {
-          rid->remove(*trx, mthds, documentId, doc, options.indexOperationMode);
+        ::reverseIdxOps(_indexes, it, [mthds, trx, &documentId, &doc](RocksDBIndex* rid) {
+          return rid->remove(*trx, mthds, documentId, doc, Index::OperationMode::rollback);
         });
       }
       break;
@@ -1365,11 +1367,17 @@ Result RocksDBCollection::removeDocument(arangodb::transaction::Methods* trx,
       << " objectID " << _objectId << " name: " << _logicalCollection.name();*/
 
   READ_LOCKER(guard, _indexesLock);
-  for (std::shared_ptr<Index> const& idx : _indexes) {
-    RocksDBIndex* ridx = static_cast<RocksDBIndex*>(idx.get());
-    res = ridx->remove(*trx, mthds, documentId, doc, options.indexOperationMode);
-
+  bool needReversal = false;
+  for (auto it = _indexes.begin(); it != _indexes.end(); it++) {
+    RocksDBIndex* rIdx = static_cast<RocksDBIndex*>(it->get());
+    res = rIdx->remove(*trx, mthds, documentId, doc, options.indexOperationMode);
+    needReversal = needReversal || rIdx->needsReversal();
     if (res.fail()) {
+      if (needReversal && !trx->isSingleOperationTransaction()) {
+        ::reverseIdxOps(_indexes, it, [mthds, trx, &documentId, &doc](RocksDBIndex* rid) {
+          return rid->insert(*trx, mthds, documentId, doc, Index::OperationMode::rollback);
+        });
+      }
       break;
     }
   }
@@ -1419,16 +1427,23 @@ Result RocksDBCollection::updateDocument(transaction::Methods* trx,
   }
 
   READ_LOCKER(guard, _indexesLock);
-  for (std::shared_ptr<Index> const& idx : _indexes) {
-    RocksDBIndex* rIdx = static_cast<RocksDBIndex*>(idx.get());
+  bool needReversal = false;
+  for (auto it = _indexes.begin(); it != _indexes.end(); it++) {
+    auto rIdx = static_cast<RocksDBIndex*>(it->get());
     res = rIdx->update(*trx, mthds, oldDocumentId, oldDoc, newDocumentId,
                        newDoc, options.indexOperationMode);
-
-    if (res.fail()) {
+    needReversal = needReversal || rIdx->needsReversal();
+    if (!res.ok()) {
+      if (needReversal && !trx->isSingleOperationTransaction()) {
+        ::reverseIdxOps(_indexes, it,
+                        [mthds, trx, &newDocumentId, &newDoc, &oldDocumentId, &oldDoc](RocksDBIndex* rid) {
+                          return rid->update(*trx, mthds, newDocumentId, newDoc, oldDocumentId,
+                                             oldDoc, Index::OperationMode::rollback);
+                        });
+      }
       break;
     }
   }
 
   return res;
 }
@@ -121,7 +121,7 @@ class RocksDBIndex : public Index {
   virtual void recalculateEstimates() {}
 
   bool isPersistent() const override final { return true; }
 
  protected:
   RocksDBIndex(TRI_idx_iid_t id, LogicalCollection& collection, std::string const& name,
                std::vector<std::vector<arangodb::basics::AttributeName>> const& attributes,
@@ -868,6 +868,12 @@ Result RocksDBVPackIndex::remove(transaction::Methods& trx, RocksDBMethods* mthd
                                  LocalDocumentId const& documentId,
                                  velocypack::Slice const& doc,
                                  Index::OperationMode mode) {
+  TRI_IF_FAILURE("BreakHashIndexRemove") {
+    if (type() == arangodb::Index::IndexType::TRI_IDX_TYPE_HASH_INDEX) {
+      // intentionally break index removal
+      return Result(TRI_ERROR_INTERNAL, "BreakHashIndexRemove failure point triggered");
+    }
+  }
   Result res;
   rocksdb::Status s;
   SmallVector<RocksDBKey>::allocator_type::arena_type elementsArena;
@@ -111,7 +111,7 @@ bool PhysicalCollection::hasIndexOfType(arangodb::Index::IndexType type) const {
 
 /// @brief Find index by definition
 /*static*/ std::shared_ptr<Index> PhysicalCollection::findIndex(
-    VPackSlice const& info, std::vector<std::shared_ptr<Index>> const& indexes) {
+    VPackSlice const& info, IndexContainerType const& indexes) {
   TRI_ASSERT(info.isObject());
 
   auto value = info.get(arangodb::StaticStrings::IndexType);  // extract type
@@ -528,7 +528,7 @@ int PhysicalCollection::checkRevision(transaction::Methods*, TRI_voc_rid_t expec
 /// @brief hands out a list of indexes
 std::vector<std::shared_ptr<arangodb::Index>> PhysicalCollection::getIndexes() const {
   READ_LOCKER(guard, _indexesLock);
-  return _indexes;
+  return {_indexes.begin(), _indexes.end()};
 }
 
 void PhysicalCollection::getIndexesVPack(VPackBuilder& result, unsigned flags,
@@ -579,4 +579,51 @@ std::shared_ptr<arangodb::velocypack::Builder> PhysicalCollection::figures() {
   return builder;
 }
 
+bool PhysicalCollection::IndexOrder::operator()(const std::shared_ptr<Index>& left,
+                                                const std::shared_ptr<Index>& right) const {
+  // Primary index always first (but two primary indexes render comparsion
+  // invalid but that`s a bug itself)
+  TRI_ASSERT(!((left->type() == Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX) &&
+               (right->type() == Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX)));
+  if (left->type() == Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX) {
+    return true;
+  }
+  if (right->type() == Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX) {
+    return false;
+  }
+
+  // edge indexes should go right after primary
+  if (left->type() != right->type()) {
+    if (right->type() == Index::IndexType::TRI_IDX_TYPE_EDGE_INDEX) {
+      return false;
+    } else if (left->type() == Index::IndexType::TRI_IDX_TYPE_EDGE_INDEX) {
+      return true;
+    }
+  }
+
+  // This failpoint allows CRUD tests to trigger reversal
+  // of index operations. Hash index placed always AFTER reversable indexes
+  // could be broken by unique constraint violation or by intentional failpoint.
+  // And this will make possible to deterministically trigger index reversals
+  TRI_IF_FAILURE("HashIndexAlwaysLast") {
+    if (left->type() != right->type()) {
+      if (right->type() == arangodb::Index::IndexType::TRI_IDX_TYPE_HASH_INDEX) {
+        return true;
+      } else if (left->type() == arangodb::Index::IndexType::TRI_IDX_TYPE_HASH_INDEX) {
+        return false;
+      }
+    }
+  }
+
+  // indexes which needs no reverse should be done first to minimize
+  // need for reversal procedures
+  if (left->needsReversal() != right->needsReversal()) {
+    return right->needsReversal();
+  }
+  // use id to make order of equally-sorted indexes deterministic
+  return left->id() < right->id();
+}
+
 }  // namespace arangodb
@@ -23,7 +23,7 @@
 
 #ifndef ARANGOD_VOCBASE_PHYSICAL_COLLECTION_H
 #define ARANGOD_VOCBASE_PHYSICAL_COLLECTION_H 1
 
+#include <set>
 #include "Basics/Common.h"
 #include "Basics/ReadWriteLock.h"
 #include "Indexes/Index.h"
@@ -99,9 +99,16 @@ class PhysicalCollection {
 
   bool hasIndexOfType(arangodb::Index::IndexType type) const;
 
+  /// @brief determines order of index execution on collection
+  struct IndexOrder {
+    bool operator()(const std::shared_ptr<Index>& left,
+                    const std::shared_ptr<Index>& right) const;
+  };
+
+  using IndexContainerType = std::set<std::shared_ptr<Index>, IndexOrder>;
   /// @brief find index by definition
   static std::shared_ptr<Index> findIndex(velocypack::Slice const&,
-                                          std::vector<std::shared_ptr<Index>> const&);
+                                          IndexContainerType const&);
   /// @brief Find index by definition
   std::shared_ptr<Index> lookupIndex(velocypack::Slice const&) const;
@@ -137,7 +144,7 @@ class PhysicalCollection {
   ///////////////////////////////////
 
   virtual Result truncate(transaction::Methods& trx, OperationOptions& options) = 0;
 
   /// @brief compact-data operation
   virtual Result compact() = 0;
@@ -174,12 +181,12 @@ class PhysicalCollection {
   virtual Result insert(arangodb::transaction::Methods* trx,
                         arangodb::velocypack::Slice newSlice,
                         arangodb::ManagedDocumentResult& result,
-                        OperationOptions& options,
-                        bool lock, KeyLockInfo* keyLockInfo,
+                        OperationOptions& options, bool lock, KeyLockInfo* keyLockInfo,
                         std::function<void()> const& cbDuringLock) = 0;
 
   Result insert(arangodb::transaction::Methods* trx, arangodb::velocypack::Slice newSlice,
-                arangodb::ManagedDocumentResult& result, OperationOptions& options, bool lock) {
+                arangodb::ManagedDocumentResult& result,
+                OperationOptions& options, bool lock) {
     return insert(trx, newSlice, result, options, lock, nullptr, nullptr);
   }
|
@ -197,7 +204,7 @@ class PhysicalCollection {
|
|||
ManagedDocumentResult& previous, OperationOptions& options,
|
||||
bool lock, KeyLockInfo* keyLockInfo,
|
||||
std::function<void()> const& cbDuringLock) = 0;
|
||||
|
||||
|
||||
/// @brief new object for insert, value must have _key set correctly.
|
||||
Result newObjectForInsert(transaction::Methods* trx, velocypack::Slice const& value,
|
||||
bool isEdgeCollection, velocypack::Builder& builder,
|
||||
|
@@ -240,7 +247,7 @@ class PhysicalCollection {
   bool const _isDBServer;
 
   mutable basics::ReadWriteLock _indexesLock;
-  std::vector<std::shared_ptr<Index>> _indexes;
+  IndexContainerType _indexes;
 };
 
 }  // namespace arangodb
@@ -27,6 +27,7 @@
 #include "Aql/IResearchViewNode.h"
 
 #include "../Mocks/StorageEngineMock.h"
+#include "Aql/TestExecutorHelper.h"
 
 #if USE_ENTERPRISE
 #include "Enterprise/Ldap/LdapFeature.h"
|
|||
#include "VocBase/LogicalCollection.h"
|
||||
#include "VocBase/LogicalView.h"
|
||||
#include "VocBase/ManagedDocumentResult.h"
|
||||
#include "V8Server/V8DealerFeature.h"
|
||||
#include "Cluster/ClusterFeature.h"
|
||||
|
||||
#include "IResearch/IResearchLinkMeta.h"
|
||||
#include "IResearch/IResearchMMFilesLink.h"
|
||||
#include "IResearch/VelocyPackHelper.h"
|
||||
#include "analysis/analyzers.hpp"
|
||||
#include "analysis/token_attributes.hpp"
|
||||
|
@ -200,12 +205,15 @@ TEST_F(IResearchViewNodeTest, constructSortedView) {
|
|||
|
||||
{
|
||||
auto json = arangodb::velocypack::Parser::fromJson(
|
||||
"{ \"id\":42, \"depth\":0, \"totalNrRegs\":0, \"varInfoList\":[], "
|
||||
"\"nrRegs\":[], \"nrRegsHere\":[], \"regsToClear\":[], "
|
||||
"\"varsUsedLater\":[], \"varsValid\":[], \"outVariable\": { "
|
||||
"\"name\":\"variable\", \"id\":0 }, \"options\": { \"waitForSync\" : "
|
||||
"true, \"collections\":[42] }, \"viewId\": \"" + std::to_string(logicalView->id()) + "\", "
|
||||
"\"primarySort\": [ { \"field\": \"my.nested.Fields\", \"asc\": false}, { \"field\": \"another.field\", \"asc\":true } ] }");
|
||||
"{ \"id\":42, \"depth\":0, \"totalNrRegs\":0, \"varInfoList\":[], "
|
||||
"\"nrRegs\":[], \"nrRegsHere\":[], \"regsToClear\":[], "
|
||||
"\"varsUsedLater\":[], \"varsValid\":[], \"outVariable\": { "
|
||||
"\"name\":\"variable\", \"id\":0 }, \"options\": { \"waitForSync\" : "
|
||||
"true, \"collections\":[42] }, \"viewId\": \"" +
|
||||
std::to_string(logicalView->id()) +
|
||||
"\", "
|
||||
"\"primarySort\": [ { \"field\": \"my.nested.Fields\", \"asc\": "
|
||||
"false}, { \"field\": \"another.field\", \"asc\":true } ] }");
|
||||
|
||||
arangodb::iresearch::IResearchViewNode node(*query.plan(), // plan
|
||||
json->slice());
|
||||
|
@ -243,12 +251,16 @@ TEST_F(IResearchViewNodeTest, constructSortedView) {
|
|||
|
||||
{
|
||||
auto json = arangodb::velocypack::Parser::fromJson(
|
||||
"{ \"id\":42, \"depth\":0, \"totalNrRegs\":0, \"varInfoList\":[], "
|
||||
"\"nrRegs\":[], \"nrRegsHere\":[], \"regsToClear\":[], "
|
||||
"\"varsUsedLater\":[], \"varsValid\":[], \"outVariable\": { "
|
||||
"\"name\":\"variable\", \"id\":0 }, \"options\": { \"waitForSync\" : "
|
||||
"true, \"collections\":[42] }, \"viewId\": \"" + std::to_string(logicalView->id()) + "\", "
|
||||
"\"primarySort\": [ { \"field\": \"my.nested.Fields\", \"asc\": false}, { \"field\": \"another.field\", \"asc\":true } ], \"primarySortBuckets\": 1 }");
|
||||
"{ \"id\":42, \"depth\":0, \"totalNrRegs\":0, \"varInfoList\":[], "
|
||||
"\"nrRegs\":[], \"nrRegsHere\":[], \"regsToClear\":[], "
|
||||
"\"varsUsedLater\":[], \"varsValid\":[], \"outVariable\": { "
|
||||
"\"name\":\"variable\", \"id\":0 }, \"options\": { \"waitForSync\" : "
|
||||
"true, \"collections\":[42] }, \"viewId\": \"" +
|
||||
std::to_string(logicalView->id()) +
|
||||
"\", "
|
||||
"\"primarySort\": [ { \"field\": \"my.nested.Fields\", \"asc\": "
|
||||
"false}, { \"field\": \"another.field\", \"asc\":true } ], "
|
||||
"\"primarySortBuckets\": 1 }");
|
||||
|
||||
arangodb::iresearch::IResearchViewNode node(*query.plan(), // plan
|
||||
json->slice());
|
||||
|
@ -290,8 +302,12 @@ TEST_F(IResearchViewNodeTest, constructSortedView) {
|
|||
"{ \"id\":42, \"depth\":0, \"totalNrRegs\":0, \"varInfoList\":[], "
|
||||
"\"nrRegs\":[], \"nrRegsHere\":[], \"regsToClear\":[], "
|
||||
"\"varsUsedLater\":[], \"varsValid\":[], \"outVariable\": { "
|
||||
"\"name\":\"variable\", \"id\":0 }, \"viewId\": \"" + std::to_string(logicalView->id()) + "\", "
|
||||
"\"primarySort\": [ { \"field\": \"my.nested.Fields\", \"asc\": false}, { \"field\": \"another.field\", \"asc\":true } ], \"primarySortBuckets\": false }");
|
||||
"\"name\":\"variable\", \"id\":0 }, \"viewId\": \"" +
|
||||
std::to_string(logicalView->id()) +
|
||||
"\", "
|
||||
"\"primarySort\": [ { \"field\": \"my.nested.Fields\", \"asc\": "
|
||||
"false}, { \"field\": \"another.field\", \"asc\":true } ], "
|
||||
"\"primarySortBuckets\": false }");
|
||||
|
||||
try {
|
||||
arangodb::iresearch::IResearchViewNode node(*query.plan(), // plan
|
||||
|
@ -308,8 +324,12 @@ TEST_F(IResearchViewNodeTest, constructSortedView) {
|
|||
"{ \"id\":42, \"depth\":0, \"totalNrRegs\":0, \"varInfoList\":[], "
|
||||
"\"nrRegs\":[], \"nrRegsHere\":[], \"regsToClear\":[], "
|
||||
"\"varsUsedLater\":[], \"varsValid\":[], \"outVariable\": { "
|
||||
"\"name\":\"variable\", \"id\":0 }, \"viewId\": \"" + std::to_string(logicalView->id()) + "\", "
|
||||
"\"primarySort\": [ { \"field\": \"my.nested.Fields\", \"asc\": false}, { \"field\": \"another.field\", \"asc\":true } ], \"primarySortBuckets\": 3 }");
|
||||
"\"name\":\"variable\", \"id\":0 }, \"viewId\": \"" +
|
||||
std::to_string(logicalView->id()) +
|
||||
"\", "
|
||||
"\"primarySort\": [ { \"field\": \"my.nested.Fields\", \"asc\": "
|
||||
"false}, { \"field\": \"another.field\", \"asc\":true } ], "
|
||||
"\"primarySortBuckets\": 3 }");
|
||||
|
||||
try {
|
||||
arangodb::iresearch::IResearchViewNode node(*query.plan(), // plan
|
||||
|
@@ -354,7 +374,7 @@ TEST_F(IResearchViewNodeTest, construct) {
   EXPECT_TRUE(node.empty());                // view has no links
   EXPECT_TRUE(node.collections().empty());  // view has no links
   EXPECT_TRUE(node.shards().empty());
   EXPECT_TRUE(!node.sort().first);   // primary sort is not set by default
   EXPECT_EQ(0, node.sort().second);  // primary sort is not set by default
 
   EXPECT_TRUE(arangodb::aql::ExecutionNode::ENUMERATE_IRESEARCH_VIEW == node.getType());
@@ -593,7 +613,7 @@ TEST_F(IResearchViewNodeTest, constructFromVPackSingleServer) {
   EXPECT_TRUE(node.empty());                // view has no links
   EXPECT_TRUE(node.collections().empty());  // view has no links
   EXPECT_TRUE(node.shards().empty());
   EXPECT_TRUE(!node.sort().first);   // primary sort is not set by default
   EXPECT_EQ(0, node.sort().second);  // primary sort is not set by default
 
   EXPECT_TRUE(arangodb::aql::ExecutionNode::ENUMERATE_IRESEARCH_VIEW == node.getType());
@@ -625,7 +645,8 @@ TEST_F(IResearchViewNodeTest, constructFromVPackSingleServer) {
         "\"nrRegs\":[], \"nrRegsHere\":[], \"regsToClear\":[], "
         "\"varsUsedLater\":[], \"varsValid\":[], \"outVariable\": { "
         "\"name\":\"variable\", \"id\":0 }, \"viewId\": \"" +
-        std::to_string(logicalView->id()) + "\", \"primarySort\": [], \"primarySortBuckets\": false }");
+        std::to_string(logicalView->id()) +
+        "\", \"primarySort\": [], \"primarySortBuckets\": false }");
 
     arangodb::iresearch::IResearchViewNode node(*query.plan(),  // plan
                                                 json->slice());
@@ -633,7 +654,7 @@ TEST_F(IResearchViewNodeTest, constructFromVPackSingleServer) {
   EXPECT_TRUE(node.empty());                // view has no links
   EXPECT_TRUE(node.collections().empty());  // view has no links
   EXPECT_TRUE(node.shards().empty());
   EXPECT_TRUE(!node.sort().first);   // primary sort is not set by default
   EXPECT_EQ(0, node.sort().second);  // primary sort is not set by default
 
   EXPECT_TRUE(arangodb::aql::ExecutionNode::ENUMERATE_IRESEARCH_VIEW == node.getType());
@@ -665,7 +686,8 @@ TEST_F(IResearchViewNodeTest, constructFromVPackSingleServer) {
         "\"nrRegs\":[], \"nrRegsHere\":[], \"regsToClear\":[], "
         "\"varsUsedLater\":[], \"varsValid\":[], \"outVariable\": { "
         "\"name\":\"variable\", \"id\":0 }, \"viewId\": \"" +
-        std::to_string(logicalView->id()) + "\", \"primarySort\": [], \"primarySortBuckets\": 42 }");
+        std::to_string(logicalView->id()) +
+        "\", \"primarySort\": [], \"primarySortBuckets\": 42 }");
 
     arangodb::iresearch::IResearchViewNode node(*query.plan(),  // plan
                                                 json->slice());
@@ -673,7 +695,7 @@ TEST_F(IResearchViewNodeTest, constructFromVPackSingleServer) {
   EXPECT_TRUE(node.empty());                // view has no links
   EXPECT_TRUE(node.collections().empty());  // view has no links
   EXPECT_TRUE(node.shards().empty());
   EXPECT_TRUE(!node.sort().first);   // primary sort is not set by default
   EXPECT_EQ(0, node.sort().second);  // primary sort is not set by default
 
   EXPECT_TRUE(arangodb::aql::ExecutionNode::ENUMERATE_IRESEARCH_VIEW == node.getType());
@@ -714,7 +736,7 @@ TEST_F(IResearchViewNodeTest, constructFromVPackSingleServer) {
   EXPECT_TRUE(node.empty());                // view has no links
   EXPECT_TRUE(node.collections().empty());  // view has no links
   EXPECT_TRUE(node.shards().empty());
   EXPECT_TRUE(!node.sort().first);   // primary sort is not set by default
   EXPECT_EQ(0, node.sort().second);  // primary sort is not set by default
 
   EXPECT_TRUE(arangodb::aql::ExecutionNode::ENUMERATE_IRESEARCH_VIEW == node.getType());
@@ -758,7 +780,7 @@ TEST_F(IResearchViewNodeTest, constructFromVPackSingleServer) {
   EXPECT_TRUE(node.empty());                // view has no links
   EXPECT_TRUE(node.collections().empty());  // view has no links
   EXPECT_TRUE(node.shards().empty());
   EXPECT_TRUE(!node.sort().first);   // primary sort is not set by default
   EXPECT_EQ(0, node.sort().second);  // primary sort is not set by default
 
   EXPECT_TRUE(arangodb::aql::ExecutionNode::ENUMERATE_IRESEARCH_VIEW == node.getType());
@@ -801,7 +823,7 @@ TEST_F(IResearchViewNodeTest, constructFromVPackSingleServer) {
   EXPECT_TRUE(node.empty());                // view has no links
   EXPECT_TRUE(node.collections().empty());  // view has no links
   EXPECT_TRUE(node.shards().empty());
   EXPECT_TRUE(!node.sort().first);   // primary sort is not set by default
   EXPECT_EQ(0, node.sort().second);  // primary sort is not set by default
 
   EXPECT_TRUE(arangodb::aql::ExecutionNode::ENUMERATE_IRESEARCH_VIEW == node.getType());
@ -1958,3 +1980,182 @@ TEST_F(IResearchViewNodeTest, createBlockCoordinator) {
|
|||
dynamic_cast<arangodb::aql::ExecutionBlockImpl<arangodb::aql::NoResultsExecutor>*>(
|
||||
emptyBlock.get()));
|
||||
}
|
||||
|
||||
class IResearchViewBlockTest : public ::testing::Test {
 protected:
  StorageEngineMock engine;
  arangodb::application_features::ApplicationServer server;
  std::vector<std::pair<arangodb::application_features::ApplicationFeature*, bool>> features;

  IResearchViewBlockTest()
      : engine(server),
        server(nullptr, nullptr) {
    arangodb::EngineSelectorFeature::ENGINE = &engine;
    arangodb::tests::init(true);

    // suppress INFO {authentication} Authentication is turned on (system only), authentication for unix sockets is turned on
    // suppress WARNING {authentication} --server.jwt-secret is insecure. Use --server.jwt-secret-keyfile instead
    arangodb::LogTopic::setLogLevel(arangodb::Logger::AUTHENTICATION.name(),
                                    arangodb::LogLevel::ERR);

    // suppress log messages since tests check error conditions
    arangodb::LogTopic::setLogLevel(arangodb::Logger::FIXME.name(), arangodb::LogLevel::ERR); // suppress WARNING DefaultCustomTypeHandler called
    arangodb::LogTopic::setLogLevel(arangodb::iresearch::TOPIC.name(),
                                    arangodb::LogLevel::FATAL);
    irs::logger::output_le(iresearch::logger::IRL_FATAL, stderr);

    // setup required application features
    features.emplace_back(new arangodb::FlushFeature(server), false);
    features.emplace_back(new arangodb::V8DealerFeature(server),
                          false); // required for DatabaseFeature::createDatabase(...)
    features.emplace_back(new arangodb::ViewTypesFeature(server), true);
    features.emplace_back(new arangodb::AuthenticationFeature(server), true);
    features.emplace_back(new arangodb::DatabasePathFeature(server), false);
    features.emplace_back(new arangodb::DatabaseFeature(server), false);
    features.emplace_back(new arangodb::ShardingFeature(server), false);
    features.emplace_back(new arangodb::QueryRegistryFeature(server), false); // must be first
    arangodb::application_features::ApplicationServer::server->addFeature(
        features.back().first); // need QueryRegistryFeature feature to be added now in order to create the system database
    features.emplace_back(new arangodb::TraverserEngineRegistryFeature(server), false); // must be before AqlFeature
    features.emplace_back(new arangodb::AqlFeature(server), true);
    features.emplace_back(new arangodb::aql::OptimizerRulesFeature(server), true);
    features.emplace_back(new arangodb::aql::AqlFunctionFeature(server), true); // required for IResearchAnalyzerFeature
    features.emplace_back(new arangodb::iresearch::IResearchAnalyzerFeature(server), true);
    features.emplace_back(new arangodb::iresearch::IResearchFeature(server), true);
    features.emplace_back(new arangodb::SystemDatabaseFeature(server), true); // required for IResearchAnalyzerFeature

#if USE_ENTERPRISE
    features.emplace_back(new arangodb::LdapFeature(server),
                          false); // required for AuthenticationFeature with USE_ENTERPRISE
#endif

    // required for V8DealerFeature::prepare(), ClusterFeature::prepare() not required
    arangodb::application_features::ApplicationServer::server->addFeature(
        new arangodb::ClusterFeature(server));

    for (auto& f : features) {
      arangodb::application_features::ApplicationServer::server->addFeature(f.first);
    }

    for (auto& f : features) {
      f.first->prepare();
    }

    auto* dbPathFeature =
        arangodb::application_features::ApplicationServer::getFeature<arangodb::DatabasePathFeature>(
            "DatabasePath");
    arangodb::tests::setDatabasePath(*dbPathFeature); // ensure test data is stored in a unique directory

    auto const databases = arangodb::velocypack::Parser::fromJson(
        std::string("[ { \"name\": \"") +
        arangodb::StaticStrings::SystemDatabase + "\" } ]");
    auto* dbFeature =
        arangodb::application_features::ApplicationServer::lookupFeature<arangodb::DatabaseFeature>(
            "Database");
    dbFeature->loadDatabases(databases->slice());

    for (auto& f : features) {
      if (f.second) {
        f.first->start();
      }
    }
    auto vocbase = dbFeature->useDatabase(arangodb::StaticStrings::SystemDatabase);
    std::shared_ptr<arangodb::LogicalCollection> collection0;
    {
      auto createJson = arangodb::velocypack::Parser::fromJson(
          "{ \"name\": \"testCollection0\", \"id\" : \"42\" }");
      collection0 = vocbase->createCollection(createJson->slice());
      EXPECT_NE(nullptr, collection0);
    }
    auto createJson = arangodb::velocypack::Parser::fromJson(
        "{ \"name\": \"testView\", \"type\": \"arangosearch\" }");
    auto logicalView = vocbase->createView(createJson->slice());
    EXPECT_NE(nullptr, logicalView);
    auto updateJson = arangodb::velocypack::Parser::fromJson(
        "{ \"links\": {"
        "\"testCollection0\": { \"includeAllFields\": true, "
        "\"trackListPositions\": true }"
        "}}");
    EXPECT_TRUE(logicalView->properties(updateJson->slice(), true).ok());
    std::vector<std::string> EMPTY_VECTOR;
    auto trx = std::make_shared<arangodb::transaction::Methods>(
        arangodb::transaction::StandaloneContext::Create(*vocbase), EMPTY_VECTOR,
        EMPTY_VECTOR, EMPTY_VECTOR,
        arangodb::transaction::Options());

    EXPECT_TRUE(trx->begin().ok());
    // Fill dummy data into the index only (to simulate that some documents were already removed from the collection)
    arangodb::iresearch::IResearchLinkMeta meta;
    meta._includeAllFields = true;
    {
      auto doc = arangodb::velocypack::Parser::fromJson("{ \"key\": 1 }");
      auto indexes = collection0->getIndexes();
      EXPECT_TRUE(!indexes.empty());
      auto* l =
          static_cast<arangodb::iresearch::IResearchMMFilesLink*>(indexes[0].get());
      for (size_t i = 2; i < 10; ++i) {
        l->insert(*trx, arangodb::LocalDocumentId(i), doc->slice(), arangodb::Index::normal);
      }
    }
    // the collection itself contains only one live document
    auto aliveDoc = arangodb::velocypack::Parser::fromJson("{ \"key\": 1 }");
    arangodb::ManagedDocumentResult insertResult;
    arangodb::OperationOptions options;
    EXPECT_TRUE(collection0
                    ->insert(trx.get(), aliveDoc->slice(), insertResult, options, false)
                    .ok());
    EXPECT_TRUE(trx->commit().ok());
  }

  ~IResearchViewBlockTest() {
    arangodb::AqlFeature(server).stop(); // unset singleton instance
    arangodb::LogTopic::setLogLevel(arangodb::iresearch::TOPIC.name(),
                                    arangodb::LogLevel::DEFAULT);
    arangodb::LogTopic::setLogLevel(arangodb::Logger::FIXME.name(),
                                    arangodb::LogLevel::DEFAULT);
    // destroy application features
    for (auto& f : features) {
      if (f.second) {
        f.first->stop();
      }
    }
    for (auto& f : features) {
      f.first->unprepare();
    }
    arangodb::LogTopic::setLogLevel(arangodb::Logger::AUTHENTICATION.name(),
                                    arangodb::LogLevel::DEFAULT);
    arangodb::application_features::ApplicationServer::server = nullptr;
    arangodb::EngineSelectorFeature::ENGINE = nullptr;
  }
};

TEST_F(IResearchViewBlockTest, retrieveWithMissingInCollectionUnordered) {
  auto* dbFeature =
      arangodb::application_features::ApplicationServer::lookupFeature<arangodb::DatabaseFeature>(
          "Database");
  auto vocbase = dbFeature->useDatabase(arangodb::StaticStrings::SystemDatabase);
  auto queryResult =
      arangodb::tests::executeQuery(*vocbase,
                                    "FOR d IN testView OPTIONS { waitForSync: true } RETURN d");
  ASSERT_TRUE(queryResult.result.ok());
  auto result = queryResult.data->slice();
  EXPECT_TRUE(result.isArray());
  arangodb::velocypack::ArrayIterator resultIt(result);
  ASSERT_EQ(1, resultIt.size());
}

TEST_F(IResearchViewBlockTest, retrieveWithMissingInCollection) {
  auto* dbFeature =
      arangodb::application_features::ApplicationServer::lookupFeature<arangodb::DatabaseFeature>(
          "Database");
  auto vocbase = dbFeature->useDatabase(arangodb::StaticStrings::SystemDatabase);
  auto queryResult =
      arangodb::tests::executeQuery(*vocbase,
                                    "FOR d IN testView OPTIONS { waitForSync: true } SORT BM25(d) RETURN d");
  ASSERT_TRUE(queryResult.result.ok());
  auto result = queryResult.data->slice();
  EXPECT_TRUE(result.isArray());
  arangodb::velocypack::ArrayIterator resultIt(result);
  ASSERT_EQ(1, resultIt.size());
}

@ -530,13 +530,13 @@ std::shared_ptr<arangodb::Index> PhysicalCollectionMock::createIndex(
    return nullptr;
  }

  _indexes.emplace_back(std::move(index));
  _indexes.insert(index);
  created = true;

  res = trx.commit();
  TRI_ASSERT(res.ok());

  return _indexes.back();
  return index;
}

void PhysicalCollectionMock::deferDropCollection(

@ -702,7 +702,7 @@ bool PhysicalCollectionMock::addIndex(std::shared_ptr<arangodb::Index> idx) {

  TRI_UpdateTickServer(static_cast<TRI_voc_tick_t>(id));

  _indexes.emplace_back(idx);
  _indexes.insert(idx);
  return true;
}

@ -86,18 +86,23 @@ class PhysicalCollectionTest : public ::testing::Test {
// -----------------------------------------------------------------------------

TEST_F(PhysicalCollectionTest, test_new_object_for_insert) {
  TRI_vocbase_t vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1, "testVocbase");
  TRI_vocbase_t vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1,
                        "testVocbase");

  auto json = arangodb::velocypack::Parser::fromJson("{ \"name\": \"test\" }");
  auto collection = vocbase.createCollection(json->slice());

  auto physical = engine.createPhysicalCollection(*collection, json->slice());

  auto doc = arangodb::velocypack::Parser::fromJson("{ \"doc1\":\"test1\", \"doc100\":\"test2\", \"doc2\":\"test3\", \"z\":1, \"b\":2, \"a\":3, \"Z\":1, \"B\":2, \"A\": 3, \"_foo\":1, \"_bar\":2, \"_zoo\":3 }");
  auto doc = arangodb::velocypack::Parser::fromJson(
      "{ \"doc1\":\"test1\", \"doc100\":\"test2\", \"doc2\":\"test3\", "
      "\"z\":1, \"b\":2, \"a\":3, \"Z\":1, \"B\":2, \"A\": 3, \"_foo\":1, "
      "\"_bar\":2, \"_zoo\":3 }");

  TRI_voc_rid_t revisionId = 0;
  arangodb::velocypack::Builder builder;
  Result res = physical->newObjectForInsert(nullptr, doc->slice(), false, builder, false, revisionId);
  Result res = physical->newObjectForInsert(nullptr, doc->slice(), false,
                                            builder, false, revisionId);
  EXPECT_TRUE(res.ok());
  EXPECT_TRUE(revisionId > 0);

@ -109,14 +114,14 @@ TEST_F(PhysicalCollectionTest, test_new_object_for_insert) {
  EXPECT_TRUE(slice.get("_id").isCustom());
  EXPECT_TRUE(slice.hasKey("_rev"));
  EXPECT_TRUE(slice.get("_rev").isString());

  EXPECT_TRUE(slice.get("doc1").isString());
  EXPECT_EQ("test1", slice.get("doc1").copyString());
  EXPECT_TRUE(slice.get("doc100").isString());
  EXPECT_EQ("test2", slice.get("doc100").copyString());
  EXPECT_TRUE(slice.get("doc2").isString());
  EXPECT_EQ("test3", slice.get("doc2").copyString());

  EXPECT_TRUE(slice.hasKey("z"));
  EXPECT_TRUE(slice.get("z").isNumber());
  EXPECT_EQ(1, slice.get("z").getNumber<int>());

@ -126,7 +131,7 @@ TEST_F(PhysicalCollectionTest, test_new_object_for_insert) {
  EXPECT_TRUE(slice.hasKey("a"));
  EXPECT_TRUE(slice.get("a").isNumber());
  EXPECT_EQ(3, slice.get("a").getNumber<int>());

  EXPECT_TRUE(slice.hasKey("Z"));
  EXPECT_TRUE(slice.get("Z").isNumber());
  EXPECT_EQ(1, slice.get("Z").getNumber<int>());

@ -136,7 +141,7 @@ TEST_F(PhysicalCollectionTest, test_new_object_for_insert) {
  EXPECT_TRUE(slice.hasKey("A"));
  EXPECT_TRUE(slice.get("A").isNumber());
  EXPECT_EQ(3, slice.get("A").getNumber<int>());

  EXPECT_TRUE(slice.hasKey("_foo"));
  EXPECT_TRUE(slice.get("_foo").isNumber());
  EXPECT_EQ(1, slice.get("_foo").getNumber<int>());

@ -153,7 +158,9 @@ TEST_F(PhysicalCollectionTest, test_new_object_for_insert) {
  // iterate over the data in the order that is stored in the builder
  {
    velocypack::ObjectIterator it(slice, true);
    std::vector<std::string> expected{ "_key", "_id", "_rev", "doc1", "doc100", "doc2", "z", "b", "a", "Z", "B", "A", "_foo", "_bar", "_zoo" };
    std::vector<std::string> expected{"_key", "_id", "_rev", "doc1", "doc100",
                                      "doc2", "z",   "b",    "a",    "Z",
                                      "B",    "A",   "_foo", "_bar", "_zoo"};

    for (auto const& key : expected) {
      EXPECT_TRUE(it.valid());

@ -165,12 +172,76 @@ TEST_F(PhysicalCollectionTest, test_new_object_for_insert) {
  // iterate over the data in the order that is stored in the index table
  {
    velocypack::ObjectIterator it(slice, false);
    std::vector<std::string> expected{ "A", "B", "Z", "_bar", "_foo", "_id", "_key", "_rev", "_zoo", "a", "b", "doc1", "doc100", "doc2", "z" };
    std::vector<std::string> expected{"A",   "B",    "Z",      "_bar", "_foo",
                                      "_id", "_key", "_rev",   "_zoo", "a",
                                      "b",   "doc1", "doc100", "doc2", "z"};

    for (auto const& key : expected) {
      EXPECT_TRUE(it.valid());
      EXPECT_EQ(key, it.key().copyString());
      it.next();
    }
  }
}

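As an aside on the two iteration blocks above: velocypack's ObjectIterator takes a flag that selects between the stored (sequential) order and the sorted index-table order. A minimal standalone sketch, using only the calls that already appear in this test; the JSON literal is illustrative, and the two orders only differ once the object is stored with an index table rather than in the compact format:

#include <iostream>
#include <velocypack/Iterator.h>
#include <velocypack/Parser.h>

int main() {
  // keys deliberately inserted in non-alphabetical order
  auto builder = arangodb::velocypack::Parser::fromJson(
      "{ \"z\": 1, \"a\": 2, \"m\": 3 }");
  auto slice = builder->slice();
  for (bool sequential : {true, false}) {
    // true: walk the object in stored order ("z", "a", "m");
    // false: walk it in index-table order ("a", "m", "z"), assuming the
    // object was not stored in the compact format without an index table
    arangodb::velocypack::ObjectIterator it(slice, sequential);
    while (it.valid()) {
      std::cout << it.key().copyString() << ' ';
      it.next();
    }
    std::cout << '\n';
  }
  return 0;
}
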
class MockIndex : public Index {
 public:
  MockIndex(Index::IndexType type, bool needsReversal, TRI_idx_iid_t id,
            LogicalCollection& collection, const std::string& name,
            std::vector<std::vector<arangodb::basics::AttributeName>> const& fields,
            bool unique, bool sparse)
      : Index(id, collection, name, fields, unique, sparse),
        _type(type),
        _needsReversal(needsReversal) {}

  bool needsReversal() const override { return _needsReversal; }
  IndexType type() const override { return _type; }
  char const* typeName() const override { return "IndexMock"; }
  bool isPersistent() const override { return true; }
  bool canBeDropped() const override { return true; }
  bool isSorted() const override { return false; }
  bool isHidden() const override { return false; }
  bool hasSelectivityEstimate() const override { return false; }
  size_t memory() const override { return 0; }
  void load() override {}
  void unload() override {}

 private:
  Index::IndexType _type;
  bool _needsReversal;
};

TEST_F(PhysicalCollectionTest, test_index_ordering) {
  TRI_vocbase_t vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1,
                        "testVocbase");
  auto json = arangodb::velocypack::Parser::fromJson("{ \"name\": \"test\" }");
  auto collection = vocbase.createCollection(json->slice());
  std::vector<std::vector<arangodb::basics::AttributeName>> dummyFields;
  PhysicalCollection::IndexContainerType test_container;
  // also a regular index, but one that does not need to be reversed
  test_container.insert(std::make_shared<MockIndex>(Index::TRI_IDX_TYPE_HASH_INDEX,
                                                    false, 2, *collection, "4",
                                                    dummyFields, false, false));
  // edge index - should go right after the primary index and after all other
  // non-reversible edge indexes
  test_container.insert(std::make_shared<MockIndex>(Index::TRI_IDX_TYPE_EDGE_INDEX,
                                                    true, 3, *collection, "3",
                                                    dummyFields, false, false));
  // edge index - non-reversible, should go right after the primary index
  test_container.insert(std::make_shared<MockIndex>(Index::TRI_IDX_TYPE_EDGE_INDEX,
                                                    false, 4, *collection, "2",
                                                    dummyFields, false, false));
  // primary index - should come first!
  test_container.insert(std::make_shared<MockIndex>(Index::TRI_IDX_TYPE_PRIMARY_INDEX,
                                                    true, 5, *collection, "1",
                                                    dummyFields, true, false));
  // should execute last - a regular index with reversal possible
  test_container.insert(std::make_shared<MockIndex>(Index::TRI_IDX_TYPE_HASH_INDEX,
                                                    true, 1, *collection, "5",
                                                    dummyFields, false, false));

  TRI_idx_iid_t prevId = 5;
  for (auto idx : test_container) {
    ASSERT_EQ(prevId, idx->id());
    --prevId;
  }
}
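For readers following the vector-to-set change, a strict weak ordering along the following lines would reproduce exactly the order this test asserts. This is a minimal sketch with a hypothetical name (IndexComparator); the actual ordering used by PhysicalCollection::IndexContainerType lives in the collection code and may differ in detail:

#include <memory>
#include <tuple>

// Sketch: the primary index sorts first, then edge indexes, then all
// remaining indexes; within each group, indexes that need reversal sort
// last so they execute last and can still be reverted on failure.
struct IndexComparator {
  static int rank(arangodb::Index const& idx) {
    switch (idx.type()) {
      case arangodb::Index::TRI_IDX_TYPE_PRIMARY_INDEX: return 0;
      case arangodb::Index::TRI_IDX_TYPE_EDGE_INDEX:    return 1;
      default:                                          return 2;
    }
  }
  bool operator()(std::shared_ptr<arangodb::Index> const& lhs,
                  std::shared_ptr<arangodb::Index> const& rhs) const {
    return std::make_tuple(rank(*lhs), lhs->needsReversal(), lhs->id()) <
           std::make_tuple(rank(*rhs), rhs->needsReversal(), rhs->id());
  }
};

// Usage sketch: a std::set<std::shared_ptr<arangodb::Index>, IndexComparator>
// filled with the five mock indexes above iterates them as ids 5, 4, 3, 2, 1,
// which is exactly what the loop at the end of the test checks.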

@ -0,0 +1,270 @@
/*jshint globalstrict:false, strict:false, maxlen: 500 */
/*global assertUndefined, assertEqual, assertTrue, assertFalse, assertNotNull, fail */

////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2017 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Andrei Lobov
////////////////////////////////////////////////////////////////////////////////

var jsunity = require("jsunity");
var db = require("@arangodb").db;
var ERRORS = require("@arangodb").errors;
var internal = require('internal');

////////////////////////////////////////////////////////////////////////////////
/// @brief test suite
////////////////////////////////////////////////////////////////////////////////

function iResearchFeatureAqlServerSideTestSuite () {
  return {
    setUpAll : function () {
    },

    tearDownAll : function () {
    },
    testViewWithInterruptedInserts : function() {
      if (!internal.debugCanUseFailAt()) {
        return;
      }
      internal.debugClearFailAt();

      let docsCollectionName = "docs";
      let docsViewName = "docs_view";
      try { db._drop(docsCollectionName); } catch(e) {}
      try { db._dropView(docsViewName); } catch(e) {}
      internal.debugSetFailAt('HashIndexAlwaysLast');
      let docsCollection = db._create(docsCollectionName);
      let docsView = db._createView(docsViewName, "arangosearch", {
        "links": {
          "docs": {
            "analyzers": ["identity"],
            "fields": {},
            "includeAllFields": true,
            "storeValues": "id",
            "trackListPositions": false
          }
        },
        consolidationIntervalMsec: 0,
        cleanupIntervalStep: 0
      });
      let docs = [];
      for (let i = 0; i < 10; i++) {
        let docId = "TestDoc" + i;
        docs.push({ _id: "docs/" + docId, _key: docId, "indexField": i });
      }
      docsCollection.save(docs);

      // test a single-operation transaction
      try {
        docsCollection.save(docs[5]);
        fail();
      } catch(e) {
        assertEqual(ERRORS.ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED.code, e.errorNum);
      }
      assertEqual(docs.length, db._query("FOR u IN " + docsCollectionName +
        " COLLECT WITH COUNT INTO length RETURN length").toArray()[0]);
      assertEqual(docs.length, db._query("FOR u IN " + docsViewName +
        " OPTIONS { waitForSync : true } COLLECT WITH COUNT INTO length RETURN length").toArray()[0]);

      // test a multiple-operation transaction (no index revert needed, as the PK will be violated)
      let docsNew = [];
      for (let i = 11; i < 20; i++) {
        let docId = "TestDoc" + i;
        docsNew.push({ _id: "docs/" + docId, _key: docId, "indexField": i });
      }
      docsNew.push(docs[5]); // this one will cause a PK violation
      docsCollection.save(docsNew);
      assertEqual(docs.length + docsNew.length - 1,
                  db._query("FOR u IN " + docsCollectionName +
                    " COLLECT WITH COUNT INTO length RETURN length").toArray()[0]);
      assertEqual(docs.length + docsNew.length - 1,
                  db._query("FOR u IN " + docsViewName +
                    " OPTIONS { waitForSync : true } COLLECT WITH COUNT INTO length RETURN length").toArray()[0]);

      // add another index (to make the operation fail after the arangosearch insert has passed);
      // the index will be placed after arangosearch due to the failpoint 'HashIndexAlwaysLast'
      docsCollection.ensureIndex({type: "hash", unique: true, fields: ["indexField"]});

      // single-operation insert (will fail on the unique index)
      try {
        docsCollection.save({ _id: "docs/fail", _key: "fail", "indexField": 0 });
        fail();
      } catch(e) {
        assertEqual(ERRORS.ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED.code, e.errorNum);
      }
      assertEqual(docs.length + docsNew.length - 1,
                  db._query("FOR u IN " + docsCollectionName +
                    " COLLECT WITH COUNT INTO length RETURN length").toArray()[0]);
      assertEqual(docs.length + docsNew.length - 1,
                  db._query("FOR u IN " + docsViewName +
                    " OPTIONS { waitForSync : true } COLLECT WITH COUNT INTO length RETURN length").toArray()[0]);

      // test a multiple-operation transaction (an arangosearch index revert will be needed)
      let docsNew2 = [];
      for (let i = 21; i < 30; i++) {
        let docId = "TestDoc" + i;
        docsNew2.push({ _id: "docs/" + docId, _key: docId, "indexField": i });
      }
      docsNew2.push({ _id: "docs/fail2", _key: "fail2", "indexField": 0 }); // this one will cause a unique hash index violation
      docsCollection.save(docsNew2);
      assertEqual(docs.length + docsNew.length + docsNew2.length - 2,
                  db._query("FOR u IN " + docsCollectionName +
                    " COLLECT WITH COUNT INTO length RETURN length").toArray()[0]);
      assertEqual(docs.length + docsNew.length + docsNew2.length - 2,
                  db._query("FOR u IN " + docsViewName +
                    " OPTIONS { waitForSync : true } COLLECT WITH COUNT INTO length RETURN length").toArray()[0]);

      db._drop(docsCollectionName);
      db._dropView(docsViewName);
      internal.debugRemoveFailAt('HashIndexAlwaysLast');
    },

    testViewWithInterruptedUpdates : function() {
      if (!internal.debugCanUseFailAt()) {
        return;
      }
      internal.debugClearFailAt();
      let docsCollectionName = "docs";
      let docsViewName = "docs_view";
      try { db._drop(docsCollectionName); } catch(e) {}
      try { db._dropView(docsViewName); } catch(e) {}
      internal.debugSetFailAt('HashIndexAlwaysLast');
      let docsCollection = db._create(docsCollectionName);
      let docsView = db._createView(docsViewName, "arangosearch", {
        "links": {
          "docs": {
            "analyzers": ["identity"],
            "fields": {},
            "includeAllFields": true,
            "storeValues": "id",
            "trackListPositions": false
          }
        },
        consolidationIntervalMsec: 0,
        cleanupIntervalStep: 0
      });

      let docs = [];
      for (let i = 0; i < 10; i++) {
        let docId = "TestDoc" + i;
        docs.push({ _id: "docs/" + docId, _key: docId, "indexField": i });
      }
      docsCollection.save(docs);
      // sanity check: collection and view should be in sync
      assertEqual(docs.length, db._query("FOR u IN " + docsCollectionName +
        " COLLECT WITH COUNT INTO length RETURN length").toArray()[0]);
      assertEqual(docs.length, db._query("FOR u IN " + docsViewName +
        " OPTIONS { waitForSync : true } COLLECT WITH COUNT INTO length RETURN length").toArray()[0]);

      // add another index (to make the operation fail after the arangosearch update has passed);
      // the index will be placed after arangosearch due to the failpoint 'HashIndexAlwaysLast'
      docsCollection.ensureIndex({type: "hash", unique: true, fields: ["indexField"]});

      let docsUpdateIds = [];
      let docsUpdateData = [];
      docsUpdateIds.push(docs[0]._id);
      docsUpdateData.push({"indexField": 999}); // valid
      docsUpdateIds.push(docs[1]._id);
      docsUpdateData.push({"indexField": docs[2].indexField}); // will cause a unique constraint conflict
      docsCollection.update(docsUpdateIds, docsUpdateData);

      // documents should stay consistent
      let collectionDocs = db._query("FOR d IN " + docsCollectionName + " SORT d._id ASC RETURN d").toArray();
      let viewDocs = db._query("FOR d IN " + docsViewName + " OPTIONS { waitForSync : true } SORT d._id ASC RETURN d").toArray();
      assertEqual(collectionDocs.length, viewDocs.length);
      for (let i = 0; i < viewDocs.length; i++) {
        assertEqual(viewDocs[i]._id, collectionDocs[i]._id);
      }
      db._drop(docsCollectionName);
      db._dropView(docsViewName);
      internal.debugRemoveFailAt('HashIndexAlwaysLast');
    },

    testViewWithInterruptedRemoves : function() {
      if (!internal.debugCanUseFailAt()) {
        return;
      }
      internal.debugClearFailAt();
      let docsCollectionName = "docs";
      let docsViewName = "docs_view";
      try { db._drop(docsCollectionName); } catch(e) {}
      try { db._dropView(docsViewName); } catch(e) {}
      internal.debugSetFailAt('HashIndexAlwaysLast');
      let docsCollection = db._create(docsCollectionName);
      let docsView = db._createView(docsViewName, "arangosearch", {
        "links": {
          "docs": {
            "analyzers": ["identity"],
            "fields": {},
            "includeAllFields": true,
            "storeValues": "id",
            "trackListPositions": false
          }
        },
        consolidationIntervalMsec: 0,
        cleanupIntervalStep: 0
      });

      let docs = [];
      for (let i = 0; i < 10; i++) {
        let docId = "TestDoc" + i;
        docs.push({ _id: "docs/" + docId, _key: docId, "indexField": i });
      }
      docsCollection.save(docs);
      // sanity check: collection and view should be in sync
      assertEqual(docs.length, db._query("FOR u IN " + docsCollectionName +
        " COLLECT WITH COUNT INTO length RETURN length").toArray()[0]);
      assertEqual(docs.length, db._query("FOR u IN " + docsViewName +
        " OPTIONS { waitForSync : true } COLLECT WITH COUNT INTO length RETURN length").toArray()[0]);

      // add another index (to make the operation fail after the arangosearch remove has passed);
      // the index will be placed after arangosearch due to the failpoint 'HashIndexAlwaysLast'
      docsCollection.ensureIndex({type: "hash", unique: true, fields: ["indexField"]});
      let docsRemoveIds = [];
      docsRemoveIds.push(docs[2]._id);
      docsRemoveIds.push(docs[3]._id);
      internal.debugSetFailAt('BreakHashIndexRemove');
      docsCollection.remove(docsRemoveIds);
      internal.debugRemoveFailAt('BreakHashIndexRemove');
      // documents should stay consistent
      let collectionDocs = db._query("FOR d IN " + docsCollectionName + " SORT d._id ASC RETURN d").toArray();
      let viewDocs = db._query("FOR d IN " + docsViewName + " OPTIONS { waitForSync : true } SORT d._id ASC RETURN d").toArray();
      assertEqual(collectionDocs.length, docs.length); // the removes should not have gone through if the failpoint worked
      assertEqual(collectionDocs.length, viewDocs.length);
      for (let i = 0; i < viewDocs.length; i++) {
        assertEqual(viewDocs[i]._id, collectionDocs[i]._id);
      }

      db._drop(docsCollectionName);
      db._dropView(docsViewName);
      internal.debugRemoveFailAt('HashIndexAlwaysLast');
    }
  };
}

////////////////////////////////////////////////////////////////////////////////
/// @brief executes the test suite
////////////////////////////////////////////////////////////////////////////////

jsunity.run(iResearchFeatureAqlServerSideTestSuite);

return jsunity.done();
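All three tests above exercise the same invariant: when a later index in the ordered index container rejects an operation, every index that was already updated, including the ArangoSearch link, must be reverted so that collection and view stay in sync. A minimal, hypothetical C++ sketch of that pattern follows; insertIntoIndexes and the exact insert/remove signatures are illustrative, not the engine's actual API, with only the LocalDocumentId/Slice/OperationMode shapes borrowed from the test code above:

#include <memory>
#include <vector>

// Hypothetical sketch of the rollback pattern: walk the indexes in container
// order; on failure, revert every index that already accepted the document.
// Ordering reversible indexes last keeps the set of reverts small and safe.
arangodb::Result insertIntoIndexes(
    arangodb::transaction::Methods& trx, arangodb::LocalDocumentId const& docId,
    arangodb::velocypack::Slice const& doc,
    std::vector<std::shared_ptr<arangodb::Index>> const& indexes) {
  size_t completed = 0;
  for (auto const& idx : indexes) {
    arangodb::Result res = idx->insert(trx, docId, doc, arangodb::Index::normal);
    if (res.fail()) {
      // revert earlier inserts so no index (or view link) keeps a phantom entry
      for (size_t i = 0; i < completed; ++i) {
        indexes[i]->remove(trx, docId, doc, arangodb::Index::normal);
      }
      return res;
    }
    ++completed;
  }
  return arangodb::Result();
}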