
Upgrade old MMFiles collections for compatibility with ArangoSearch. (#7248)

Dan Larkin-York 2018-11-07 15:52:37 +01:00 committed by Jan
parent 7306cdaa03
commit b447f98a7a
31 changed files with 1074 additions and 52 deletions
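The heart of this change is an on-disk convention: a 3.4 document marker carries its LocalDocumentId appended directly behind the VelocyPack payload, and the only way to tell whether an older marker has one is to compare sizes (see the size checks in MMFilesCollection.cpp and MMFilesWalRecoverState.cpp below). A minimal standalone C++ sketch of that detection, using illustrative stand-in types rather than the real ArangoDB ones:

#include <cstdint>
#include <cstring>
#include <iostream>

// Illustrative stand-ins; the real layout lives in MMFilesDatafileHelper.
using IdType = uint64_t;  // stand-in for LocalDocumentId::BaseType

struct MarkerView {
  uint32_t totalSize;    // full marker size as stored in the datafile
  uint32_t vpackOffset;  // offset of the VelocyPack payload within the marker
  uint32_t vpackSize;    // byte size of the VelocyPack payload
  uint8_t const* base;   // start of the marker
};

// A persistent id is present iff the marker is exactly sizeof(IdType)
// larger than header plus payload.
bool hasPersistentLocalId(MarkerView const& m) {
  return m.totalSize == m.vpackOffset + m.vpackSize + sizeof(IdType);
}

// Read the id stored directly behind the payload.
IdType readLocalId(MarkerView const& m) {
  IdType id;
  std::memcpy(&id, m.base + m.vpackOffset + m.vpackSize, sizeof(id));
  return id;
}

int main() {
  uint8_t buf[64] = {};
  IdType stored = 42;
  std::memcpy(buf + 16 + 8, &stored, sizeof(stored));  // header 16B, payload 8B
  MarkerView withId{16 + 8 + sizeof(IdType), 16, 8, buf};
  MarkerView withoutId{16 + 8, 16, 8, buf};
  std::cout << hasPersistentLocalId(withId) << " " << readLocalId(withId)
            << " " << hasPersistentLocalId(withoutId) << "\n";  // 1 42 0
}

Because the presence of the id is encoded purely in the marker length, old and new markers can coexist in one datafile, which is what makes an in-place upgrade pass possible.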

.gitignore

@@ -29,6 +29,7 @@ build
Thumbs.db
enterprise
upgrade-data-tests
compile_commands.json
instanceinfo.json
testresult.json

@@ -59,15 +59,15 @@ class ClusterCollection final : public PhysicalCollection {
ClusterCollection(LogicalCollection& collection, PhysicalCollection const*); // use in cluster only!!!!!
~ClusterCollection();
/// @brief fetches current index selectivity estimates
/// if allowUpdate is true, will potentially make a cluster-internal roundtrip to
/// fetch current values!
std::unordered_map<std::string, double> clusterIndexEstimates(bool allowUpdate) const override;
/// @brief sets the current index selectivity estimates
void clusterIndexEstimates(std::unordered_map<std::string, double>&& estimates) override;
/// @brief flushes the current index selectivity estimates
void flushClusterIndexEstimates() override;
@@ -182,6 +182,8 @@ class ClusterCollection final : public PhysicalCollection {
bool lock, TRI_voc_rid_t& prevRev,
TRI_voc_rid_t& revisionId) override;
bool hasAllPersistentLocalIds() const { return false; }
protected:
/// @brief Inject figures that are specific to StorageEngine
void figuresSpecific(

@@ -24,7 +24,10 @@
#include "Cluster/ServerState.h"
#include "Logger/Logger.h"
#include "Logger/LogMacros.h"
#include "StorageEngine/PhysicalCollection.h"
#include "VocBase/LogicalCollection.h"
#include "IResearchCommon.h"
#include "IResearchMMFilesLink.h"
#include "IResearchLinkHelper.h"
@@ -60,6 +63,18 @@ IResearchMMFilesLink::~IResearchMMFilesLink() {
auto* link = static_cast<arangodb::iresearch::IResearchMMFilesLink*>(ptr.get());
#endif
// ensure the collection is loaded so that we have valid data for the next check
if (TRI_VOC_COL_STATUS_LOADED != collection.status()) {
collection.load();
}
if (!collection.getPhysical()->hasAllPersistentLocalIds()) {
LOG_TOPIC(ERR, arangodb::iresearch::TOPIC)
<< "mmfiles collection uses pre-3.4 format and cannot be linked to an "
<< "arangosearch view; try recreating collection and moving the "
<< "contents to the new collection";
return nullptr;
}
return link && link->init(definition) ? ptr : nullptr;
} catch (arangodb::basics::Exception& e) {
LOG_TOPIC(WARN, Logger::DEVEL)

@@ -28,6 +28,7 @@
#include "Basics/FileUtils.h"
#include "Basics/PerformanceLogScope.h"
#include "Basics/ReadLocker.h"
#include "Basics/ReadUnlocker.h"
#include "Basics/Result.h"
#include "Basics/StaticStrings.h"
#include "Basics/VelocyPackHelper.h"
@@ -39,6 +40,7 @@
#include "Indexes/IndexIterator.h"
#include "Logger/Logger.h"
#include "MMFiles/MMFilesCollectionWriteLocker.h"
#include "MMFiles/MMFilesCompactorThread.h"
#include "MMFiles/MMFilesDatafile.h"
#include "MMFiles/MMFilesDatafileHelper.h"
#include "MMFiles/MMFilesDocumentOperation.h"
@@ -76,32 +78,27 @@ namespace arangodb {
struct OpenIteratorState {
LogicalCollection* _collection;
arangodb::MMFilesPrimaryIndex* _primaryIndex;
TRI_voc_tid_t _tid;
TRI_voc_fid_t _fid;
TRI_voc_tid_t _tid{0};
TRI_voc_fid_t _fid{0};
std::unordered_map<TRI_voc_fid_t, MMFilesDatafileStatisticsContainer*>
_stats;
MMFilesDatafileStatisticsContainer* _dfi;
MMFilesDatafileStatisticsContainer* _dfi{nullptr};
transaction::Methods* _trx;
ManagedDocumentResult _mdr;
IndexLookupContext _context;
uint64_t _deletions;
uint64_t _documents;
int64_t _initialCount;
uint64_t _deletions{0};
uint64_t _documents{0};
int64_t _initialCount{-1};
bool _hasAllPersistentLocalIds{true};
OpenIteratorState(LogicalCollection* collection, transaction::Methods* trx)
: _collection(collection),
_primaryIndex(
static_cast<MMFilesCollection*>(collection->getPhysical())
->primaryIndex()),
_tid(0),
_fid(0),
_stats(),
_dfi(nullptr),
_trx(trx),
_context(trx, collection, &_mdr, 1),
_deletions(0),
_documents(0),
_initialCount(-1) {
_context(trx, collection, &_mdr, 1) {
TRI_ASSERT(collection != nullptr);
TRI_ASSERT(trx != nullptr);
}
@@ -156,6 +153,88 @@ static MMFilesDatafileStatisticsContainer* FindDatafileStats(
return stats.release();
}
bool countDocumentsIterator(MMFilesMarker const* marker,
void* counter, MMFilesDatafile*) {
TRI_ASSERT(nullptr != counter);
if (marker->getType() == TRI_DF_MARKER_VPACK_DOCUMENT) {
(*static_cast<int*>(counter))++;
}
return true;
}
Result persistLocalDocumentIdIterator(
MMFilesMarker const* marker, void* data, MMFilesDatafile* inputFile) {
Result res;
auto outputFile = static_cast<MMFilesDatafile*>(data);
switch (marker->getType()) {
case TRI_DF_MARKER_VPACK_DOCUMENT: {
auto transactionId = MMFilesDatafileHelper::TransactionId(marker);
VPackSlice const slice(
reinterpret_cast<char const*>(marker) +
MMFilesDatafileHelper::VPackOffset(TRI_DF_MARKER_VPACK_DOCUMENT));
uint8_t const* vpack = slice.begin();
LocalDocumentId localDocumentId;
if (marker->getSize() ==
MMFilesDatafileHelper::VPackOffset(TRI_DF_MARKER_VPACK_DOCUMENT)
+ slice.byteSize() + sizeof(LocalDocumentId::BaseType)) {
// we do have a LocalDocumentId stored at the end of the marker
uint8_t const* ptr = vpack + slice.byteSize();
localDocumentId = LocalDocumentId(
encoding::readNumber<LocalDocumentId::BaseType>(
ptr, sizeof(LocalDocumentId::BaseType)));
} else {
localDocumentId = LocalDocumentId::create();
}
MMFilesCrudMarker updatedMarker(TRI_DF_MARKER_VPACK_DOCUMENT,
transactionId, localDocumentId, slice);
std::unique_ptr<char[]> buffer(new char[updatedMarker.size()]);
MMFilesMarker* outputMarker =
reinterpret_cast<MMFilesMarker*>(buffer.get());
MMFilesDatafileHelper::InitMarker(outputMarker, updatedMarker.type(),
updatedMarker.size(),
marker->getTick());
updatedMarker.store(buffer.get());
MMFilesMarker* result;
res = outputFile->reserveElement(outputMarker->getSize(), &result, 0);
if (res.fail()) {
return res;
}
res = outputFile->writeCrcElement(result, outputMarker);
if (res.fail()) {
return res;
}
break;
}
case TRI_DF_MARKER_HEADER:
case TRI_DF_MARKER_COL_HEADER:
case TRI_DF_MARKER_FOOTER: {
// skip marker, either already written by createCompactor or will be
// written by closeCompactor
break;
}
default: {
// direct copy
MMFilesMarker* result;
res = outputFile->reserveElement(marker->getSize(), &result, 0);
if (res.fail()) {
return res;
}
res = outputFile->writeElement(result, marker);
if (res.fail()) {
return res;
}
break;
}
}
return res;
}
} // namespace
arangodb::Result MMFilesCollection::updateProperties(VPackSlice const& slice,
@@ -280,6 +359,7 @@ int MMFilesCollection::OpenIteratorHandleDocumentMarker(
localDocumentId = LocalDocumentId(encoding::readNumber<LocalDocumentId::BaseType>(ptr, sizeof(LocalDocumentId::BaseType)));
} else {
localDocumentId = LocalDocumentId::create();
state->_hasAllPersistentLocalIds = false;
}
VPackSlice keySlice;
@@ -624,6 +704,7 @@ bool MMFilesCollection::isVolatile() const { return _isVolatile; }
/// @brief closes an open collection
int MMFilesCollection::close() {
LOG_TOPIC(DEBUG, Logger::ENGINES) << "closing '" << _logicalCollection.name() << "'";
if (!_logicalCollection.deleted() &&
!_logicalCollection.vocbase().isDropped()) {
auto primIdx = primaryIndex();
@@ -1123,6 +1204,7 @@ MMFilesDatafile* MMFilesCollection::createDatafile(TRI_voc_fid_t fid,
// name
if (!isCompactor && datafile->isPhysical()) {
// and use the correct name
std::string oldName = datafile->getName();
std::string jname("journal-" + std::to_string(datafile->fid()) + ".db");
std::string filename =
arangodb::basics::FileUtils::buildFilename(path(), jname);
@@ -1142,7 +1224,7 @@ MMFilesDatafile* MMFilesCollection::createDatafile(TRI_voc_fid_t fid,
}
LOG_TOPIC(TRACE, arangodb::Logger::DATAFILES) << "renamed journal from '"
<< datafile->getName() << "' to '"
<< oldName << "' to '"
<< filename << "'";
}
@@ -1786,7 +1868,7 @@ int MMFilesCollection::openWorker(bool ignoreErrors) {
void MMFilesCollection::open(bool ignoreErrors) {
VPackBuilder builder;
StorageEngine* engine = EngineSelectorFeature::ENGINE;
auto engine = static_cast<MMFilesEngine*>(EngineSelectorFeature::ENGINE);
auto& vocbase = _logicalCollection.vocbase();
auto cid = _logicalCollection.id();
@@ -1874,7 +1956,7 @@ void MMFilesCollection::open(bool ignoreErrors) {
}
}
if (!engine->inRecovery()) {
if (!engine->inRecovery() && !engine->upgrading()) {
// build the index structures, and fill the indexes
fillAllIndexes(&trx);
}
@@ -1922,6 +2004,13 @@ int MMFilesCollection::iterateMarkersOnLoad(transaction::Methods* trx) {
<< openState._deletions << " deletion markers for collection '"
<< _logicalCollection.name() << "'";
// pick up persistent id flag from state
_hasAllPersistentLocalIds.store(openState._hasAllPersistentLocalIds);
LOG_TOPIC_IF(WARN, arangodb::Logger::ENGINES,
!openState._hasAllPersistentLocalIds)
<< "collection '" << _logicalCollection.name() << "' does not have all "
<< "persistent LocalDocumentIds; cannot be linked to an arangosearch view";
// update the real statistics for the collection
try {
for (auto& it : openState._stats) {
@@ -3095,6 +3184,79 @@ void MMFilesCollection::removeLocalDocumentId(LocalDocumentId const& documentId,
}
}
Result MMFilesCollection::persistLocalDocumentIdsForDatafile(
MMFilesCollection& collection, MMFilesDatafile& file) {
Result res;
// make a first pass to count documents and determine output size
size_t numDocuments = 0;
bool ok = TRI_IterateDatafile(&file, ::countDocumentsIterator, &numDocuments);
if (!ok) {
res.reset(TRI_ERROR_INTERNAL, "could not count documents");
return res;
}
size_t outputSizeLimit = file.currentSize() +
(numDocuments * sizeof(LocalDocumentId));
MMFilesDatafile* outputFile = nullptr;
{
READ_UNLOCKER(unlocker, collection._filesLock);
outputFile = collection.createCompactor(file.fid(), outputSizeLimit);
}
if (nullptr == outputFile) {
return Result(TRI_ERROR_INTERNAL);
}
res = TRI_IterateDatafile(&file, ::persistLocalDocumentIdIterator,
outputFile);
if (res.fail()) {
return res;
}
{
READ_UNLOCKER(unlocker, collection._filesLock);
res = collection.closeCompactor(outputFile);
if (res.fail()) {
return res;
}
// TODO detect error in replacement?
MMFilesCompactorThread::RenameDatafileCallback(
&file, outputFile, &collection._logicalCollection);
}
return res;
}
Result MMFilesCollection::persistLocalDocumentIds() {
WRITE_LOCKER(dataLocker, _dataLock);
TRI_ASSERT(_compactors.empty());
// convert journal to datafile first
int res = rotateActiveJournal();
if (TRI_ERROR_NO_ERROR != res && TRI_ERROR_ARANGO_NO_JOURNAL != res) {
return Result(res);
}
// now handle datafiles
{
READ_LOCKER(locker, _filesLock);
for (auto file : _datafiles) {
Result result = persistLocalDocumentIdsForDatafile(*this, *file);
if (result.fail()) {
return result;
}
}
}
_hasAllPersistentLocalIds.store(true);
TRI_ASSERT(_compactors.empty());
TRI_ASSERT(_journals.empty());
return Result();
}
/// @brief creates a new entry in the primary index
Result MMFilesCollection::insertPrimaryIndex(transaction::Methods* trx,
LocalDocumentId const& documentId,

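persistLocalDocumentIdsForDatafile above is a classic two-pass rewrite: one pass counts documents so the replacement file can be sized up front (currentSize plus one id per document), and a second pass streams every marker through, minting a fresh id where none is stored. A condensed sketch of the same shape over an in-memory record list; all names here are hypothetical, not the ArangoDB API:

#include <cstddef>
#include <cstdint>
#include <vector>

struct Record {             // hypothetical stand-in for a datafile marker
  bool isDocument = false;  // document marker vs. header/footer/etc.
  bool hasId = false;       // id already persisted?
  uint64_t id = 0;
  size_t size = 0;          // on-disk size of the record
};

uint64_t nextId = 1;        // stand-in for LocalDocumentId::create()

// Pass 1: count documents to bound the output size.
size_t countDocuments(std::vector<Record> const& file) {
  size_t n = 0;
  for (auto const& r : file) {
    if (r.isDocument) ++n;
  }
  return n;
}

// Pass 2: copy every record, appending a freshly minted id where missing.
std::vector<Record> rewriteWithIds(std::vector<Record> const& file) {
  std::vector<Record> out;
  out.reserve(file.size());
  for (Record r : file) {    // deliberate copy; we mutate the copy
    if (r.isDocument && !r.hasId) {
      r.id = nextId++;
      r.hasId = true;
      r.size += sizeof(uint64_t);  // marker grows by exactly one id
    }
    out.push_back(r);
  }
  return out;
}

The real implementation routes the output through the compactor machinery (createCompactor / closeCompactor / RenameDatafileCallback) so the rewritten file atomically replaces the original.
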
@@ -369,6 +369,8 @@ class MMFilesCollection final : public PhysicalCollection {
void removeLocalDocumentId(LocalDocumentId const& documentId, bool updateStats);
Result persistLocalDocumentIds();
private:
void sizeHint(transaction::Methods* trx, int64_t hint);
@@ -493,6 +495,11 @@ class MMFilesCollection final : public PhysicalCollection {
LocalDocumentId reuseOrCreateLocalDocumentId(OperationOptions const& options) const;
bool hasAllPersistentLocalIds() const { return _hasAllPersistentLocalIds.load(); }
static Result persistLocalDocumentIdsForDatafile(
MMFilesCollection& collection, MMFilesDatafile& file);
private:
mutable arangodb::MMFilesDitches _ditches;
@@ -536,6 +543,9 @@ class MMFilesCollection final : public PhysicalCollection {
bool _doCompact;
TRI_voc_tick_t _maxTick;
// whether or not all documents are stored with a persistent LocalDocumentId
std::atomic<bool> _hasAllPersistentLocalIds{true};
};
}

@@ -105,23 +105,36 @@ void MMFilesCompactorThread::DropDatafileCallback(MMFilesDatafile* df, LogicalCo
int res = datafile->rename(filename);
if (res != TRI_ERROR_NO_ERROR) {
LOG_TOPIC(ERR, Logger::COMPACTOR) << "cannot rename obsolete datafile '" << copy << "' to '" << filename << "': " << TRI_errno_string(res);
LOG_TOPIC(ERR, Logger::COMPACTOR)
<< "cannot rename obsolete datafile '" << copy << "' to '"
<< filename << "': " << TRI_errno_string(res);
} else {
LOG_TOPIC(DEBUG, Logger::COMPACTOR)
<< "renamed obsolete datafile '" << copy << "' to '"
<< filename << "': " << TRI_errno_string(res);
}
}
LOG_TOPIC(DEBUG, Logger::COMPACTOR) << "finished compacting datafile '" << datafile->getName() << "'";
LOG_TOPIC(DEBUG, Logger::COMPACTOR)
<< "finished compacting datafile '" << datafile->getName() << "'";
int res = datafile->close();
if (res != TRI_ERROR_NO_ERROR) {
LOG_TOPIC(ERR, Logger::COMPACTOR) << "cannot close obsolete datafile '" << datafile->getName() << "': " << TRI_errno_string(res);
LOG_TOPIC(ERR, Logger::COMPACTOR)
<< "cannot close obsolete datafile '" << datafile->getName()
<< "': " << TRI_errno_string(res);
} else if (datafile->isPhysical()) {
LOG_TOPIC(DEBUG, Logger::COMPACTOR) << "wiping compacted datafile '" << datafile->getName() << "' from disk";
LOG_TOPIC(DEBUG, Logger::COMPACTOR)
<< "wiping compacted datafile '" << datafile->getName()
<< "' from disk";
res = TRI_UnlinkFile(filename.c_str());
if (res != TRI_ERROR_NO_ERROR) {
LOG_TOPIC(ERR, Logger::COMPACTOR) << "cannot wipe obsolete datafile '" << datafile->getName() << "': " << TRI_errno_string(res);
LOG_TOPIC(ERR, Logger::COMPACTOR)
<< "cannot wipe obsolete datafile '" << datafile->getName()
<< "': " << TRI_errno_string(res);
}
// check for .dead files
@@ -158,6 +171,7 @@ void MMFilesCompactorThread::RenameDatafileCallback(MMFilesDatafile* datafile,
TRI_ASSERT(collection != nullptr);
auto physical = static_cast<MMFilesCollection*>(collection->getPhysical());
TRI_ASSERT(physical != nullptr);
std::string compactorName = compactor->getName();
bool ok = false;
TRI_ASSERT(datafile->fid() == compactor->fid());
@@ -173,10 +187,18 @@ void MMFilesCompactorThread::RenameDatafileCallback(MMFilesDatafile* datafile,
if (res != TRI_ERROR_NO_ERROR) {
LOG_TOPIC(ERR, Logger::COMPACTOR) << "unable to rename datafile '" << datafile->getName() << "' to '" << tempFilename << "': " << TRI_errno_string(res);
} else {
LOG_TOPIC(DEBUG, arangodb::Logger::COMPACTOR)
<< "renamed datafile from '" << realName << "' to '"
<< tempFilename << "'";
res = compactor->rename(realName);
if (res != TRI_ERROR_NO_ERROR) {
LOG_TOPIC(ERR, Logger::COMPACTOR) << "unable to rename compaction file '" << compactor->getName() << "' to '" << realName << "': " << TRI_errno_string(res);
} else {
LOG_TOPIC(DEBUG, arangodb::Logger::COMPACTOR)
<< "renamed datafile from '" << compactorName << "' to '"
<< tempFilename << "'";
}
}

@@ -713,6 +713,52 @@ bool TRI_IterateDatafile(MMFilesDatafile* datafile,
return true;
}
Result TRI_IterateDatafile(MMFilesDatafile* datafile,
Result (*iterator)(MMFilesMarker const*, void*,
MMFilesDatafile*),
void* data) {
TRI_ASSERT(iterator != nullptr);
LOG_TOPIC(DEBUG, arangodb::Logger::DATAFILES) << "iterating over datafile '" << datafile->getName() << "', fid: " << datafile->fid() << ", size: " << datafile->currentSize();
char const* ptr = datafile->data();
char const* end = ptr + datafile->currentSize();
if (datafile->state() != TRI_DF_STATE_READ &&
datafile->state() != TRI_DF_STATE_WRITE) {
return TRI_ERROR_ARANGO_ILLEGAL_STATE;
}
TRI_voc_tick_t maxTick = 0;
TRI_DEFER(TRI_UpdateTickServer(maxTick));
while (ptr < end) {
auto const* marker = reinterpret_cast<MMFilesMarker const*>(ptr);
if (marker->getSize() == 0) {
return Result();
}
TRI_voc_tick_t tick = marker->getTick();
if (tick > maxTick) {
maxTick = tick;
}
// update the tick statistics
TRI_UpdateTicksDatafile(datafile, marker);
Result res = iterator(marker, data, datafile);
if (res.fail()) {
return res;
}
ptr += MMFilesDatafileHelper::AlignedMarkerSize<size_t>(marker);
}
return Result();
}
/// @brief iterates over a datafile
/// also may set datafile's min/max tick values
bool TRI_IterateDatafile(MMFilesDatafile* datafile,

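Note that the iteration loop above advances by the aligned marker size, not the raw one; MMFiles pads markers out to an alignment boundary. A short sketch of that arithmetic, assuming the usual 8-byte alignment (the real helper is MMFilesDatafileHelper::AlignedMarkerSize):

#include <cassert>
#include <cstdint>

// Round a raw marker size up to the next 8-byte boundary (assumed alignment).
constexpr uint32_t alignedMarkerSize(uint32_t rawSize) {
  return (rawSize + 7u) & ~7u;
}

int main() {
  assert(alignedMarkerSize(24) == 24);  // already aligned
  assert(alignedMarkerSize(25) == 32);  // padded up
  return 0;
}
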
@@ -527,12 +527,18 @@ void TRI_UpdateTicksDatafile(MMFilesDatafile*, MMFilesMarker const*);
/// also may set datafile's min/max tick values
////////////////////////////////////////////////////////////////////////////////
bool TRI_IterateDatafile(MMFilesDatafile*,
bool (*iterator)(MMFilesMarker const*, void*,
MMFilesDatafile*),
void* data);
bool TRI_IterateDatafile(
MMFilesDatafile*,
bool (*iterator)(MMFilesMarker const*, void*, MMFilesDatafile*),
void* data);
bool TRI_IterateDatafile(MMFilesDatafile*,
std::function<bool(MMFilesMarker const*, MMFilesDatafile*)> const& cb);
arangodb::Result TRI_IterateDatafile(
MMFilesDatafile*,
arangodb::Result (*iterator)(MMFilesMarker const*, void*, MMFilesDatafile*),
void* data);
bool TRI_IterateDatafile(
MMFilesDatafile*,
std::function<bool(MMFilesMarker const*, MMFilesDatafile*)> const& cb);
#endif

@@ -355,6 +355,59 @@ void MMFilesEngine::recoveryDone(TRI_vocbase_t& vocbase) {
_deleted.clear();
}
Result MMFilesEngine::persistLocalDocumentIds(TRI_vocbase_t& vocbase) {
Result result;
LOG_TOPIC(DEBUG, Logger::ENGINES)
<< "beginning upgrade task to persist LocalDocumentIds";
// ensure we are not in recovery
TRI_ASSERT(!inRecovery());
auto guard = scopeGuard([this]() -> void {
_upgrading.store(false);
});
_upgrading.store(true);
// flush the wal and wait for compactor just to be sure
result = flushWal(true, true, false);
if (result.fail()) {
return result;
}
result = catchToResult([this, &result, &vocbase]() -> Result {
// stop the compactor so we can make sure there's no other interference
stopCompactor(&vocbase);
auto collections = vocbase.collections(false);
for (auto c : collections) {
auto collection = static_cast<MMFilesCollection*>(c->getPhysical());
LOG_TOPIC(DEBUG, Logger::ENGINES)
<< "processing collection '" << c->name() << "'";
collection->open(false);
auto guard = scopeGuard([this, &collection]() -> void {
collection->close();
});
result = collection->persistLocalDocumentIds();
if (result.fail()) {
return result;
}
}
return Result();
});
if (result.fail()) {
LOG_TOPIC(ERR, Logger::ENGINES)
<< "failure in persistence: " << result.errorMessage();
}
LOG_TOPIC(DEBUG, Logger::ENGINES)
<< "done with upgrade task to persist LocalDocumentIds";
return result;
}
// fill the Builder object with an array of databases that were detected
// by the storage engine. this method must sort out databases that were not
// fully created (see "createDatabase" below). called at server start only
@@ -3619,6 +3672,4 @@ bool MMFilesEngine::isCompactionDisabled() const {
return _compactionDisabled.load() > 0;
}
// -----------------------------------------------------------------------------
// --SECTION-- END-OF-FILE
// -----------------------------------------------------------------------------
bool MMFilesEngine::upgrading() const { return _upgrading.load(); }

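persistLocalDocumentIds holds the _upgrading flag for its whole duration via a scope guard, so that MMFilesCollection::open() skips the index fill mid-upgrade even when an early return or exception unwinds the stack. A minimal sketch of the pattern as a plain RAII helper (the real code uses scopeGuard; names here are illustrative):

#include <atomic>

// RAII helper: set an atomic flag on entry, clear it on every exit path.
class FlagGuard {
 public:
  explicit FlagGuard(std::atomic<bool>& flag) : _flag(flag) { _flag.store(true); }
  ~FlagGuard() { _flag.store(false); }
  FlagGuard(FlagGuard const&) = delete;
  FlagGuard& operator=(FlagGuard const&) = delete;

 private:
  std::atomic<bool>& _flag;
};

std::atomic<bool> upgrading{false};

void runUpgradeTask() {
  FlagGuard guard(upgrading);  // upgrading() now reports true
  // ... rewrite datafiles; any return or throw clears the flag ...
}
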
@@ -240,6 +240,8 @@ class MMFilesEngine final : public StorageEngine {
// start compactor thread and delete files from collections marked as deleted
void recoveryDone(TRI_vocbase_t& vocbase) override;
Result persistLocalDocumentIds(TRI_vocbase_t& vocbase);
private:
int dropDatabaseMMFiles(TRI_vocbase_t* vocbase);
std::unique_ptr<TRI_vocbase_t> createDatabaseMMFiles(
@@ -446,6 +448,9 @@ class MMFilesEngine final : public StorageEngine {
void enableCompaction();
bool isCompactionDisabled() const;
/// @brief whether the engine is currently running an upgrade procedure
bool upgrading() const;
private:
velocypack::Builder getReplicationApplierConfiguration(std::string const& filename, int& status);
int removeReplicationApplierConfiguration(std::string const& filename);
@@ -615,6 +620,9 @@ class MMFilesEngine final : public StorageEngine {
// can be called multiple times. the last one to set this to 0 again will
// enable compaction again
std::atomic<uint64_t> _compactionDisabled;
// whether the engine is currently running an upgrade procedure
std::atomic<bool> _upgrading{false};
};
}

@@ -1767,7 +1767,7 @@ int MMFilesLogfileManager::waitForCollector(MMFilesWalLogfile::IdType logfileId,
double const end = TRI_microtime() + maxWaitTime;
while (true) {
if (_lastCollectedId >= logfileId) {
if (_lastCollectedId.load() >= logfileId) {
return TRI_ERROR_NO_ERROR;
}
@@ -1787,7 +1787,7 @@ int MMFilesLogfileManager::waitForCollector(MMFilesWalLogfile::IdType logfileId,
LOG_TOPIC(DEBUG, arangodb::Logger::ENGINES)
<< "still waiting for collector. logfileId: " << logfileId
<< " lastCollected: " << _lastCollectedId << ", result: " << res;
<< " lastCollected: " << _lastCollectedId.load() << ", result: " << res;
if (res != TRI_ERROR_LOCK_TIMEOUT && res != TRI_ERROR_NO_ERROR) {
// some error occurred
@@ -2304,6 +2304,9 @@ int MMFilesLogfileManager::inspectLogfiles() {
<< "setting max HLC value to " << _recoverState->maxRevisionId;
TRI_HybridLogicalClock(_recoverState->maxRevisionId);
// track maximum local document id as well
LocalDocumentId::track(_recoverState->maxLocalDocumentId);
return TRI_ERROR_NO_ERROR;
}

@@ -128,6 +128,10 @@ class MMFilesMarkerEnvelope : public MMFilesWalMarker {
return false;
}
TRI_IF_FAILURE("MMFilesCompatibility33") {
return false;
}
// size is header size + vpack size + LocalDocumentId size -> LocalDocumentId contained!
// size is not header size + vpack size + LocalDocumentId size -> no LocalDocumentId contained!
return (size() == MMFilesDatafileHelper::VPackOffset(type()) +
@@ -136,9 +140,10 @@ class MMFilesMarkerEnvelope : public MMFilesWalMarker {
}
LocalDocumentId getLocalDocumentId() const override {
uint8_t const* ptr = reinterpret_cast<uint8_t const*>(mem()) +
MMFilesDatafileHelper::VPackOffset(type()) +
arangodb::velocypack::Slice(vpack()).byteSize();
TRI_ASSERT(hasLocalDocumentId());
uint8_t const* ptr = reinterpret_cast<uint8_t const*>(mem()) +
MMFilesDatafileHelper::VPackOffset(type()) +
arangodb::velocypack::Slice(vpack()).byteSize();
return LocalDocumentId(encoding::readNumber<LocalDocumentId::BaseType>(ptr, sizeof(LocalDocumentId::BaseType)));
}
@@ -173,6 +178,9 @@ class MMFilesCrudMarker : public MMFilesWalMarker {
/// @brief returns the marker size
uint32_t size() const override final {
TRI_IF_FAILURE("MMFilesCompatibility33") { // don't store local id
return static_cast<uint32_t>(MMFilesDatafileHelper::VPackOffset(_type) + _data.byteSize());
}
if (_localDocumentId.isSet()) {
// we have to take localDocumentId into account
return static_cast<uint32_t>(MMFilesDatafileHelper::VPackOffset(_type) + _data.byteSize()) + sizeof(LocalDocumentId::BaseType);
@@ -188,6 +196,9 @@ class MMFilesCrudMarker : public MMFilesWalMarker {
size_t const vpackOffset = MMFilesDatafileHelper::VPackOffset(_type);
size_t const vpackLength = static_cast<size_t>(_data.byteSize());
memcpy(mem + vpackOffset, _data.begin(), vpackLength);
TRI_IF_FAILURE("MMFilesCompatibility33") { // don't store local id
return;
}
if (_localDocumentId.isSet()) {
// also store localDocumentId
encoding::storeNumber<LocalDocumentId::BaseType>(reinterpret_cast<uint8_t*>(mem) + vpackOffset + vpackLength, _localDocumentId.id(), sizeof(LocalDocumentId::BaseType));

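The write path mirrors the read-side size check: size() reports the extra sizeof(id) bytes only when an id is set, and store() appends the id after the payload (the MMFilesCompatibility33 failure point simply suppresses both, emulating a 3.3 server for tests). A compact sketch of that serialization, again with stand-in types:

#include <cstdint>
#include <cstring>
#include <vector>

using IdType = uint64_t;  // stand-in for LocalDocumentId::BaseType

// Serialize header + payload, appending the id only when one is set.
std::vector<uint8_t> storeMarker(std::vector<uint8_t> const& header,
                                 std::vector<uint8_t> const& payload,
                                 bool hasId, IdType id) {
  std::vector<uint8_t> out;
  out.reserve(header.size() + payload.size() + (hasId ? sizeof(IdType) : 0));
  out.insert(out.end(), header.begin(), header.end());
  out.insert(out.end(), payload.begin(), payload.end());
  if (hasId) {
    uint8_t buf[sizeof(IdType)];
    std::memcpy(buf, &id, sizeof(IdType));
    out.insert(out.end(), buf, buf + sizeof(IdType));
  }
  return out;  // the size alone tells a reader whether the id is present
}
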
@@ -45,6 +45,7 @@
#include "Transaction/StandaloneContext.h"
#include "Utils/OperationOptions.h"
#include "Utils/SingleCollectionTransaction.h"
#include "VocBase/LocalDocumentId.h"
#include "VocBase/LogicalCollection.h"
#include "VocBase/LogicalView.h"
@@ -380,6 +381,15 @@ bool MMFilesWalRecoverState::InitialScanMarker(MMFilesMarker const* marker,
state->maxRevisionId = revisionId;
}
}
if (marker->getSize() == MMFilesDatafileHelper::VPackOffset(TRI_DF_MARKER_VPACK_DOCUMENT) + payloadSlice.byteSize() + sizeof(LocalDocumentId::BaseType)) {
// we do have a LocalDocumentId stored at the end of the marker
uint8_t const* ptr = payloadSlice.begin() + payloadSlice.byteSize();
LocalDocumentId localDocumentId{encoding::readNumber<LocalDocumentId::BaseType>(ptr, sizeof(LocalDocumentId::BaseType))};
if (!state->maxLocalDocumentId.isSet() ||
localDocumentId > state->maxLocalDocumentId) {
state->maxLocalDocumentId = localDocumentId;
}
}
break;
}

@@ -186,6 +186,7 @@ struct MMFilesWalRecoverState {
bool ignoreRecoveryErrors;
int64_t errorCount;
TRI_voc_rid_t maxRevisionId;
LocalDocumentId maxLocalDocumentId;
private:
TRI_voc_tick_t lastDatabaseId;

@@ -261,6 +261,8 @@ class RocksDBCollection final : public PhysicalCollection {
inline bool useCache() const noexcept { return (_cacheEnabled && _cachePresent); }
void blackListKey(char const* data, std::size_t len) const;
bool hasAllPersistentLocalIds() const { return true; }
private:
uint64_t const _objectId; // rocksdb-specific object id for collection

@@ -207,6 +207,10 @@ class PhysicalCollection {
TRI_voc_rid_t& prevRev,
TRI_voc_rid_t& revisionId) = 0;
// returns true if all documents have a persistent LocalDocumentId, or false
// if it can change after a server restart
virtual bool hasAllPersistentLocalIds() const = 0;
protected:
PhysicalCollection(
LogicalCollection& collection,

@@ -81,6 +81,9 @@ class LocalDocumentId {
/// @brief create a document id from an existing id
static LocalDocumentId create(BaseType id) { return LocalDocumentId(id); }
/// @brief use to track an existing value in recovery to ensure no duplicates
static void track(LocalDocumentId const& id) { TRI_HybridLogicalClock(id.id()); }
private:
BaseType _id;
};

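LocalDocumentId::track feeds each id observed during recovery back into the hybrid logical clock, guaranteeing that ids minted afterwards are strictly larger than anything already on disk. The same idea as a standalone high-water-mark generator (hypothetical names; the real implementation simply delegates to TRI_HybridLogicalClock):

#include <atomic>
#include <cstdint>

// High-water-mark id source: track() during recovery, create() afterwards.
class IdGenerator {
 public:
  // Record an id seen in existing data so it is never reissued.
  void track(uint64_t seen) {
    uint64_t cur = _last.load();
    while (seen > cur && !_last.compare_exchange_weak(cur, seen)) {
      // cur is refreshed on failure; loop until seen is folded in
    }
  }

  // Mint a fresh id strictly greater than anything tracked or created.
  uint64_t create() { return _last.fetch_add(1) + 1; }

 private:
  std::atomic<uint64_t> _last{0};
};
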
@@ -291,6 +291,11 @@ void methods::Upgrade::registerTasks() {
/*cluster*/ Flags::CLUSTER_NONE | Flags::CLUSTER_COORDINATOR_GLOBAL,
/*database*/ DATABASE_INIT | DATABASE_UPGRADE | DATABASE_EXISTING,
&UpgradeTasks::setupAppBundles);
addTask("persistLocalDocumentIds", "convert collection data from old format",
/*system*/ Flags::DATABASE_ALL,
/*cluster*/ Flags::CLUSTER_NONE | Flags::CLUSTER_DB_SERVER_LOCAL,
/*database*/ DATABASE_UPGRADE,
&UpgradeTasks::persistLocalDocumentIds);
}
UpgradeResult methods::Upgrade::runTasks(

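Each registered task carries bitmask flags describing the deployment modes and database states it applies to; runTasks then executes only tasks whose masks intersect the current context, which is how persistLocalDocumentIds comes to run on single servers and local DB servers only during a database upgrade. A toy sketch of that dispatch (the flag values and task signature are hypothetical):

#include <cstdint>
#include <functional>
#include <string>
#include <vector>

// Hypothetical flag bits mirroring the CLUSTER_* / DATABASE_* masks above.
enum Flags : uint32_t {
  CLUSTER_NONE = 1u << 0,       // single server
  CLUSTER_DB_SERVER = 1u << 1,  // local DB server
  DATABASE_INIT = 1u << 8,
  DATABASE_UPGRADE = 1u << 9,
};

struct Task {
  std::string name;
  uint32_t clusterFlags;   // where the task may run
  uint32_t databaseFlags;  // during which database phase
  std::function<bool()> run;
};

// Run every task whose masks match the current cluster role and phase.
bool runTasks(std::vector<Task> const& tasks, uint32_t role, uint32_t phase) {
  for (auto const& t : tasks) {
    if ((t.clusterFlags & role) != 0 && (t.databaseFlags & phase) != 0) {
      if (!t.run()) {
        return false;  // abort the upgrade on the first failing task
      }
    }
  }
  return true;
}
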
@@ -23,6 +23,7 @@
#include "UpgradeTasks.h"
#include "Agency/AgencyComm.h"
#include "Basics/Common.h"
#include "Basics/Exceptions.h"
#include "Basics/StringUtils.h"
#include "Basics/VelocyPackHelper.h"
#include "Cluster/ClusterComm.h"
@@ -31,6 +32,7 @@
#include "Cluster/ServerState.h"
#include "GeneralServer/AuthenticationFeature.h"
#include "Logger/Logger.h"
#include "MMFiles/MMFilesEngine.h"
#include "RocksDBEngine/RocksDBCommon.h"
#include "RocksDBEngine/RocksDBIndex.h"
#include "StorageEngine/EngineSelectorFeature.h"
@@ -479,3 +481,17 @@ bool UpgradeTasks::setupAppBundles(
) {
return ::createSystemCollection(&vocbase, "_appbundles");
}
bool UpgradeTasks::persistLocalDocumentIds(
TRI_vocbase_t& vocbase,
arangodb::velocypack::Slice const& slice
) {
if (EngineSelectorFeature::engineName() == MMFilesEngine::EngineName) {
Result res = basics::catchToResult([&vocbase]() -> Result {
MMFilesEngine* engine = static_cast<MMFilesEngine*>(EngineSelectorFeature::ENGINE);
return engine->persistLocalDocumentIds(vocbase);
});
return res.ok();
}
return true;
}

@@ -51,6 +51,7 @@ struct UpgradeTasks {
static bool setupApps(TRI_vocbase_t& vocbase, velocypack::Slice const& slice);
static bool createAppsIndex(TRI_vocbase_t& vocbase, velocypack::Slice const& slice);
static bool setupAppBundles(TRI_vocbase_t& vocbase, velocypack::Slice const& slice);
static bool persistLocalDocumentIds(TRI_vocbase_t& vocbase, velocypack::Slice const& slice);
};
}

@@ -568,8 +568,16 @@ function iterateTests(cases, options, jsonReply) {
// tests to run
let caselist = [];
const expandWildcard = ( name ) => {
if (!name.endsWith('*')) {
return name;
}
const prefix = name.substring(0, name.length - 1);
return allTests.filter( ( s ) => s.startsWith(prefix) ).join(',');
};
for (let n = 0; n < cases.length; ++n) {
let splitted = cases[n].split(/[,;\.|]/);
let splitted = expandWildcard(cases[n]).split(/[,;|]/);
for (let m = 0; m < splitted.length; ++m) {
let which = splitted[m];

@@ -0,0 +1,249 @@
/* jshint strict: false, sub: true */
/* global print */
'use strict';
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2017 ArangoDB GmbH, Cologne, Germany
/// Copyright 2014 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License")
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Dan Larkin-York
////////////////////////////////////////////////////////////////////////////////
const _ = require('lodash');
const fs = require('fs');
const pu = require('@arangodb/process-utils');
const tu = require('@arangodb/test-utils');
const toArgv = require('internal').toArgv;
const optionsDocumentation = [
' - `upgradeDataPath`: path to directory containing upgrade data archives'
];
const compareSemVer = (a, b) => {
if (a === b) {
return 0;
}
const partsA = a.split('-')[0].split('.');
const partsB = b.split('-')[0].split('.');
if (partsA.length < 2 ||
partsA.length > 4 ||
partsB.length < 2 ||
partsB.length > 4 ||
!partsA.every(p => !isNaN(Number(p))) ||
!partsB.every(p => !isNaN(Number(p)))) {
return (a < b) ? -1 : 1;
}
for (let i = partsA.length; i < 4; i++) {
partsA.push("0");
}
for (let i = partsB.length; i < 4; i++) {
partsB.push("0");
}
for (let i = 0; i < 4; i++) {
const numA = Number(partsA[i]);
const numB = Number(partsB[i]);
if (numA < numB) {
return -1;
}
if (numB < numA) {
return 1;
}
}
return 0;
};
const byMinimumSupportedVersion = (version) => {
return (testCase) => {
let supported = true;
testCase.substring(0, testCase.length - 3).split('-').forEach((s) => {
if (s.startsWith("msv")) {
const msv = s.substring(3);
if (compareSemVer(msv, version) > 0) {
supported = false;
}
}
});
return supported;
};
};
////////////////////////////////////////////////////////////////////////////////
/// set up the test according to the testcase.
////////////////////////////////////////////////////////////////////////////////
const unpackOldData = (engine, version, options, serverOptions) => {
const archiveName = `upgrade-data-${engine}-${version}`;
const dataFile = fs.join(options.upgradeDataPath,
'data',
`${archiveName}.tar.gz`);
const tarOptions = [
'--extract',
'--gunzip',
`--file=${dataFile}`,
`--directory=${serverOptions['database.directory']}`
];
let unpack = pu.executeAndWait('tar', tarOptions, {}, '');
if (unpack.status === false) {
unpack.failed = 1;
return {
status: false,
message: unpack.message
};
}
return {
status: true
};
};
////////////////////////////////////////////////////////////////////////////////
/// testcases themselves
////////////////////////////////////////////////////////////////////////////////
const upgradeData = (engine, version) => {
return (options) => {
const testName = `upgrade_data_${engine}_${version}`;
const dataFile = fs.join(options.upgradeDataPath,
'data',
`upgrade-data-${engine}-${version}.tar.gz`);
if (options.storageEngine !== engine) {
// engine mismatch, skip!
const res = {};
res[testName] = {
failed: 0,
'status': true,
'message': 'skipped because of engine mismatch',
'skipped': true
};
return res;
} else if (!fs.exists(dataFile)) {
// data file missing
const res = {};
if (options.upgradeDataMissingShouldFail) {
res[testName] = {
failed: 1,
'status': false,
'message': `failed due to missing data file ${dataFile}`,
'skipped': false
};
} else {
res[testName] = {
failed: 0,
'status': true,
'message': `skipped due to missing data file ${dataFile}`,
'skipped': true
};
}
return res;
}
const tmpDataDir = fs.join(fs.getTempPath(), testName);
fs.makeDirectoryRecursive(tmpDataDir);
pu.cleanupDBDirectoriesAppend(tmpDataDir);
const appDir = fs.join(tmpDataDir, 'apps');
fs.makeDirectoryRecursive(appDir);
const tmpDir = fs.join(tmpDataDir, 'tmp');
fs.makeDirectoryRecursive(tmpDir);
const dataDir = fs.join(tmpDataDir, 'data');
fs.makeDirectoryRecursive(dataDir);
const port = pu.findFreePort(options.minPort, options.maxPort);
let args = pu.makeArgs.arangod(options, appDir, '', tmpDir);
args['server.endpoint'] = 'tcp://127.0.0.1:' + port;
args['database.directory'] = dataDir;
args['database.auto-upgrade'] = true;
require('internal').print('Unpacking old data...');
let result = unpackOldData(engine, version, options, args);
if (result.status !== true) {
return result;
}
require('internal').print('Running upgrade...');
const argv = toArgv(args);
result = pu.executeAndWait(pu.ARANGOD_BIN, argv, options, 'upgrade', tmpDataDir);
if (result.status !== true) {
return {
failed: 1,
'status': false,
'message': 'upgrade result: ' + result.message,
'skipped': false
};
}
args['database.auto-upgrade'] = false;
const testCases = tu.scanTestPaths(['tests/js/server/upgrade-data'])
.filter(byMinimumSupportedVersion(version));
require('internal').print('Checking results...');
return tu.performTests(
options,
testCases,
`upgrade_data_${engine}_${version}`,
tu.runThere,
args);
};
};
exports.setup = function(testFns, defaultFns, opts, fnDocs, optionsDoc) {
const functionsDocumentation = {};
const configurations = fs.list('upgrade-data-tests/data').map(
(filename) => {
const re = /upgrade-data-(mmfiles|rocksdb)-(\d+(?:\.\d+)*)\.tar\.gz/;
const matches = re.exec(filename);
return {
engine: matches[1],
version: matches[2]
};
}
).sort((a, b) => {
if (a.engine < b.engine) return -1;
if (a.engine > b.engine) return 1;
return compareSemVer(a.version, b.version);
});
for (let i = 0; i < configurations.length; i++) {
const {
engine,
version
} = configurations[i];
const testName = `upgrade_data_${engine}_${version}`;
testFns[testName] = upgradeData(engine, version);
defaultFns.push(testName);
functionsDocumentation[testName] =
`test upgrade from version ${version} using ${engine} engine`;
}
opts['upgradeDataPath'] = 'upgrade-data-tests';
opts['upgradeDataMissingShouldFail'] = false;
/* jshint forin: false */
for (var attrname in functionsDocumentation) {
fnDocs[attrname] = functionsDocumentation[attrname];
}
for (var i = 0; i < optionsDocumentation.length; i++) {
optionsDoc.push(optionsDocumentation[i]);
}
};

@@ -65,12 +65,6 @@ using namespace arangodb::basics;
_invalid_parameter_handler oldInvalidHandleHandler;
_invalid_parameter_handler newInvalidHandleHandler;
// Windows variant for unistd.h's ftruncate()
int ftruncate(int fd, long newSize) {
int result = _chsize(fd, newSize);
return result;
}
// Windows variant for getpagesize()
int getpagesize(void) {
static int pageSize = 0; // only define it once

@@ -44,13 +44,6 @@ int initializeWindows(const TRI_win_initialize_e, char const*);
void ADB_WindowsEntryFunction();
void ADB_WindowsExitFunction(int exitCode, void* data);
// .............................................................................
// windows equivalent of ftruncate (the truncation of an open file) is
// _chsize
// .............................................................................
int ftruncate(int, long);
// .............................................................................
// windows does not have a function called getpagesize -- create one here
// .............................................................................

@@ -21,6 +21,8 @@ function help() {
echo " -B/--bin-dir ArangoDB binary dir (default: ./build)"
echo " -O/--ongoing-ports Ongoing ports (default: false)"
echo " --rr Run arangod with rr (true|false default: false)"
echo " --cluster-init Use cluster-init dir (default: false)"
echo " --auto-upgrade Use for upgrade (default: false)"
echo ""
echo "EXAMPLES:"
echo " $0"
@@ -121,6 +123,13 @@ while [[ -n "$1" ]]; do
ONGOING_PORTS=${2}
shift
;;
--cluster-init)
shift
;;
--auto-upgrade)
AUTOUPGRADE=${2}
shift
;;
--rr)
USE_RR=${2}
if [ "$USE_RR" != "false" ] && [ "$USE_RR" != "true" ] ; then

@@ -63,6 +63,10 @@ else
fi
DEFAULT_REPLICATION=""
if [ "$AUTOUPGRADE" == "1" ];then
echo "-- Using autoupgrade procedure"
fi
if [[ $NRAGENTS -le 0 ]]; then
echo "you need as least one agent currently you have $NRAGENTS"
exit 1
@@ -133,6 +137,34 @@ for aid in `seq 0 $(( $NRAGENTS - 1 ))`; do
[ "$INTERACTIVE_MODE" == "R" ] && sleep 1
PORT=$(( $AG_BASE + $aid ))
AGENCY_ENDPOINTS+="--cluster.agency-endpoint $TRANSPORT://$ADDRESS:$PORT "
if [ "$AUTOUPGRADE" == "1" ];then
$ARANGOD \
-c none \
--agency.activate true \
--agency.compaction-step-size $COMP \
--agency.compaction-keep-size $KEEP \
--agency.endpoint $TRANSPORT://$ENDPOINT:$AG_BASE \
--agency.my-address $TRANSPORT://$ADDRESS:$PORT \
--agency.pool-size $NRAGENTS \
--agency.size $NRAGENTS \
--agency.supervision true \
--agency.supervision-frequency $SFRE \
--agency.supervision-grace-period 5.0 \
--agency.wait-for-sync false \
--database.directory cluster/data$PORT \
--javascript.enabled false \
--server.endpoint $TRANSPORT://$ENDPOINT:$PORT \
--server.statistics false \
--log.file cluster/$PORT.log \
--log.force-direct true \
--log.level $LOG_LEVEL_AGENCY \
--javascript.allow-admin-execute true \
$STORAGE_ENGINE \
$AUTHENTICATION \
$SSLKEYFILE \
--database.auto-upgrade true \
2>&1 | tee cluster/$PORT.stdout
fi
$ARANGOD \
-c none \
--agency.activate true \
@@ -187,6 +219,29 @@ start() {
mkdir -p cluster/data$PORT cluster/apps$PORT
echo == Starting $TYPE on port $PORT
[ "$INTERACTIVE_MODE" == "R" ] && sleep 1
if [ "$AUTOUPGRADE" == "1" ];then
$CMD \
-c none \
--database.directory cluster/data$PORT \
--cluster.agency-endpoint $TRANSPORT://$ENDPOINT:$AG_BASE \
--cluster.my-address $TRANSPORT://$ADDRESS:$PORT \
--server.endpoint $TRANSPORT://$ENDPOINT:$PORT \
--cluster.my-role $ROLE \
--log.file cluster/$PORT.log \
--log.level $LOG_LEVEL \
--server.statistics true \
--javascript.startup-directory $SRC_DIR/js \
--javascript.module-directory $SRC_DIR/enterprise/js \
--javascript.app-path cluster/apps$PORT \
--log.force-direct true \
--log.level $LOG_LEVEL_CLUSTER \
--javascript.allow-admin-execute true \
$STORAGE_ENGINE \
$AUTHENTICATION \
$SSLKEYFILE \
--database.auto-upgrade true \
2>&1 | tee cluster/$PORT.stdout
fi
$CMD \
-c none \
--database.directory cluster/data$PORT \

@@ -950,6 +950,8 @@ arangodb::Result PhysicalCollectionMock::updateProperties(arangodb::velocypack::
return arangodb::Result(TRI_ERROR_NO_ERROR); // assume mock collection updated OK
}
bool PhysicalCollectionMock::hasAllPersistentLocalIds() const { return true; }
std::function<void()> StorageEngineMock::before = []()->void {};
bool StorageEngineMock::inRecoveryResult = false;

@@ -97,6 +97,7 @@ class PhysicalCollectionMock: public arangodb::PhysicalCollection {
virtual void load() override {}
virtual void unload() override {}
virtual arangodb::Result updateProperties(arangodb::velocypack::Slice const& slice, bool doSync) override;
virtual bool hasAllPersistentLocalIds() const override;
private:
bool addIndex(std::shared_ptr<arangodb::Index> idx);

@@ -0,0 +1,106 @@
/* jshint globalstrict:false, strict:false, unused : false */
/* global assertEqual, assertTrue, assertFalse, assertNull, assertUndefined,
fail, AQL_EXECUTE */
////////////////////////////////////////////////////////////////////////////////
/// @brief recovery tests for views
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2010-2012 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License")
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is triAGENS GmbH, Cologne, Germany
///
/// @author Jan Steemann
/// @author Copyright 2013, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////
var arangodb = require('@arangodb');
var db = arangodb.db;
var errors = arangodb.errors;
var internal = require('internal');
var jsunity = require('jsunity');
function runSetup () {
'use strict';
internal.debugClearFailAt();
internal.debugSetFailAt("MMFilesCompatibility33");
db._drop('UnitTestsRecoveryDummy');
var c = db._create('UnitTestsRecoveryDummy');
for (let i = 0; i < 1000; i++) {
c.save({ a: "foo_" + i, b: "bar_" + i, c: i });
}
internal.wal.flush(true, true, true);
internal.debugSegfault('crashing server');
}
////////////////////////////////////////////////////////////////////////////////
/// @brief test suite
////////////////////////////////////////////////////////////////////////////////
function recoverySuite () {
'use strict';
jsunity.jsUnity.attachAssertions();
return {
setUp: function () {},
tearDown: function () {},
////////////////////////////////////////////////////////////////////////////////
/// @brief test whether we properly disallow the link
////////////////////////////////////////////////////////////////////////////////
testIResearchDisallowLinkToCollectionWithoutPersistentIDs: function () {
db._dropView('UnitTestsRecoveryView');
let view = db._createView('UnitTestsRecoveryView', 'arangosearch', {});
var meta = { links: { 'UnitTestsRecoveryDummy': { includeAllFields: true } } };
try {
view.properties(meta);
fail();
} catch (e) {
assertEqual(e.errorNum, errors.ERROR_ARANGO_INDEX_CREATION_FAILED.code);
}
var links = view.properties().links;
assertUndefined(links['UnitTestsRecoveryDummy']);
view.drop();
assertNull(db._view('UnitTestsRecoveryView'));
}
};
}
////////////////////////////////////////////////////////////////////////////////
/// @brief executes the test suite
////////////////////////////////////////////////////////////////////////////////
function main (argv) {
'use strict';
if (argv[1] === 'setup') {
runSetup();
return 0;
} else {
jsunity.run(recoverySuite);
return jsunity.writeDone().status ? 0 : 1;
}
}

@@ -0,0 +1,143 @@
/*jshint globalstrict:false, strict:false */
/*global assertEqual, assertTrue, assertTypeOf, assertNotEqual, fail */
////////////////////////////////////////////////////////////////////////////////
/// @brief test upgrade with data from an mmfiles instance
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2010-2012 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is triAGENS GmbH, Cologne, Germany
///
/// @author Dan Larkin-York
/// @author Copyright 2012, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////
var jsunity = require("jsunity");
var arangodb = require("@arangodb");
var db = require('internal').db;
////////////////////////////////////////////////////////////////////////////////
/// @brief test suite: upgrade with data from mmfiles instance
////////////////////////////////////////////////////////////////////////////////
function UpgradeData () {
'use strict';
return {
////////////////////////////////////////////////////////////////////////////
/// @brief verify documents and indexes after the upgrade
////////////////////////////////////////////////////////////////////////////
testLargeCollection : function () {
const c = db._collection('LargeCollection');
assertEqual(c.count(), 10000);
// verify documents and contents
for (let i = 0; i < 10000; i++) {
const doc = c.document( { _key: `key${i}` } );
assertEqual(doc.even, ( ( i % 2 ) === 0 ));
assertEqual(doc.name, `Name ${i}`);
assertEqual(doc.num, i);
assertEqual(doc.num100, i % 100);
}
// verify indexes
const indices = c.getIndexes();
assertEqual(indices.length, 5);
// primary
assertEqual(indices[0].type, "primary");
assertEqual(indices[0].unique, true);
// unique hash
assertEqual(indices[1].type, "hash");
assertEqual(indices[1].unique, true);
assertEqual(indices[1].fields, [ "num" ]);
const uhQuery =
`FOR doc in LargeCollection
FILTER doc.num == 8 || doc.num == 8001
SORT doc.num ASC
RETURN doc`;
const uhExplain = db._createStatement(uhQuery).explain({});
assertNotEqual(-1, uhExplain.plan.rules.indexOf('use-indexes'));
const uhResults = db._query(uhQuery).toArray();
assertEqual(uhResults.length, 2);
assertEqual(uhResults[0].num, 8);
assertEqual(uhResults[1].num, 8001);
// non-unique hash
assertEqual(indices[2].type, "hash");
assertEqual(indices[2].unique, false);
assertEqual(indices[2].fields, [ "even" ]);
const nhQuery =
`FOR doc in LargeCollection
FILTER doc.even == true
RETURN doc`;
const nhExplain = db._createStatement(nhQuery).explain({});
assertNotEqual(-1, nhExplain.plan.rules.indexOf('use-indexes'));
const nhResults = db._query(nhQuery).toArray();
assertEqual(nhResults.length, 5000);
nhResults.forEach( ( doc ) => { assertTrue(doc.even); } );
// unique skiplist
assertEqual(indices[3].type, "skiplist");
assertEqual(indices[3].unique, true);
assertEqual(indices[3].fields, [ "name" ]);
const usQuery =
`FOR doc in LargeCollection
FILTER doc.name >= "Name 1114" && doc.name <= "Name 1117"
SORT doc.name ASC
RETURN doc`;
const usExplain = db._createStatement(usQuery).explain({});
assertNotEqual(-1, usExplain.plan.rules.indexOf('use-indexes'));
const usResults = db._query(usQuery).toArray();
assertEqual(usResults.length, 4);
assertEqual(usResults[0].name, "Name 1114");
assertEqual(usResults[1].name, "Name 1115");
assertEqual(usResults[2].name, "Name 1116");
assertEqual(usResults[3].name, "Name 1117");
// non-unique skiplist
assertEqual(indices[4].type, "skiplist");
assertEqual(indices[4].unique, false);
assertEqual(indices[4].fields, [ "num100" ]);
const nsQuery =
`FOR doc in LargeCollection
FILTER doc.num100 == 57
RETURN doc`;
const nsExplain = db._createStatement(nsQuery).explain({});
assertNotEqual(-1, nsExplain.plan.rules.indexOf('use-indexes'));
const nsResults = db._query(nsQuery).toArray();
assertEqual(nsResults.length, 100);
nsResults.forEach( ( doc ) => { assertEqual(doc.num100, 57); } );
}
};
}
////////////////////////////////////////////////////////////////////////////////
/// @brief executes the test suites
////////////////////////////////////////////////////////////////////////////////
jsunity.run(UpgradeData);
return jsunity.done();

@@ -0,0 +1,83 @@
/*jshint globalstrict:false, strict:false */
/*global assertEqual, assertTrue, assertTypeOf, assertNotEqual, fail */
////////////////////////////////////////////////////////////////////////////////
/// @brief test view creation after upgrade from an mmfiles instance
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2010-2012 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is triAGENS GmbH, Cologne, Germany
///
/// @author Dan Larkin-York
/// @author Copyright 2012, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////
var jsunity = require("jsunity");
var arangodb = require("@arangodb");
var db = require('internal').db;
////////////////////////////////////////////////////////////////////////////////
/// @brief test suite: upgrade with data from mmfiles instance
////////////////////////////////////////////////////////////////////////////////
function UpgradeData() {
'use strict';
return {
////////////////////////////////////////////////////////////////////////////
/// @brief verify view creation and search over upgraded collection data
////////////////////////////////////////////////////////////////////////////
testViewCreationWithExistingCollection: function() {
const c = db._collection('LargeCollection');
assertEqual(c.count(), 10000);
const v = db._createView('TestView', 'arangosearch', {});
const properties = {
links: {
'LargeCollection': {
includeAllFields: true
}
}
};
v.properties(properties);
const query =
`FOR doc in TestView
SEARCH(doc.name >= "Name 1114" && doc.name <= "Name 1117")
OPTIONS { waitForSync: true }
SORT doc.name ASC
RETURN doc`;
const results = db._query(query).toArray();
assertEqual(results.length, 4);
assertEqual(results[0].name, "Name 1114");
assertEqual(results[1].name, "Name 1115");
assertEqual(results[2].name, "Name 1116");
assertEqual(results[3].name, "Name 1117");
}
};
}
////////////////////////////////////////////////////////////////////////////////
/// @brief executes the test suites
////////////////////////////////////////////////////////////////////////////////
jsunity.run(UpgradeData);
return jsunity.done();