diff --git a/.gitignore b/.gitignore
index 010e21f92d..839f567aec 100644
--- a/.gitignore
+++ b/.gitignore
@@ -25,6 +25,7 @@
 core.*
 *.lnk
 Thumbs.db
+enterprise
 compile_commands.json
 instanceinfo.json
 testresult.json
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 54ce226e3b..51500dce7e 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -49,6 +49,15 @@ if (USE_DEV_TIMERS)
   add_definitions("-DUSE_DEV_TIMERS=1")
 endif ()
 
+# enable enterprise features
+set(ENTERPRISE_INCLUDE_DIR "enterprise")
+option(USE_ENTERPRISE OFF)
+
+if (USE_ENTERPRISE)
+  add_definitions("-DUSE_ENTERPRISE=1")
+  include_directories(${ENTERPRISE_INCLUDE_DIR})
+endif ()
+
 ################################################################################
 ## ARANGODB
 ################################################################################
diff --git a/arangod/Agency/AddFollower.cpp b/arangod/Agency/AddFollower.cpp
index 20b8ebf9e7..629c0284ed 100644
--- a/arangod/Agency/AddFollower.cpp
+++ b/arangod/Agency/AddFollower.cpp
@@ -245,7 +245,7 @@ JOB_STATUS AddFollower::status () {
   if (status == PENDING) {
     std::string curPath = curColPrefix + _database + "/" + _collection + "/"
       + _shard + "/servers";
-    
+
     Slice current = _snapshot(curPath).slice();
     for (auto const& srv : VPackArrayIterator(current)) {
       if (srv.copyString() == _newFollower) {
@@ -254,6 +254,7 @@ JOB_STATUS AddFollower::status () {
         }
       }
     }
+  }
 
   return status;
diff --git a/arangod/Cluster/TraverserEngine.cpp b/arangod/Cluster/TraverserEngine.cpp
index 7cda79f31a..beec5a7ee2 100644
--- a/arangod/Cluster/TraverserEngine.cpp
+++ b/arangod/Cluster/TraverserEngine.cpp
@@ -127,6 +127,7 @@ TraverserEngine::TraverserEngine(TRI_vocbase_t* vocbase,
 }
 
 TraverserEngine::~TraverserEngine() {
+  /*
   auto resolver = _trx->resolver();
   // TODO Do we need this or will delete trx do this already?
   for (auto const& shard : _locked) {
@@ -141,9 +142,10 @@ TraverserEngine::~TraverserEngine() {
         << TRI_errno_string(res);
     }
   }
-  if (_trx != nullptr) {
+  */
+  if (_trx) {
+    _trx->commit();
     delete _trx;
-    _trx = nullptr;
   }
   if (_query != nullptr) {
     delete _query;
@@ -178,7 +180,7 @@ void TraverserEngine::getEdges(VPackSlice vertex, size_t depth, VPackBuilder& bu
       // Result now contains all valid edges, probably multiples.
     }
   } else if (vertex.isString()) {
-    auto edgeCursor = _opts->nextCursor(vertex, depth);
+    std::unique_ptr<EdgeCursor> edgeCursor(_opts->nextCursor(vertex, depth));
     while(edgeCursor->next(result, cursorId)) {
       if (!_opts->evaluateEdgeExpression(result.back(), vertex, depth, cursorId)) {
diff --git a/arangod/Cluster/TraverserEngineRegistry.cpp b/arangod/Cluster/TraverserEngineRegistry.cpp
index 8ee8d2742f..8e19f738ba 100644
--- a/arangod/Cluster/TraverserEngineRegistry.cpp
+++ b/arangod/Cluster/TraverserEngineRegistry.cpp
@@ -42,37 +42,21 @@ TraverserEngineRegistry::EngineInfo::~EngineInfo() {
 }
 
 TraverserEngineRegistry::~TraverserEngineRegistry() {
-  std::vector<TraverserEngineID> toDelete;
-  {
-    WRITE_LOCKER(writeLocker, _lock);
-    try {
-      for (auto const& it : _engines) {
-        toDelete.emplace_back(it.first);
-      }
-    } catch (...) {
-      // the emplace_back() above might fail
-      // prevent throwing exceptions in the destructor
-    }
-  }
-
-  // note: destroy() will acquire _lock itself, so it must be called without
-  // holding the lock
-  for (auto& p : toDelete) {
-    try { // just in case
-      destroy(p);
-    } catch (...) {
-    }
+  WRITE_LOCKER(writeLocker, _lock);
+  for (auto const& it : _engines) {
+    destroy(it.first, false);
   }
 }
 
 /// @brief Create a new Engine and return it's id
 TraverserEngineID TraverserEngineRegistry::createNew(TRI_vocbase_t* vocbase,
                                                      VPackSlice engineInfo) {
-  WRITE_LOCKER(writeLocker, _lock);
   TraverserEngineID id = TRI_NewTickServer();
   TRI_ASSERT(id != 0);
-  TRI_ASSERT(_engines.find(id) == _engines.end());
   auto info = std::make_unique<EngineInfo>(vocbase, engineInfo);
+
+  WRITE_LOCKER(writeLocker, _lock);
+  TRI_ASSERT(_engines.find(id) == _engines.end());
   _engines.emplace(id, info.get());
   info.release();
   return id;
@@ -80,23 +64,7 @@ TraverserEngineID TraverserEngineRegistry::createNew(TRI_vocbase_t* vocbase,
 
 /// @brief Destroy the engine with the given id
 void TraverserEngineRegistry::destroy(TraverserEngineID id) {
-  WRITE_LOCKER(writeLocker, _lock);
-  auto e = _engines.find(id);
-  if (e == _engines.end()) {
-    // Nothing to destroy
-    // TODO: Should we throw an error instead?
-    return;
-  }
-  // TODO what about shard locking?
-  // TODO what about multiple dbs?
-  if (e->second->_isInUse) {
-    // Someone is still working with this engine.
-    // TODO can we just delete it? Or throw an error?
-    THROW_ARANGO_EXCEPTION(TRI_ERROR_DEADLOCK);
-  }
-
-  delete e->second;
-  _engines.erase(id);
+  destroy(id, true);
 }
 
 /// @brief Get the engine with the given id
@@ -131,3 +99,30 @@ void TraverserEngineRegistry::returnEngine(TraverserEngineID id) {
   }
   // TODO Should we throw an error if we are not allowed to return this
 }
+
+/// @brief Destroy the engine with the given id, worker function
+void TraverserEngineRegistry::destroy(TraverserEngineID id, bool doLock) {
+  EngineInfo* engine = nullptr;
+
+  {
+    CONDITIONAL_WRITE_LOCKER(writeLocker, _lock, doLock);
+    auto e = _engines.find(id);
+    if (e == _engines.end()) {
+      // Nothing to destroy
+      // TODO: Should we throw an error instead?
+      return;
+    }
+    // TODO what about shard locking?
+    // TODO what about multiple dbs?
+    if (e->second->_isInUse) {
+      // Someone is still working with this engine.
+      // TODO can we just delete it? Or throw an error?
+      THROW_ARANGO_EXCEPTION(TRI_ERROR_DEADLOCK);
+    }
+
+    engine = e->second;
+    _engines.erase(id);
+  }
+
+  delete engine;
+}
diff --git a/arangod/Cluster/TraverserEngineRegistry.h b/arangod/Cluster/TraverserEngineRegistry.h
index 588701062b..a6cc98403e 100644
--- a/arangod/Cluster/TraverserEngineRegistry.h
+++ b/arangod/Cluster/TraverserEngineRegistry.h
@@ -64,6 +64,8 @@ class TraverserEngineRegistry {
   void returnEngine(TraverserEngineID);
 
  private:
+
+  void destroy(TraverserEngineID, bool doLock);
 
   struct EngineInfo {
     bool _isInUse;  // Flag if this engine is in use
diff --git a/arangod/RestServer/BootstrapFeature.cpp b/arangod/RestServer/BootstrapFeature.cpp
index ea4d970698..c3d816453e 100644
--- a/arangod/RestServer/BootstrapFeature.cpp
+++ b/arangod/RestServer/BootstrapFeature.cpp
@@ -35,6 +35,10 @@
 #include "RestServer/DatabaseFeature.h"
 #include "V8Server/V8DealerFeature.h"
 
+#ifdef USE_ENTERPRISE
+#include "Enterprise/Version.h"
+#endif
+
 using namespace arangodb;
 using namespace arangodb::application_features;
 using namespace arangodb::options;
@@ -155,11 +159,17 @@ void BootstrapFeature::start() {
   // Start service properly:
   rest::RestHandlerFactory::setMaintenance(false);
 
+
+#ifdef USE_ENTERPRISE
+  LOG(INFO) << "ArangoDB (enterprise version " << ARANGODB_VERSION_FULL
+            << " / " << ENTERPRISE_VERSION << ") is ready for business. Have fun!";
Have fun!"; +#else LOG(INFO) << "ArangoDB (version " << ARANGODB_VERSION_FULL << ") is ready for business. Have fun!"; +#endif if (_bark) { - LOG(INFO) << "der Hund so: wau wau!"; + LOG(INFO) << "The dog says: wau wau!"; } _isReady = true; diff --git a/arangod/StorageEngine/MMFilesCollection.cpp b/arangod/StorageEngine/MMFilesCollection.cpp index 9f83957b65..1e7e593e44 100644 --- a/arangod/StorageEngine/MMFilesCollection.cpp +++ b/arangod/StorageEngine/MMFilesCollection.cpp @@ -306,50 +306,7 @@ static bool OpenIterator(TRI_df_marker_t const* marker, OpenIteratorState* data, return (res == TRI_ERROR_NO_ERROR); } -/// @brief iterate all markers of the collection -static int IterateMarkersCollection(arangodb::Transaction* trx, - LogicalCollection* collection) { - // initialize state for iteration - OpenIteratorState openState(collection); - - if (collection->getPhysical()->initialCount() != -1) { - auto primaryIndex = collection->primaryIndex(); - - int res = primaryIndex->resize( - trx, static_cast(collection->getPhysical()->initialCount() * 1.1)); - - if (res != TRI_ERROR_NO_ERROR) { - return res; - } - - openState._initialCount = collection->getPhysical()->initialCount(); - } - - // read all documents and fill primary index - auto cb = [&openState](TRI_df_marker_t const* marker, TRI_datafile_t* datafile) -> bool { - return OpenIterator(marker, &openState, datafile); - }; - - collection->iterateDatafiles(cb); - - LOG(TRACE) << "found " << openState._documents << " document markers, " - << openState._deletions << " deletion markers for collection '" << collection->name() << "'"; - - // update the real statistics for the collection - try { - for (auto& it : openState._stats) { - collection->createStats(it.first, *(it.second)); - } - } catch (basics::Exception const& ex) { - return ex.code(); - } catch (...) 
-    return TRI_ERROR_INTERNAL;
-  }
-
-  return TRI_ERROR_NO_ERROR;
-}
-
-}
+}  // namespace
 
 MMFilesCollection::MMFilesCollection(LogicalCollection* collection)
     : PhysicalCollection(collection), _ditches(collection), _initialCount(0), _revision(0) {}
@@ -1138,94 +1095,44 @@ void MMFilesCollection::finishCompaction() {
   _compactionLock.unlock();
 }
 
-/// @brief opens an existing collection
-void MMFilesCollection::open(bool ignoreErrors) {
-  TRI_vocbase_t* vocbase = _logicalCollection->vocbase();
+/// @brief iterate all markers of the collection
+int MMFilesCollection::iterateMarkersOnLoad(arangodb::Transaction* trx) {
+  // initialize state for iteration
+  OpenIteratorState openState(_logicalCollection);
 
-  VPackBuilder builder;
-  StorageEngine* engine = EngineSelectorFeature::ENGINE;
-  engine->getCollectionInfo(vocbase, _logicalCollection->cid(), builder, false, 0);
+  if (_initialCount != -1) {
+    auto primaryIndex = _logicalCollection->primaryIndex();
 
-  double start = TRI_microtime();
+    int res = primaryIndex->resize(
+        trx, static_cast<size_t>(_initialCount * 1.1));
 
-  LOG_TOPIC(TRACE, Logger::PERFORMANCE)
-      << "open-document-collection { collection: " << vocbase->name() << "/"
-      << _logicalCollection->name() << " }";
-
-  int res = _logicalCollection->open(ignoreErrors);
-
-  if (res != TRI_ERROR_NO_ERROR) {
-    LOG(ERR) << "cannot open document collection from path '" << _logicalCollection->path() << "'";
-    THROW_ARANGO_EXCEPTION(res);
-  }
-
-  res = _logicalCollection->createInitialIndexes();
-
-  if (res != TRI_ERROR_NO_ERROR) {
-    LOG(ERR) << "cannot initialize document collection: " << TRI_errno_string(res);
-    THROW_ARANGO_EXCEPTION(res);
-  }
-
-  arangodb::SingleCollectionTransaction trx(
-      arangodb::StandaloneTransactionContext::Create(vocbase),
-      _logicalCollection->cid(), TRI_TRANSACTION_WRITE);
-
-  // build the primary index
-  res = TRI_ERROR_INTERNAL;
-
-  try {
-    double start = TRI_microtime();
-
-    LOG_TOPIC(TRACE, Logger::PERFORMANCE)
-        << "iterate-markers { collection: " << vocbase->name() << "/"
-        << _logicalCollection->name() << " }";
-
-    // iterate over all markers of the collection
-    res = IterateMarkersCollection(&trx, _logicalCollection);
-
-    LOG_TOPIC(TRACE, Logger::PERFORMANCE) << "[timer] " << Logger::FIXED(TRI_microtime() - start) << " s, iterate-markers { collection: " << vocbase->name() << "/" << _logicalCollection->name() << " }";
-  } catch (arangodb::basics::Exception const& ex) {
-    res = ex.code();
-  } catch (std::bad_alloc const&) {
-    res = TRI_ERROR_OUT_OF_MEMORY;
-  } catch (...) {
-    res = TRI_ERROR_INTERNAL;
-  }
-
-  if (res != TRI_ERROR_NO_ERROR) {
-    THROW_ARANGO_EXCEPTION_MESSAGE(res, std::string("cannot iterate data of document collection: ") + TRI_errno_string(res));
-  }
-
-  // build the indexes meta-data, but do not fill the indexes yet
-  {
-    auto old = _logicalCollection->useSecondaryIndexes();
-
-    // turn filling of secondary indexes off. we're now only interested in getting
-    // the indexes' definition. we'll fill them below ourselves.
-
-    _logicalCollection->useSecondaryIndexes(false);
-
-    try {
-      _logicalCollection->detectIndexes(&trx);
-      _logicalCollection->useSecondaryIndexes(old);
-    } catch (basics::Exception const& ex) {
-      _logicalCollection->useSecondaryIndexes(old);
-      THROW_ARANGO_EXCEPTION_MESSAGE(ex.code(), std::string("cannot initialize collection indexes: ") + ex.what());
-    } catch (std::exception const& ex) {
-      _logicalCollection->useSecondaryIndexes(old);
-      THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, std::string("cannot initialize collection indexes: ") + ex.what());
-    } catch (...) {
-      _logicalCollection->useSecondaryIndexes(old);
-      THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "cannot initialize collection indexes: unknown exception");
+    if (res != TRI_ERROR_NO_ERROR) {
+      return res;
     }
+
+    openState._initialCount = _initialCount;
   }
 
-  if (!arangodb::wal::LogfileManager::instance()->isInRecovery()) {
-    // build the index structures, and fill the indexes
-    _logicalCollection->fillIndexes(&trx);
+  // read all documents and fill primary index
+  auto cb = [&openState](TRI_df_marker_t const* marker, TRI_datafile_t* datafile) -> bool {
+    return OpenIterator(marker, &openState, datafile);
+  };
+
+  iterateDatafiles(cb);
+
+  LOG(TRACE) << "found " << openState._documents << " document markers, "
+             << openState._deletions << " deletion markers for collection '" << _logicalCollection->name() << "'";
+
+  // update the real statistics for the collection
+  try {
+    for (auto& it : openState._stats) {
+      createStats(it.first, *(it.second));
+    }
+  } catch (basics::Exception const& ex) {
+    return ex.code();
+  } catch (...) {
+    return TRI_ERROR_INTERNAL;
   }
 
-  LOG_TOPIC(TRACE, Logger::PERFORMANCE)
-      << "[timer] " << Logger::FIXED(TRI_microtime() - start)
-      << " s, open-document-collection { collection: " << vocbase->name() << "/"
-      << _logicalCollection->name() << " }";
+  return TRI_ERROR_NO_ERROR;
 }
diff --git a/arangod/StorageEngine/MMFilesCollection.h b/arangod/StorageEngine/MMFilesCollection.h
index c0692ab448..9080f53ad1 100644
--- a/arangod/StorageEngine/MMFilesCollection.h
+++ b/arangod/StorageEngine/MMFilesCollection.h
@@ -95,9 +95,6 @@ class MMFilesCollection final : public PhysicalCollection {
   /// @brief seal a datafile
   int sealDatafile(TRI_datafile_t* datafile, bool isCompactor);
 
-  /// @brief iterates over a collection
-  bool iterateDatafiles(std::function<bool(TRI_df_marker_t const*, TRI_datafile_t*)> const& cb) override;
-
   /// @brief increase dead stats for a datafile, if it exists
   void increaseDeadStats(TRI_voc_fid_t fid, int64_t number, int64_t size) override {
     _datafileStatistics.increaseDead(fid, number, size);
@@ -108,11 +105,6 @@ class MMFilesCollection final : public PhysicalCollection {
     _datafileStatistics.update(fid, values);
   }
 
-  /// @brief create statistics for a datafile, using the stats provided
-  void createStats(TRI_voc_fid_t fid, DatafileStatisticsContainer const& values) override {
-    _datafileStatistics.create(fid, values);
-  }
-
   /// @brief order a new master pointer
   TRI_doc_mptr_t* requestMasterpointer() override;
 
@@ -129,11 +121,20 @@ class MMFilesCollection final : public PhysicalCollection {
   bool tryLockForCompaction() override;
   void finishCompaction() override;
 
-  void open(bool ignoreErrors) override;
-
   Ditches* ditches() const override { return &_ditches; }
+
+  /// @brief iterate all markers of a collection on load
+  int iterateMarkersOnLoad(arangodb::Transaction* trx) override;
 
  private:
+  /// @brief create statistics for a datafile, using the stats provided
+  void createStats(TRI_voc_fid_t fid, DatafileStatisticsContainer const& values) {
+    _datafileStatistics.create(fid, values);
+  }
+
+  /// @brief iterates over a collection
+  bool iterateDatafiles(std::function<bool(TRI_df_marker_t const*, TRI_datafile_t*)> const& cb);
+
   /// @brief creates a datafile
   TRI_datafile_t* createDatafile(TRI_voc_fid_t fid,
                                  TRI_voc_size_t journalSize,
diff --git a/arangod/V8Server/v8-vocindex.cpp b/arangod/V8Server/v8-vocindex.cpp
index 93d554f527..aebc18728e 100644
--- a/arangod/V8Server/v8-vocindex.cpp
+++ b/arangod/V8Server/v8-vocindex.cpp
@@ -653,7 +653,7 @@ static void CreateCollectionCoordinator(
   uint64_t replicationFactor = 1;
 
   // default shard key
-  shardKeys.push_back("_key");
+  shardKeys.push_back(StaticStrings::KeyString);
 
   std::string distributeShardsLike;
diff --git a/arangod/VocBase/LogicalCollection.cpp b/arangod/VocBase/LogicalCollection.cpp
index 109e3959e5..5614fd2631 100644
--- a/arangod/VocBase/LogicalCollection.cpp
+++ b/arangod/VocBase/LogicalCollection.cpp
@@ -48,6 +48,8 @@
 #include "Utils/CollectionNameResolver.h"
 #include "Utils/CollectionReadLocker.h"
 #include "Utils/CollectionWriteLocker.h"
+#include "Utils/SingleCollectionTransaction.h"
+#include "Utils/StandaloneTransactionContext.h"
 #include "VocBase/PhysicalCollection.h"
 #include "VocBase/IndexPoolFeature.h"
 #include "VocBase/KeyGenerator.h"
@@ -1115,7 +1117,97 @@ PhysicalCollection* LogicalCollection::createPhysical() {
 }
 
 /// @brief opens an existing collection
-int LogicalCollection::open(bool ignoreErrors) {
+void LogicalCollection::open(bool ignoreErrors) {
+  VPackBuilder builder;
+  StorageEngine* engine = EngineSelectorFeature::ENGINE;
+  engine->getCollectionInfo(_vocbase, cid(), builder, false, 0);
+
+  double start = TRI_microtime();
+
+  LOG_TOPIC(TRACE, Logger::PERFORMANCE)
+      << "open-document-collection { collection: " << _vocbase->name() << "/"
+      << _name << " }";
+
+  int res = openWorker(ignoreErrors);
+
+  if (res != TRI_ERROR_NO_ERROR) {
+    LOG(ERR) << "cannot open document collection from path '" << path() << "'";
+    THROW_ARANGO_EXCEPTION(res);
+  }
+
+  res = createInitialIndexes();
+
+  if (res != TRI_ERROR_NO_ERROR) {
+    LOG(ERR) << "cannot initialize document collection: " << TRI_errno_string(res);
+    THROW_ARANGO_EXCEPTION(res);
+  }
+
+  arangodb::SingleCollectionTransaction trx(
+      arangodb::StandaloneTransactionContext::Create(_vocbase),
+      cid(), TRI_TRANSACTION_WRITE);
+
+  // build the primary index
+  res = TRI_ERROR_INTERNAL;
+
+  try {
+    double start = TRI_microtime();
+
+    LOG_TOPIC(TRACE, Logger::PERFORMANCE)
+        << "iterate-markers { collection: " << _vocbase->name() << "/"
+        << _name << " }";
+
+    // iterate over all markers of the collection
+    res = getPhysical()->iterateMarkersOnLoad(&trx);
+
+    LOG_TOPIC(TRACE, Logger::PERFORMANCE) << "[timer] " << Logger::FIXED(TRI_microtime() - start) << " s, iterate-markers { collection: " << _vocbase->name() << "/" << _name << " }";
+  } catch (arangodb::basics::Exception const& ex) {
+    res = ex.code();
+  } catch (std::bad_alloc const&) {
+    res = TRI_ERROR_OUT_OF_MEMORY;
+  } catch (...) {
+    res = TRI_ERROR_INTERNAL;
+  }
+
+  if (res != TRI_ERROR_NO_ERROR) {
+    THROW_ARANGO_EXCEPTION_MESSAGE(res, std::string("cannot iterate data of document collection: ") + TRI_errno_string(res));
+  }
+
+  // build the indexes meta-data, but do not fill the indexes yet
+  {
+    auto old = useSecondaryIndexes();
+
+    // turn filling of secondary indexes off. we're now only interested in getting
+    // the indexes' definition. we'll fill them below ourselves.
+    useSecondaryIndexes(false);
+
+    try {
+      detectIndexes(&trx);
+      useSecondaryIndexes(old);
+    } catch (basics::Exception const& ex) {
+      useSecondaryIndexes(old);
+      THROW_ARANGO_EXCEPTION_MESSAGE(ex.code(), std::string("cannot initialize collection indexes: ") + ex.what());
+    } catch (std::exception const& ex) {
+      useSecondaryIndexes(old);
+      THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, std::string("cannot initialize collection indexes: ") + ex.what());
+    } catch (...) {
+      useSecondaryIndexes(old);
+      THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "cannot initialize collection indexes: unknown exception");
+    }
+  }
+
+  if (!arangodb::wal::LogfileManager::instance()->isInRecovery()) {
+    // build the index structures, and fill the indexes
+    fillIndexes(&trx);
+  }
+
+  LOG_TOPIC(TRACE, Logger::PERFORMANCE)
+      << "[timer] " << Logger::FIXED(TRI_microtime() - start)
+      << " s, open-document-collection { collection: " << _vocbase->name() << "/"
+      << _name << " }";
+}
+
+/// @brief opens an existing collection
+int LogicalCollection::openWorker(bool ignoreErrors) {
   StorageEngine* engine = EngineSelectorFeature::ENGINE;
 
   double start = TRI_microtime();
diff --git a/arangod/VocBase/LogicalCollection.h b/arangod/VocBase/LogicalCollection.h
index c53e511d87..1b8ca23538 100644
--- a/arangod/VocBase/LogicalCollection.h
+++ b/arangod/VocBase/LogicalCollection.h
@@ -237,7 +237,7 @@ class LogicalCollection {
 
   /// @brief opens an existing collection
-  int open(bool ignoreErrors);
+  void open(bool ignoreErrors);
 
   /// @brief closes an open collection
   int close();
@@ -249,11 +249,6 @@ class LogicalCollection {
     return getPhysical()->rotateActiveJournal();
   }
 
-  /// @brief iterates over a collection
-  bool iterateDatafiles(std::function<bool(TRI_df_marker_t const*, TRI_datafile_t*)> const& callback) {
-    return getPhysical()->iterateDatafiles(callback);
-  }
-
   /// @brief increase dead stats for a datafile, if it exists
   void increaseDeadStats(TRI_voc_fid_t fid, int64_t number, int64_t size) {
     return getPhysical()->increaseDeadStats(fid, number, size);
@@ -264,12 +259,6 @@ class LogicalCollection {
     return getPhysical()->updateStats(fid, values);
   }
 
-  /// @brief create statistics for a datafile, using the stats provided
-  void createStats(TRI_voc_fid_t fid, DatafileStatisticsContainer const& values) {
-    return getPhysical()->createStats(fid, values);
-  }
-
-
   int applyForTickRange(TRI_voc_tick_t dataMin, TRI_voc_tick_t dataMax,
                         std::function const& callback) {
     return getPhysical()->applyForTickRange(dataMin, dataMax, callback);
@@ -284,7 +273,7 @@ class LogicalCollection {
   void releaseMasterpointer(TRI_doc_mptr_t* mptr) {
     getPhysical()->releaseMasterpointer(mptr);
   }
-  
+
   /// @brief disallow starting the compaction of the collection
   void preventCompaction() { getPhysical()->preventCompaction(); }
   bool tryPreventCompaction() { return getPhysical()->tryPreventCompaction(); }
@@ -369,11 +358,11 @@ class LogicalCollection {
 
   // SECTION: Index creation
 
-  /// @brief creates the initial indexes for the collection
- public:
-  // FIXME Should be private
-  int createInitialIndexes();
  private:
+  /// @brief creates the initial indexes for the collection
+  int createInitialIndexes();
+
+  int openWorker(bool ignoreErrors);
 
   bool removeIndex(TRI_idx_iid_t iid);
diff --git a/arangod/VocBase/PhysicalCollection.h b/arangod/VocBase/PhysicalCollection.h
index 038023e5fb..5a63f085fb 100644
--- a/arangod/VocBase/PhysicalCollection.h
+++ b/arangod/VocBase/PhysicalCollection.h
@@ -37,6 +37,7 @@ struct TRI_doc_mptr_t;
 
 namespace arangodb {
 class Ditches;
 class LogicalCollection;
+class Transaction;
 
 class PhysicalCollection {
  protected:
@@ -66,18 +67,12 @@ class PhysicalCollection {
   virtual int applyForTickRange(TRI_voc_tick_t dataMin, TRI_voc_tick_t dataMax,
                                 std::function const& callback) = 0;
 
-  /// @brief iterates over a collection
-  virtual bool iterateDatafiles(std::function<bool(TRI_df_marker_t const*, TRI_datafile_t*)> const& cb) = 0;
-
   /// @brief increase dead stats for a datafile, if it exists
   virtual void increaseDeadStats(TRI_voc_fid_t fid, int64_t number, int64_t size) = 0;
 
   /// @brief increase dead stats for a datafile, if it exists
   virtual void updateStats(TRI_voc_fid_t fid, DatafileStatisticsContainer const& values) = 0;
 
-  /// @brief create statistics for a datafile, using the stats provided
-  virtual void createStats(TRI_voc_fid_t fid, DatafileStatisticsContainer const& values) = 0;
-
   /// @brief report extra memory used by indexes etc.
   virtual size_t memory() const = 0;
 
@@ -107,9 +102,10 @@ class PhysicalCollection {
   /// @brief signal that compaction is finished
   virtual void finishCompaction() = 0;
-
-  virtual void open(bool ignoreErrors) = 0;
+  /// @brief iterate all markers of a collection on load
+  virtual int iterateMarkersOnLoad(arangodb::Transaction* trx) = 0;
+
  protected:
   LogicalCollection* _logicalCollection;
 };
diff --git a/arangod/VocBase/vocbase.cpp b/arangod/VocBase/vocbase.cpp
index 364218224f..16f1d4eb92 100644
--- a/arangod/VocBase/vocbase.cpp
+++ b/arangod/VocBase/vocbase.cpp
@@ -448,7 +448,7 @@ int TRI_vocbase_t::loadCollection(arangodb::LogicalCollection* collection,
   }
 
   try {
-    collection->getPhysical()->open(ignoreDatafileErrors);
+    collection->open(ignoreDatafileErrors);
   } catch (...) {
     collection->setStatus(TRI_VOC_COL_STATUS_CORRUPTED);
     return TRI_ERROR_ARANGO_CORRUPTED_COLLECTION;
diff --git a/js/client/tests/agency/agency-test.js b/js/client/tests/agency/agency-test.js
index f719b600d4..94fe5ef117 100644
--- a/js/client/tests/agency/agency-test.js
+++ b/js/client/tests/agency/agency-test.js
@@ -614,6 +614,22 @@ function agencyTestSuite () {
       assertEqual(readAndCheck([["/bumms", "/bummsfallera"]]),
                   [{bumms:"fallera", bummsfallera: "lalalala"}]);
     }
+    /*
+    testHiddenAgencyWrite: function() {
+      var res = writeAgency([[{".agency": {"op":"set","new":"fallera"}}]]);
+      assertEqual(res.statusCode, 400);
+    },
+
+    testHiddenAgencyWriteSlash: function() {
+      var res = writeAgency([[{"/.agency": {"op":"set","new":"fallera"}}]]);
+      assertEqual(res.statusCode, 400);
+    },
+
+    testHiddenAgencyWriteDeep: function() {
+      var res = writeAgency([[{"/.agency/hans": {"op":"set","new":"fallera"}}]]);
+      assertEqual(res.statusCode, 400);
+    }
+    */
 
   };
 }
diff --git a/lib/Rest/Version.cpp b/lib/Rest/Version.cpp
index 29d314b9cc..fab153f745 100644
--- a/lib/Rest/Version.cpp
+++ b/lib/Rest/Version.cpp
@@ -44,6 +44,10 @@
 #include
 #endif
 
+#ifdef USE_ENTERPRISE
+#include "Enterprise/Version.h"
+#endif
+
 using namespace arangodb::rest;
 
 std::map<std::string, std::string> Version::Values;
@@ -96,9 +100,6 @@ void Version::initialize() {
"true" : "false"; Values["boost-version"] = getBoostVersion(); Values["build-date"] = getBuildDate(); -#if HAVE_ARANGODB_BUILD_REPOSITORY - Values["build-repository"] = getBuildRepository(); -#endif Values["compiler"] = getCompiler(); Values["endianness"] = getEndianness(); Values["fd-setsize"] = arangodb::basics::StringUtils::itoa(FD_SETSIZE); @@ -113,6 +114,14 @@ void Version::initialize() { Values["zlib-version"] = getZLibVersion(); +#if USE_ENTERPRISE + Values["enterprise-version"] = ENTERPRISE_VERSION; +#endif + +#if HAVE_ARANGODB_BUILD_REPOSITORY + Values["build-repository"] = getBuildRepository(); +#endif + #ifdef ARANGODB_ENABLE_MAINTAINER_MODE Values["assertions"] = "true"; #else