diff --git a/Installation/Jenkins/build.sh b/Installation/Jenkins/build.sh index 325e74139a..b37f90e7a9 100755 --- a/Installation/Jenkins/build.sh +++ b/Installation/Jenkins/build.sh @@ -596,20 +596,26 @@ if test -n "${ENTERPRISE_GIT_URL}" ; then fi echo "I'm on Branch: ${GITARGS}" fi - + if test "${EP_GITARGS}" != "${GITARGS}"; then git checkout master; fi git fetch --tags; - git pull --all; - if test "${EP_GITARGS}" != "${GITARGS}"; then + if git pull --all; then + if test "${EP_GITARGS}" != "${GITARGS}"; then + git checkout ${GITARGS}; + fi + else + git checkout master; + git pull --all; + git fetch --tags; git checkout ${GITARGS}; fi ${FINAL_PULL} ) fi -if test ${DOWNLOAD_STARTER} == 1; then +if test "${DOWNLOAD_STARTER}" == 1; then # we utilize https://developer.github.com/v3/repos/ to get the newest release: STARTER_REV=`curl -s https://api.github.com/repos/arangodb-helper/ArangoDBStarter/releases |grep tag_name |head -n 1 |${SED} -e "s;.*: ;;" -e 's;";;g' -e 's;,;;'` STARTER_URL=`curl -s https://api.github.com/repos/arangodb-helper/ArangoDBStarter/releases/tags/${STARTER_REV} |grep browser_download_url |grep "${OSNAME}" |${SED} -e "s;.*: ;;" -e 's;";;g' -e 's;,;;'` diff --git a/README_maintainers.md b/README_maintainers.md index 5a5c40d683..f23e12f446 100644 --- a/README_maintainers.md +++ b/README_maintainers.md @@ -284,16 +284,16 @@ valgrind could look like this. Options are passed as regular long values in the syntax --option value --sub:option value. Using Valgrind could look like this: ./scripts/unittest single_server --test js/server/tests/aql/aql-escaping.js \ - --extraargs:server.threads 1 \ - --extraargs:scheduler.threads 1 \ - --extraargs:javascript.gc-frequency 1000000 \ - --extraargs:javascript.gc-interval 65536 \ + --extraArgs:server.threads 1 \ + --extraArgs:scheduler.threads 1 \ + --extraArgs:javascript.gc-frequency 1000000 \ + --extraArgs:javascript.gc-interval 65536 \ --javascript.v8-contexts 2 \ --valgrind /usr/bin/valgrind \ --valgrindargs:log-file /tmp/valgrindlog.%p - we specify the test to execute - - we specify some arangod arguments via --extraargs which increase the server performance + - we specify some arangod arguments via --extraArgs which increase the server performance - we specify to run using valgrind (this is supported by all facilities) - we specify some valgrind commandline arguments diff --git a/UnitTests/unittest.js b/UnitTests/unittest.js index def9ccde3a..ded8473cdb 100644 --- a/UnitTests/unittest.js +++ b/UnitTests/unittest.js @@ -178,7 +178,8 @@ function main(argv) { options = internal.parseArgv(argv, 0); // parse option with parseArgv function } } catch (x) { - print("failed to parse the json options: " + x.message); + print("failed to parse the json options: " + x.message + "\n" + String(x.stack)); + print("argv: ", argv); return -1; } } diff --git a/arangod/Cluster/TraverserEngine.cpp b/arangod/Cluster/TraverserEngine.cpp index 0eee6681cc..d8ae0ea766 100644 --- a/arangod/Cluster/TraverserEngine.cpp +++ b/arangod/Cluster/TraverserEngine.cpp @@ -192,9 +192,9 @@ void BaseTraverserEngine::getVertexData(VPackSlice vertex, VPackBuilder& builder auto shards = _vertexShards.find(name); if (shards == _vertexShards.end()) { THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_QUERY_COLLECTION_LOCK_FAILED, - "Collection not known to Traversal " + - name + " please add 'WITH " + name + - "' as the first line in your AQL"); + "collection not known to traversal: '" + + name + "'. 
please add 'WITH " + name + + "' as the first line in your AQL query"); // The collection is not known here! // Maybe handle differently } @@ -245,9 +245,9 @@ void BaseTraverserEngine::getVertexData(VPackSlice vertex, size_t depth, auto shards = _vertexShards.find(name); if (shards == _vertexShards.end()) { THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_QUERY_COLLECTION_LOCK_FAILED, - "Collection not known to Traversal " + - name + " please add 'WITH " + name + - "' as the first line in your AQL"); + "collection not known to traversal: '" + + name + "'. please add 'WITH " + name + + "' as the first line in your AQL query"); } builder.add(v); for (std::string const& shard : shards->second) { diff --git a/arangod/Graph/AttributeWeightShortestPathFinder.h b/arangod/Graph/AttributeWeightShortestPathFinder.h index a32fde9f10..fce833a458 100644 --- a/arangod/Graph/AttributeWeightShortestPathFinder.h +++ b/arangod/Graph/AttributeWeightShortestPathFinder.h @@ -206,7 +206,7 @@ class AttributeWeightShortestPathFinder : public ShortestPathFinder { /// @brief create the PathFinder ////////////////////////////////////////////////////////////////////////////// - AttributeWeightShortestPathFinder(ShortestPathOptions* options); + explicit AttributeWeightShortestPathFinder(ShortestPathOptions* options); ~AttributeWeightShortestPathFinder(); diff --git a/arangod/Graph/ConstantWeightShortestPathFinder.h b/arangod/Graph/ConstantWeightShortestPathFinder.h index cf1057251b..b786484869 100644 --- a/arangod/Graph/ConstantWeightShortestPathFinder.h +++ b/arangod/Graph/ConstantWeightShortestPathFinder.h @@ -58,7 +58,7 @@ class ConstantWeightShortestPathFinder : public ShortestPathFinder { }; public: - ConstantWeightShortestPathFinder(ShortestPathOptions* options); + explicit ConstantWeightShortestPathFinder(ShortestPathOptions* options); ~ConstantWeightShortestPathFinder(); diff --git a/arangod/MMFiles/MMFilesPersistentIndexFeature.cpp b/arangod/MMFiles/MMFilesPersistentIndexFeature.cpp index daf99617fc..45969b5b94 100644 --- a/arangod/MMFiles/MMFilesPersistentIndexFeature.cpp +++ b/arangod/MMFiles/MMFilesPersistentIndexFeature.cpp @@ -21,16 +21,17 @@ /// @author Jan Steemann //////////////////////////////////////////////////////////////////////////////// -#include "MMFilesPersistentIndexFeature.h" +#include "ApplicationFeatures/RocksDBOptionFeature.h" #include "Basics/Exceptions.h" #include "Basics/FileUtils.h" #include "Basics/tri-strings.h" #include "Logger/Logger.h" +#include "MMFiles/MMFilesPersistentIndexFeature.h" +#include "MMFiles/MMFilesPersistentIndexKeyComparator.h" #include "ProgramOptions/ProgramOptions.h" #include "ProgramOptions/Section.h" #include "RestServer/DatabasePathFeature.h" -#include "MMFiles/MMFilesPersistentIndexKeyComparator.h" - + #include #include #include @@ -55,18 +56,11 @@ static MMFilesPersistentIndexFeature* Instance = nullptr; MMFilesPersistentIndexFeature::MMFilesPersistentIndexFeature( application_features::ApplicationServer* server) : application_features::ApplicationFeature(server, "MMFilesPersistentIndex"), - _db(nullptr), _comparator(nullptr), _path(), _active(true), - _writeBufferSize(0), _maxWriteBufferNumber(2), - _delayedWriteRate(2 * 1024 * 1024), _minWriteBufferNumberToMerge(1), - _numLevels(4), _maxBytesForLevelBase(256 * 1024 * 1024), - _maxBytesForLevelMultiplier(10), _verifyChecksumsInCompaction(true), - _optimizeFiltersForHits(true), _baseBackgroundCompactions(1), - _maxBackgroundCompactions(1), _maxLogFileSize(0), - _keepLogFileNum(1000), _logFileTimeToRoll(0), 
_compactionReadaheadSize(0) { + _db(nullptr), _comparator(nullptr), _path() +{ setOptional(true); requiresElevatedPrivileges(false); - startsAfter("DatabasePath"); - + startsAfter("RocksDBOption"); onlyEnabledWith("MMFilesEngine"); } @@ -82,124 +76,9 @@ MMFilesPersistentIndexFeature::~MMFilesPersistentIndexFeature() { } void MMFilesPersistentIndexFeature::collectOptions(std::shared_ptr options) { - options->addSection("rocksdb", "Configure the RocksDB engine"); - - options->addOption( - "--rocksdb.enabled", - "Whether or not the RocksDB engine is enabled", - new BooleanParameter(&_active)); - - options->addOption( - "--rocksdb.write-buffer-size", - "amount of data to build up in memory before converting to a sorted on-disk file (0 = disabled)", - new UInt64Parameter(&_writeBufferSize)); - - options->addOption( - "--rocksdb.max-write-buffer-number", - "maximum number of write buffers that built up in memory", - new UInt64Parameter(&_maxWriteBufferNumber)); - - options->addHiddenOption( - "--rocksdb.delayed_write_rate", - "limited write rate to DB (in bytes per second) if we are writing to the last " - "mem table allowed and we allow more than 3 mem tables", - new UInt64Parameter(&_delayedWriteRate)); - - options->addOption( - "--rocksdb.min-write-buffer-number-to-merge", - "minimum number of write buffers that will be merged together before writing " - "to storage", - new UInt64Parameter(&_minWriteBufferNumberToMerge)); - - options->addOption( - "--rocksdb.num-levels", - "number of levels for the database", - new UInt64Parameter(&_numLevels)); - - options->addHiddenOption( - "--rocksdb.max-bytes-for-level-base", - "control maximum total data size for a level", - new UInt64Parameter(&_maxBytesForLevelBase)); - - options->addOption( - "--rocksdb.max-bytes-for-level-multiplier", - "control maximum total data size for a level", - new UInt64Parameter(&_maxBytesForLevelMultiplier)); - - options->addOption( - "--rocksdb.verify-checksums-in-compation", - "if true, compaction will verify checksum on every read that happens " - "as part of compaction", - new BooleanParameter(&_verifyChecksumsInCompaction)); - - options->addOption( - "--rocksdb.optimize-filters-for-hits", - "this flag specifies that the implementation should optimize the filters " - "mainly for cases where keys are found rather than also optimize for keys " - "missed. This would be used in cases where the application knows that " - "there are very few misses or the performance in the case of misses is not " - "important", - new BooleanParameter(&_optimizeFiltersForHits)); - - options->addOption( - "--rocksdb.base-background-compactions", - "suggested number of concurrent background compaction jobs", - new UInt64Parameter(&_baseBackgroundCompactions)); - - options->addOption( - "--rocksdb.max-background-compactions", - "maximum number of concurrent background compaction jobs", - new UInt64Parameter(&_maxBackgroundCompactions)); - - options->addOption( - "--rocksdb.max-log-file-size", - "specify the maximal size of the info log file", - new UInt64Parameter(&_maxLogFileSize)); - - options->addOption( - "--rocksdb.keep-log-file-num", - "maximal info log files to be kept", - new UInt64Parameter(&_keepLogFileNum)); - - options->addOption( - "--rocksdb.log-file-time-to-roll", - "time for the info log file to roll (in seconds). 
" - "If specified with non-zero value, log file will be rolled " - "if it has been active longer than `log_file_time_to_roll`", - new UInt64Parameter(&_logFileTimeToRoll)); - - options->addOption( - "--rocksdb.compaction-read-ahead-size", - "if non-zero, we perform bigger reads when doing compaction. If you're " - "running RocksDB on spinning disks, you should set this to at least 2MB. " - "that way RocksDB's compaction is doing sequential instead of random reads.", - new UInt64Parameter(&_compactionReadaheadSize)); } void MMFilesPersistentIndexFeature::validateOptions(std::shared_ptr options) { - if (!_active) { - forceDisable(); - } else { - if (_writeBufferSize > 0 && _writeBufferSize < 1024 * 1024) { - LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "invalid value for '--rocksdb.write-buffer-size'"; - FATAL_ERROR_EXIT(); - } - if (_maxBytesForLevelMultiplier == 0) { - LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "invalid value for '--rocksdb.max-bytes-for-level-multiplier'"; - FATAL_ERROR_EXIT(); - } - if (_numLevels < 1 || _numLevels > 20) { - LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "invalid value for '--rocksdb.num-levels'"; - FATAL_ERROR_EXIT(); - } - if (_baseBackgroundCompactions < 1 || _baseBackgroundCompactions > 64) { - LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "invalid value for '--rocksdb.base-background-compactions'"; - FATAL_ERROR_EXIT(); - } - if (_maxBackgroundCompactions < _baseBackgroundCompactions) { - _maxBackgroundCompactions = _baseBackgroundCompactions; - } - } } void MMFilesPersistentIndexFeature::start() { @@ -209,45 +88,47 @@ void MMFilesPersistentIndexFeature::start() { return; } + auto* opts = ApplicationServer::getFeature("RocksDBOption"); + // set the database sub-directory for RocksDB auto database = ApplicationServer::getFeature("DatabasePath"); _path = database->subdirectoryName("rocksdb"); - + LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "initializing rocksdb, path: " << _path; - + _comparator = new MMFilesPersistentIndexKeyComparator(); - + rocksdb::BlockBasedTableOptions tableOptions; tableOptions.cache_index_and_filter_blocks = true; tableOptions.filter_policy.reset(rocksdb::NewBloomFilterPolicy(12, false)); - + // TODO: using the prefix extractor will lead to the comparator being // called with just the key prefix (which the comparator currently cannot handle) // _options.prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(MMFilesPersistentIndex::minimalPrefixSize())); // _options.table_factory.reset(rocksdb::NewBlockBasedTableFactory(tableOptions)); - + _options.create_if_missing = true; _options.max_open_files = -1; _options.comparator = _comparator; - _options.write_buffer_size = static_cast(_writeBufferSize); - _options.max_write_buffer_number = static_cast(_maxWriteBufferNumber); - _options.delayed_write_rate = _delayedWriteRate; - _options.min_write_buffer_number_to_merge = static_cast(_minWriteBufferNumberToMerge); - _options.num_levels = static_cast(_numLevels); - _options.max_bytes_for_level_base = _maxBytesForLevelBase; - _options.max_bytes_for_level_multiplier = static_cast(_maxBytesForLevelMultiplier); - _options.verify_checksums_in_compaction = _verifyChecksumsInCompaction; - _options.optimize_filters_for_hits = _optimizeFiltersForHits; - - _options.base_background_compactions = static_cast(_baseBackgroundCompactions); - _options.max_background_compactions = static_cast(_maxBackgroundCompactions); - - _options.max_log_file_size = static_cast(_maxLogFileSize); - _options.keep_log_file_num = static_cast(_keepLogFileNum); - 
_options.log_file_time_to_roll = static_cast(_logFileTimeToRoll); - _options.compaction_readahead_size = static_cast(_compactionReadaheadSize); - + _options.write_buffer_size = static_cast(opts->_writeBufferSize); + _options.max_write_buffer_number = static_cast(opts->_maxWriteBufferNumber); + _options.delayed_write_rate = opts->_delayedWriteRate; + _options.min_write_buffer_number_to_merge = static_cast(opts->_minWriteBufferNumberToMerge); + _options.num_levels = static_cast(opts->_numLevels); + _options.max_bytes_for_level_base = opts->_maxBytesForLevelBase; + _options.max_bytes_for_level_multiplier = static_cast(opts->_maxBytesForLevelMultiplier); + _options.verify_checksums_in_compaction = opts->_verifyChecksumsInCompaction; + _options.optimize_filters_for_hits = opts->_optimizeFiltersForHits; + + _options.base_background_compactions = static_cast(opts->_baseBackgroundCompactions); + _options.max_background_compactions = static_cast(opts->_maxBackgroundCompactions); + + _options.max_log_file_size = static_cast(opts->_maxLogFileSize); + _options.keep_log_file_num = static_cast(opts->_keepLogFileNum); + _options.log_file_time_to_roll = static_cast(opts->_logFileTimeToRoll); + _options.compaction_readahead_size = static_cast(opts->_compactionReadaheadSize); + if (_options.base_background_compactions > 1 || _options.max_background_compactions > 1) { _options.env->SetBackgroundThreads( (std::max)(_options.base_background_compactions, _options.max_background_compactions), @@ -258,7 +139,7 @@ void MMFilesPersistentIndexFeature::start() { //options.block_cache_compressed = rocksdb::NewLRUCache(100 * 1048576); // 100MB compressed cache rocksdb::Status status = rocksdb::OptimisticTransactionDB::Open(_options, _path, &_db); - + if (! status.ok()) { LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "unable to initialize RocksDB: " << status.ToString(); FATAL_ERROR_EXIT(); @@ -276,14 +157,14 @@ void MMFilesPersistentIndexFeature::unprepare() { rocksdb::FlushOptions options; options.wait = true; rocksdb::Status status = _db->GetBaseDB()->Flush(options); - + if (! 
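Both consumers also size RocksDB's background thread pool from the compaction settings, as the SetBackgroundThreads() call above shows. The priority argument falls outside the hunk's context; the sketch below assumes rocksdb::Env::Priority::LOW, the pool RocksDB schedules compactions on:

    if (_options.base_background_compactions > 1 ||
        _options.max_background_compactions > 1) {
      // the pool must hold at least as many threads as concurrent
      // compactions are allowed, otherwise the limit cannot be reached
      _options.env->SetBackgroundThreads(
          (std::max)(_options.base_background_compactions,
                     _options.max_background_compactions),
          rocksdb::Env::Priority::LOW);  // assumed; truncated in the hunk above
    }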
status.ok()) { LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "error flushing data to RocksDB: " << status.ToString(); } syncWal(); } - + MMFilesPersistentIndexFeature* MMFilesPersistentIndexFeature::instance() { return Instance; } @@ -335,7 +216,7 @@ int MMFilesPersistentIndexFeature::dropPrefix(std::string const& prefix) { if (!isEnabled()) { return TRI_ERROR_NO_ERROR; } - + TRI_ASSERT(Instance != nullptr); try { @@ -344,7 +225,7 @@ int MMFilesPersistentIndexFeature::dropPrefix(std::string const& prefix) { // create lower and upper bound for deletion builder.openArray(); builder.add(VPackSlice::minKeySlice()); - builder.close(); + builder.close(); std::string l; l.reserve(prefix.size() + builder.slice().byteSize()); @@ -355,12 +236,12 @@ int MMFilesPersistentIndexFeature::dropPrefix(std::string const& prefix) { l.append(reinterpret_cast(&value), sizeof(uint64_t)); } l.append(builder.slice().startAs(), builder.slice().byteSize()); - + builder.clear(); builder.openArray(); builder.add(VPackSlice::maxKeySlice()); - builder.close(); - + builder.close(); + std::string u; u.reserve(prefix.size() + builder.slice().byteSize()); u.append(prefix); @@ -370,8 +251,8 @@ int MMFilesPersistentIndexFeature::dropPrefix(std::string const& prefix) { u.append(reinterpret_cast(&value), sizeof(uint64_t)); } u.append(builder.slice().startAs(), builder.slice().byteSize()); - -#if 0 + +#if 0 for (size_t i = 0; i < prefix.size(); i += sizeof(TRI_idx_iid_t)) { char const* x = prefix.c_str() + i; size_t o; @@ -381,7 +262,7 @@ int MMFilesPersistentIndexFeature::dropPrefix(std::string const& prefix) { TRI_FreeString(TRI_CORE_MEM_ZONE, q); } } - + LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "dropping RocksDB range: " << VPackSlice(l.c_str() + MMFilesPersistentIndex::keyPrefixSize()).toJson() << " - " << VPackSlice(u.c_str() + MMFilesPersistentIndex::keyPrefixSize()).toJson(); #endif @@ -398,15 +279,15 @@ int MMFilesPersistentIndexFeature::dropPrefix(std::string const& prefix) { LOG_TOPIC(WARN, arangodb::Logger::FIXME) << "RocksDB file deletion failed"; } } - + // go on and delete the remaining keys (delete files in range does not necessarily // find them all, just complete files) - + auto comparator = MMFilesPersistentIndexFeature::instance()->comparator(); rocksdb::DB* db = _db->GetBaseDB(); rocksdb::WriteBatch batch; - + std::unique_ptr it(db->NewIterator(rocksdb::ReadOptions())); it->Seek(lower); @@ -416,12 +297,12 @@ int MMFilesPersistentIndexFeature::dropPrefix(std::string const& prefix) { if (res >= 0) { break; } - + batch.Delete(it->key()); it->Next(); } - + // now apply deletion batch rocksdb::Status status = db->Write(rocksdb::WriteOptions(), &batch); @@ -442,4 +323,3 @@ int MMFilesPersistentIndexFeature::dropPrefix(std::string const& prefix) { return TRI_ERROR_INTERNAL; } } - diff --git a/arangod/MMFiles/MMFilesPersistentIndexFeature.h b/arangod/MMFiles/MMFilesPersistentIndexFeature.h index 61619760d3..b02d94956e 100644 --- a/arangod/MMFiles/MMFilesPersistentIndexFeature.h +++ b/arangod/MMFiles/MMFilesPersistentIndexFeature.h @@ -62,27 +62,10 @@ class MMFilesPersistentIndexFeature final : public application_features::Applica int dropPrefix(std::string const& prefix); private: - rocksdb::OptimisticTransactionDB* _db; rocksdb::Options _options; MMFilesPersistentIndexKeyComparator* _comparator; std::string _path; - bool _active; - uint64_t _writeBufferSize; - uint64_t _maxWriteBufferNumber; - uint64_t _delayedWriteRate; - uint64_t _minWriteBufferNumberToMerge; - uint64_t _numLevels; - uint64_t 
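dropPrefix() above removes an index's data in two phases: a file-level range deletion first (which, as the comment in the code notes, only catches complete files), then an iterator walk that batch-deletes whatever keys remain between the bounds. The second phase, condensed from the hunks above (lower and upper are the slices built from the VPack min/max key bounds):

    // scan [lower, upper) and collect every surviving key into one
    // atomic write batch
    rocksdb::WriteBatch batch;
    std::unique_ptr<rocksdb::Iterator> it(
        db->NewIterator(rocksdb::ReadOptions()));
    for (it->Seek(lower); it->Valid(); it->Next()) {
      if (comparator->Compare(it->key(), upper) >= 0) {
        break;  // past the end of the prefix range
      }
      batch.Delete(it->key());
    }
    rocksdb::Status status = db->Write(rocksdb::WriteOptions(), &batch);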
_maxBytesForLevelBase; - uint64_t _maxBytesForLevelMultiplier; - bool _verifyChecksumsInCompaction; - bool _optimizeFiltersForHits; - uint64_t _baseBackgroundCompactions; - uint64_t _maxBackgroundCompactions; - uint64_t _maxLogFileSize; - uint64_t _keepLogFileNum; - uint64_t _logFileTimeToRoll; - uint64_t _compactionReadaheadSize; }; } diff --git a/arangod/MMFiles/MMFilesWalRecoverState.cpp b/arangod/MMFiles/MMFilesWalRecoverState.cpp index 3fbf657f48..647d9efa47 100644 --- a/arangod/MMFiles/MMFilesWalRecoverState.cpp +++ b/arangod/MMFiles/MMFilesWalRecoverState.cpp @@ -1342,12 +1342,10 @@ bool MMFilesWalRecoverState::ReplayMarker(MMFilesMarker const* marker, LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "found drop database marker. databaseId: " << databaseId; - TRI_vocbase_t* vocbase = state->releaseDatabase(databaseId); + /*TRI_vocbase_t* vocbase = */ state->releaseDatabase(databaseId); - if (vocbase != nullptr) { - // ignore any potential error returned by this call - state->databaseFeature->dropDatabase(databaseId, true, false); - } + // ignore any potential error returned by this call + state->databaseFeature->dropDatabase(databaseId, true, state->isDropped(databaseId)); MMFilesPersistentIndexFeature::dropDatabase(databaseId); break; diff --git a/arangod/Replication/ContinuousSyncer.cpp b/arangod/Replication/ContinuousSyncer.cpp index d002f6231c..816b54a4c9 100644 --- a/arangod/Replication/ContinuousSyncer.cpp +++ b/arangod/Replication/ContinuousSyncer.cpp @@ -834,7 +834,10 @@ int ContinuousSyncer::applyLogMarker(VPackSlice const& slice, } else if (type == REPLICATION_COLLECTION_CREATE) { - return createCollection(slice.get("collection"), nullptr); + if (slice.get("collection").isObject()) { + return createCollection(slice.get("collection"), nullptr); + } + return createCollection(slice.get("data"), nullptr); } else if (type == REPLICATION_COLLECTION_DROP) { @@ -938,9 +941,9 @@ int ContinuousSyncer::applyLog(SimpleHttpResult* response, } if (ignoreCount == 0) { - if (lineLength > 256) { + if (lineLength > 1024) { errorMsg += - ", offending marker: " + std::string(lineStart, 256) + "..."; + ", offending marker: " + std::string(lineStart, 1024) + "..."; } else { errorMsg += ", offending marker: " + std::string(lineStart, lineLength); diff --git a/arangod/RestServer/arangod.cpp b/arangod/RestServer/arangod.cpp index 5e456fcdd9..0ca24eda28 100644 --- a/arangod/RestServer/arangod.cpp +++ b/arangod/RestServer/arangod.cpp @@ -34,6 +34,7 @@ #include "ApplicationFeatures/LanguageFeature.h" #include "ApplicationFeatures/NonceFeature.h" #include "ApplicationFeatures/PageSizeFeature.h" +#include "ApplicationFeatures/RocksDBOptionFeature.h" #include "Pregel/PregelFeature.h" #include "ApplicationFeatures/PrivilegeFeature.h" #include "ApplicationFeatures/ShutdownFeature.h" @@ -173,6 +174,7 @@ static int runServer(int argc, char** argv) { server.addFeature(new VersionFeature(&server)); server.addFeature(new ViewTypesFeature(&server)); server.addFeature(new WorkMonitorFeature(&server)); + server.addFeature(new RocksDBOptionFeature(&server)); #ifdef ARANGODB_HAVE_FORK server.addFeature(new DaemonFeature(&server)); diff --git a/arangod/RocksDBEngine/RocksDBCommon.h b/arangod/RocksDBEngine/RocksDBCommon.h index c1d2c546eb..72a53b9bea 100644 --- a/arangod/RocksDBEngine/RocksDBCommon.h +++ b/arangod/RocksDBEngine/RocksDBCommon.h @@ -48,34 +48,31 @@ namespace rocksdb {class TransactionDB; namespace arangodb { class RocksDBOperationResult : public Result { -public: + public: RocksDBOperationResult() - 
:Result() - ,_keySize(0) - ,_commitRequired(false) - {} + : Result(), + _keySize(0), + _commitRequired(false) {} RocksDBOperationResult(Result const& other) - : _keySize(0) - ,_commitRequired(false) - { + : _keySize(0), + _commitRequired(false) { cloneData(other); } RocksDBOperationResult(Result&& other) - : _keySize(0) - ,_commitRequired(false) - { + : _keySize(0), + _commitRequired(false) { cloneData(std::move(other)); } - uint64_t keySize(){ return _keySize; } - uint64_t keySize(uint64_t s ) { _keySize = s; return _keySize; } + uint64_t keySize() const { return _keySize; } + void keySize(uint64_t s) { _keySize = s; } - bool commitRequired(){ return _commitRequired; } - bool commitRequired(bool cr ) { _commitRequired = cr; return _commitRequired; } + bool commitRequired() const { return _commitRequired; } + void commitRequired(bool cr) { _commitRequired = cr; } -protected: + protected: uint64_t _keySize; bool _commitRequired; }; diff --git a/arangod/RocksDBEngine/RocksDBCounterManager.cpp b/arangod/RocksDBEngine/RocksDBCounterManager.cpp index d48fb1e7a2..9f51cb179a 100644 --- a/arangod/RocksDBEngine/RocksDBCounterManager.cpp +++ b/arangod/RocksDBEngine/RocksDBCounterManager.cpp @@ -342,7 +342,7 @@ bool RocksDBCounterManager::parseRocksWAL() { iterator->Next(); } - LOG_TOPIC(INFO, Logger::ENGINES) << "Finished WAL scan with " + LOG_TOPIC(TRACE, Logger::ENGINES) << "finished WAL scan with " << handler->deltas.size(); for (std::pair pair : handler->deltas) { @@ -352,7 +352,7 @@ bool RocksDBCounterManager::parseRocksWAL() { it->second._count += pair.second.added(); it->second._count -= pair.second.removed(); it->second._revisionId = pair.second._revisionId; - LOG_TOPIC(INFO, Logger::ENGINES) + LOG_TOPIC(TRACE, Logger::ENGINES) << "WAL recovered " << pair.second.added() << " PUTs and " << pair.second.removed() << " DELETEs for a total of " << it->second._count; diff --git a/arangod/RocksDBEngine/RocksDBEngine.cpp b/arangod/RocksDBEngine/RocksDBEngine.cpp index 9045360c2b..14f776a12a 100644 --- a/arangod/RocksDBEngine/RocksDBEngine.cpp +++ b/arangod/RocksDBEngine/RocksDBEngine.cpp @@ -22,6 +22,7 @@ /// @author Jan Christoph Uhde //////////////////////////////////////////////////////////////////////////////// +#include "ApplicationFeatures/RocksDBOptionFeature.h" #include "RocksDBEngine.h" #include "Basics/Exceptions.h" #include "Basics/FileUtils.h" @@ -78,8 +79,14 @@ std::string const RocksDBEngine::FeatureName("RocksDBEngine"); RocksDBEngine::RocksDBEngine(application_features::ApplicationServer* server) : StorageEngine(server, EngineName, FeatureName, new RocksDBIndexFactory()), _db(nullptr), - _cmp(new RocksDBComparator()) { - // inherits order from StorageEngine + _cmp(new RocksDBComparator()), + _maxTransactionSize((std::numeric_limits::max)()), + _intermediateTransactionCommitSize(32 * 1024 * 1024), + _intermediateTransactionCommitCount(100000), + _intermediateTransactionCommitEnabled(false) { + // inherits order from StorageEngine but requires RocksDBOption that are used + // to configure this Engine and the MMFiles PesistentIndexFeature + startsAfter("RocksDBOption"); } RocksDBEngine::~RocksDBEngine() { delete _db; } @@ -93,29 +100,24 @@ void RocksDBEngine::collectOptions( options->addSection("rocksdb", "RocksDB engine specific configuration"); // control transaction size for RocksDB engine - _maxTransactionSize = - std::numeric_limits::max(); // set sensible default value here options->addOption("--rocksdb.max-transaction-size", "transaction size limit (in bytes)", new 
UInt64Parameter(&_maxTransactionSize));
 
-  // control intermediate transactions in RocksDB
-  _intermediateTransactionSize = _maxTransactionSize * 0.8;
   options->addOption(
-      "--rocksdb.intermediate-transaction-count",
-      "an intermediate commit will be triend if this count is reached",
-      new UInt64Parameter(&_intermediateTransactionSize));
+      "--rocksdb.intermediate-transaction-size",
+      "an intermediate commit will be tried when a transaction has accumulated operations of this size (in bytes)",
+      new UInt64Parameter(&_intermediateTransactionCommitSize));
 
   options->addOption(
       "--rocksdb.intermediate-transaction-count",
-      "an intermediate commit will be triend if this count is reached",
-      new UInt64Parameter(&_intermediateTransactionCount));
-  _intermediateTransactionCount = 100 * 1000;
+      "an intermediate commit will be tried when this number of operations is reached in a transaction",
+      new UInt64Parameter(&_intermediateTransactionCommitCount));
+  _intermediateTransactionCommitCount = 100 * 1000;
 
-  _intermediateTransactionEnabled = false;
   options->addOption("--rocksdb.intermediate-transaction",
                      "enable intermediate transactions",
-                     new BooleanParameter(&_intermediateTransactionEnabled));
+                     new BooleanParameter(&_intermediateTransactionCommitEnabled));
 }
 
 // validate the storage engine's specific options
@@ -140,16 +142,36 @@ void RocksDBEngine::start() {
   }
 
   // set the database sub-directory for RocksDB
-  auto databasePathFeature =
+  auto* databasePathFeature =
       ApplicationServer::getFeature<DatabasePathFeature>("DatabasePath");
   _path = databasePathFeature->subdirectoryName("engine-rocksdb");
 
   LOG_TOPIC(TRACE, arangodb::Logger::STARTUP) << "initializing rocksdb, path: " << _path;
 
+  double counter_sync_seconds = 2.5;
   rocksdb::TransactionDBOptions transactionOptions;
-  double counter_sync_seconds = 2.5;
 
+  // options imported from RocksDBOptionFeature
+  auto* opts = ApplicationServer::getFeature<arangodb::RocksDBOptionFeature>("RocksDBOption");
+  _options.write_buffer_size = static_cast<size_t>(opts->_writeBufferSize);
+  _options.max_write_buffer_number = static_cast<int>(opts->_maxWriteBufferNumber);
+  _options.delayed_write_rate = opts->_delayedWriteRate;
+  _options.min_write_buffer_number_to_merge = static_cast<int>(opts->_minWriteBufferNumberToMerge);
+  _options.num_levels = static_cast<int>(opts->_numLevels);
+  _options.max_bytes_for_level_base = opts->_maxBytesForLevelBase;
+  _options.max_bytes_for_level_multiplier = static_cast<double>(opts->_maxBytesForLevelMultiplier);
+  _options.verify_checksums_in_compaction = opts->_verifyChecksumsInCompaction;
+  _options.optimize_filters_for_hits = opts->_optimizeFiltersForHits;
+
+  _options.base_background_compactions = static_cast<int>(opts->_baseBackgroundCompactions);
+  _options.max_background_compactions = static_cast<int>(opts->_maxBackgroundCompactions);
+
+  _options.max_log_file_size = static_cast<size_t>(opts->_maxLogFileSize);
+  _options.keep_log_file_num = static_cast<size_t>(opts->_keepLogFileNum);
+  _options.log_file_time_to_roll = static_cast<size_t>(opts->_logFileTimeToRoll);
+  _options.compaction_readahead_size = static_cast<size_t>(opts->_compactionReadaheadSize);
+
   _options.create_if_missing = true;
   _options.max_open_files = -1;
   _options.comparator = _cmp.get();
@@ -207,8 +229,8 @@ transaction::ContextData* RocksDBEngine::createTransactionContextData() {
 TransactionState* RocksDBEngine::createTransactionState(
     TRI_vocbase_t* vocbase) {
   return new RocksDBTransactionState(
-      vocbase, _maxTransactionSize, _intermediateTransactionEnabled,
-      _intermediateTransactionSize, _intermediateTransactionCount);
+      vocbase, _maxTransactionSize, _intermediateTransactionCommitEnabled,
+      _intermediateTransactionCommitSize,
_intermediateTransactionCommitCount); } TransactionCollection* RocksDBEngine::createTransactionCollection( @@ -624,18 +646,6 @@ void RocksDBEngine::createIndex(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId, TRI_idx_iid_t indexId, arangodb::velocypack::Slice const& data) { - /* - rocksdb::WriteOptions options; // TODO: check which options would make sense - auto key = RocksDBKey::Index(vocbase->id(), collectionId, indexId); - auto value = RocksDBValue::Index(data); - - rocksdb::Status res = _db->Put(options, key.string(), value.string()); - auto result = rocksutils::convertStatus(res); - if (!result.ok()) { - THROW_ARANGO_EXCEPTION(result.errorNumber()); - } - */ - // THROW_ARANGO_NOT_YET_IMPLEMENTED(); } void RocksDBEngine::dropIndex(TRI_vocbase_t* vocbase, diff --git a/arangod/RocksDBEngine/RocksDBEngine.h b/arangod/RocksDBEngine/RocksDBEngine.h index e1f3271924..0c9ba9433a 100644 --- a/arangod/RocksDBEngine/RocksDBEngine.h +++ b/arangod/RocksDBEngine/RocksDBEngine.h @@ -266,9 +266,9 @@ class RocksDBEngine final : public StorageEngine { std::unique_ptr _counterManager; // tracks the count of documents in collections uint64_t _maxTransactionSize; // maximum allowed size for a transaction - uint64_t _intermediateTransactionSize; // maximum size for a transaction before a intermediate commit will be tried - uint64_t _intermediateTransactionCount; // limit of transaction count for intermediate commit - bool _intermediateTransactionEnabled; // allow usage of intermediate commits + uint64_t _intermediateTransactionCommitSize; // maximum size for a transaction before a intermediate commit will be tried + uint64_t _intermediateTransactionCommitCount; // limit of transaction count for intermediate commit + bool _intermediateTransactionCommitEnabled; // allow usage of intermediate commits }; } #endif diff --git a/arangod/RocksDBEngine/RocksDBHashIndex.h b/arangod/RocksDBEngine/RocksDBHashIndex.h index aeb0f52e5a..4c8e89f267 100644 --- a/arangod/RocksDBEngine/RocksDBHashIndex.h +++ b/arangod/RocksDBEngine/RocksDBHashIndex.h @@ -43,7 +43,7 @@ class RocksDBHashIndex final : public RocksDBVPackIndex { bool matchesDefinition(VPackSlice const& info) const override; - bool isSorted() const override { return false; } + bool isSorted() const override { return true; } }; } diff --git a/arangod/RocksDBEngine/RocksDBIndexFactory.cpp b/arangod/RocksDBEngine/RocksDBIndexFactory.cpp index e70b1951a3..3a49cc6215 100644 --- a/arangod/RocksDBEngine/RocksDBIndexFactory.cpp +++ b/arangod/RocksDBEngine/RocksDBIndexFactory.cpp @@ -264,12 +264,14 @@ int RocksDBIndexFactory::enhanceIndexDefinition(VPackSlice const definition, enhanced.add("objectId", VPackValue(std::to_string(TRI_NewTickServer()))); } - } else { + } + // breaks lookupIndex() + /*else { if (!definition.hasKey("objectId")) { // objectId missing, but must be present return TRI_ERROR_INTERNAL; } - } + }*/ enhanced.add("type", VPackValue(Index::oldtypeName(type))); diff --git a/arangod/RocksDBEngine/RocksDBPrimaryIndex.cpp b/arangod/RocksDBEngine/RocksDBPrimaryIndex.cpp index b9a14e19bd..e932e724c8 100644 --- a/arangod/RocksDBEngine/RocksDBPrimaryIndex.cpp +++ b/arangod/RocksDBEngine/RocksDBPrimaryIndex.cpp @@ -191,7 +191,9 @@ RocksDBAnyIndexIterator::RocksDBAnyIndexIterator( ManagedDocumentResult* mmdr, RocksDBPrimaryIndex const* index) : IndexIterator(collection, trx, mmdr, index), _cmp(index->_cmp), - _bounds(RocksDBKeyBounds::PrimaryIndex(index->objectId())) { + _bounds(RocksDBKeyBounds::PrimaryIndex(index->objectId())), + _total(0), + _returned(0) { 
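The member renames above (_intermediateTransaction* to _intermediateTransactionCommit*) make the semantics explicit: _maxTransactionSize is a hard limit that fails the transaction, while the two commit thresholds only decide when an intermediate commit may be attempted. A hypothetical helper, not the actual RocksDBTransactionState code, illustrating how the three knobs relate:

    enum class TxAction { Continue, IntermediateCommit, Abort };

    // newSize is the transaction's accumulated byte size including the
    // operation being added; the hard-limit check mirrors addOperation()
    TxAction classify(uint64_t newSize, uint64_t numOperations,
                      uint64_t maxTransactionSize,
                      uint64_t intermediateCommitSize,
                      uint64_t intermediateCommitCount,
                      bool intermediateCommitEnabled) {
      if (maxTransactionSize < newSize) {
        return TxAction::Abort;  // TRI_ERROR_RESOURCE_LIMIT, as below
      }
      if (intermediateCommitEnabled &&
          (newSize >= intermediateCommitSize ||
           numOperations >= intermediateCommitCount)) {
        return TxAction::IntermediateCommit;
      }
      return TxAction::Continue;
    }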
// acquire rocksdb transaction RocksDBTransactionState* state = rocksutils::toRocksTransactionState(trx); rocksdb::Transaction* rtrx = state->rocksTransaction(); diff --git a/arangod/RocksDBEngine/RocksDBPrimaryIndex.h b/arangod/RocksDBEngine/RocksDBPrimaryIndex.h index 5ccae903f8..aab926bc2a 100644 --- a/arangod/RocksDBEngine/RocksDBPrimaryIndex.h +++ b/arangod/RocksDBEngine/RocksDBPrimaryIndex.h @@ -119,7 +119,8 @@ class RocksDBAnyIndexIterator final : public IndexIterator { RocksDBComparator const* _cmp; std::unique_ptr _iterator; RocksDBKeyBounds _bounds; - uint64_t _total, _returned; + uint64_t _total; + uint64_t _returned; }; class RocksDBPrimaryIndex final : public RocksDBIndex { diff --git a/arangod/RocksDBEngine/RocksDBTransactionState.cpp b/arangod/RocksDBEngine/RocksDBTransactionState.cpp index ec8fdda3f1..89c9a56730 100644 --- a/arangod/RocksDBEngine/RocksDBTransactionState.cpp +++ b/arangod/RocksDBEngine/RocksDBTransactionState.cpp @@ -189,8 +189,6 @@ Result RocksDBTransactionState::commitTransaction( _cacheTx = nullptr; } - // LOG_TOPIC(ERR, Logger::FIXME) << "#" << _id << " COMMIT"; - rocksdb::Snapshot const* snap = this->_rocksReadOptions.snapshot; TRI_ASSERT(snap != nullptr); @@ -220,12 +218,6 @@ Result RocksDBTransactionState::commitTransaction( } updateStatus(transaction::Status::COMMITTED); - - // if a write query, clear the query cache for the participating collections - if (AccessMode::isWriteOrExclusive(_type) && !_collections.empty() && - arangodb::aql::QueryCache::instance()->mayBeActive()) { - clearQueryCache(); - } } unuseCollections(_nestingLevel); @@ -254,8 +246,6 @@ Result RocksDBTransactionState::abortTransaction( _cacheTx = nullptr; } - // LOG_TOPIC(ERR, Logger::FIXME) << "#" << _id << " ABORT"; - updateStatus(transaction::Status::ABORTED); if (hasOperations()) { @@ -280,9 +270,9 @@ RocksDBOperationResult RocksDBTransactionState::addOperation( uint64_t newSize = _transactionSize + operationSize + keySize; if (_maxTransactionSize < newSize) { // we hit the transaction size limit - std::string message = "maximal transaction size limit of " + - std::to_string(_maxTransactionSize) + - " bytes reached!"; + std::string message = + "aborting transaction because maximal transaction size limit of " + + std::to_string(_maxTransactionSize) + " bytes is reached"; res.reset(TRI_ERROR_RESOURCE_LIMIT, message); return res; } @@ -299,6 +289,12 @@ RocksDBOperationResult RocksDBTransactionState::addOperation( // should not fail or fail with exception collection->addOperation(operationType, operationSize, revisionId); + // clear the query cache for this collection + if (arangodb::aql::QueryCache::instance()->mayBeActive()) { + arangodb::aql::QueryCache::instance()->invalidate( + _vocbase, collection->collectionName()); + } + switch (operationType) { case TRI_VOC_DOCUMENT_OPERATION_UNKNOWN: break; diff --git a/arangod/RocksDBEngine/RocksDBVPackIndex.cpp b/arangod/RocksDBEngine/RocksDBVPackIndex.cpp index 24ec2df800..9848d1d78d 100644 --- a/arangod/RocksDBEngine/RocksDBVPackIndex.cpp +++ b/arangod/RocksDBEngine/RocksDBVPackIndex.cpp @@ -835,11 +835,11 @@ bool RocksDBVPackIndex::supportsFilterCondition( arangodb::aql::Variable const* reference, size_t itemsInIndex, size_t& estimatedItems, double& estimatedCost) const { // HashIndex has different semantics - if (this->type() == Index::TRI_IDX_TYPE_HASH_INDEX) { + /*if (this->type() == Index::TRI_IDX_TYPE_HASH_INDEX) { SimpleAttributeEqualityMatcher matcher(_fields); return matcher.matchAll(this, node, reference, itemsInIndex, 
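Query-cache maintenance moves from commitTransaction() into addOperation() in the hunks above, so cache entries for a collection are invalidated eagerly as soon as the collection is modified, rather than once for all participating collections at commit time. The added call is:

    // per-operation invalidation, as added to addOperation() above
    if (arangodb::aql::QueryCache::instance()->mayBeActive()) {
      arangodb::aql::QueryCache::instance()->invalidate(
          _vocbase, collection->collectionName());
    }

One consequence worth noting: a transaction that later aborts has already invalidated the cache entries for the collections it touched, which is conservative but safe.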
estimatedItems, estimatedCost); - } + }*/ std::unordered_map> found; std::unordered_set nonNullAttributes; @@ -1206,10 +1206,10 @@ arangodb::aql::AstNode* RocksDBVPackIndex::specializeCondition( arangodb::aql::AstNode* node, arangodb::aql::Variable const* reference) const { // HashIndex uses slightly different semantics - if (this->type() == Index::TRI_IDX_TYPE_HASH_INDEX) { + /*if (this->type() == Index::TRI_IDX_TYPE_HASH_INDEX) { SimpleAttributeEqualityMatcher matcher(_fields); return matcher.specializeAll(this, node, reference); - } + }*/ std::unordered_map> found; std::unordered_set nonNullAttributes; diff --git a/js/apps/system/_admin/aardvark/APP/frontend/js/views/dashboardView.js b/js/apps/system/_admin/aardvark/APP/frontend/js/views/dashboardView.js index 7df5c54173..c0b2053d1d 100644 --- a/js/apps/system/_admin/aardvark/APP/frontend/js/views/dashboardView.js +++ b/js/apps/system/_admin/aardvark/APP/frontend/js/views/dashboardView.js @@ -991,6 +991,7 @@ template: templateEngine.createTemplate('dashboardView.ejs'), render: function (modalView) { + this.delegateEvents(this.events); var callback = function (enabled, modalView) { if (!modalView) { $(this.el).html(this.template.render()); diff --git a/js/client/modules/@arangodb/testing.js b/js/client/modules/@arangodb/testing.js index 7e81082c02..a7725b53d9 100644 --- a/js/client/modules/@arangodb/testing.js +++ b/js/client/modules/@arangodb/testing.js @@ -511,11 +511,13 @@ function unitTest (cases, options) { print("not cleaning up since we didn't start the server ourselves\n"); } - try { - yaml.safeDump(JSON.parse(JSON.stringify(results))); - } catch (err) { - print(RED + 'cannot dump results: ' + String(err) + RESET); - print(RED + require('internal').inspect(results) + RESET); + if (options.extremeVerbosity === true) { + try { + print(yaml.safeDump(JSON.parse(JSON.stringify(results)))); + } catch (err) { + print(RED + 'cannot dump results: ' + String(err) + RESET); + print(RED + require('internal').inspect(results) + RESET); + } } if (jsonReply === true) { diff --git a/js/common/tests/shell/shell-rocksdb-index.js b/js/common/tests/shell/shell-persistent-index-mmfiles.js similarity index 100% rename from js/common/tests/shell/shell-rocksdb-index.js rename to js/common/tests/shell/shell-persistent-index-mmfiles.js diff --git a/js/server/modules/@arangodb/arango-collection.js b/js/server/modules/@arangodb/arango-collection.js index 2e88cc6880..1dcc6cb0d0 100644 --- a/js/server/modules/@arangodb/arango-collection.js +++ b/js/server/modules/@arangodb/arango-collection.js @@ -515,20 +515,26 @@ ArangoCollection.prototype.lookupFulltextIndex = function (field, minLength) { }; // ////////////////////////////////////////////////////////////////////////////// -// / @brief getIndex() wrapper to ensure consistency between mmfiles on rocksdb +// / @brief getIndex() wrapper to ensure consistency between mmfiles and rocksdb // ////////////////////////////////////////////////////////////////////////////// ArangoCollection.prototype.getIndexes = function (withFigures) { 'use strict'; var indexes = this.getIndexesPrivate(withFigures); if (this.type() === 3) { + // edge collections var result = []; for (var i = 0; i < indexes.length; i++) { - if(indexes[i].type === "edge") { + if (indexes[i].type === "edge") { if (indexes[i].fields.length === 1 && indexes[i].fields[0] === "_from") { + // we got two edge indexes. 
now pretend we only have one, and + // make it claim it is created on _from and _to indexes[i].fields.push("_to"); result.push(indexes[i]); + } else if (indexes[i].fields.length === 2) { + // we have an edge index with two attributes + result.push(indexes[i]); } } else { result.push(indexes[i]); diff --git a/js/server/tests/aql/aql-hash-noncluster.js b/js/server/tests/aql/aql-hash-noncluster.js index bcd863eed7..b6397193ec 100644 --- a/js/server/tests/aql/aql-hash-noncluster.js +++ b/js/server/tests/aql/aql-hash-noncluster.js @@ -29,6 +29,7 @@ //////////////////////////////////////////////////////////////////////////////// var internal = require("internal"); +var db = internal.db; var jsunity = require("jsunity"); var helper = require("@arangodb/aql-helper"); var getQueryResults = helper.getQueryResults; @@ -275,7 +276,11 @@ function ahuacatlHashTestSuite () { assertEqual(expected, actual); - assertEqual([ "SingletonNode", "EnumerateCollectionNode", "CalculationNode", "FilterNode", "CalculationNode", "SortNode", "CalculationNode", "ReturnNode" ], explain(query)); + if (db._engine().name === "rocksdb") { + assertEqual([ "SingletonNode", "IndexNode", "CalculationNode", "FilterNode", "CalculationNode", "SortNode", "CalculationNode", "ReturnNode" ], explain(query)); + } else { + assertEqual([ "SingletonNode", "EnumerateCollectionNode", "CalculationNode", "FilterNode", "CalculationNode", "SortNode", "CalculationNode", "ReturnNode" ], explain(query)); + } }, //////////////////////////////////////////////////////////////////////////////// @@ -288,8 +293,12 @@ function ahuacatlHashTestSuite () { var actual = getQueryResults(query); assertEqual(expected, actual); - - assertEqual([ "SingletonNode", "EnumerateCollectionNode", "CalculationNode", "FilterNode", "CalculationNode", "SortNode", "CalculationNode", "ReturnNode" ], explain(query)); + + if (db._engine().name === "rocksdb") { + assertEqual([ "SingletonNode", "IndexNode", "CalculationNode", "FilterNode", "CalculationNode", "SortNode", "CalculationNode", "ReturnNode" ], explain(query)); + } else { + assertEqual([ "SingletonNode", "EnumerateCollectionNode", "CalculationNode", "FilterNode", "CalculationNode", "SortNode", "CalculationNode", "ReturnNode" ], explain(query)); + } }, //////////////////////////////////////////////////////////////////////////////// @@ -303,7 +312,11 @@ function ahuacatlHashTestSuite () { assertEqual(expected, actual); - assertEqual([ "SingletonNode", "EnumerateCollectionNode", "CalculationNode", "FilterNode", "CalculationNode", "ReturnNode" ], explain(query)); + if (db._engine().name === "rocksdb") { + assertEqual([ "SingletonNode", "IndexNode", "CalculationNode", "FilterNode", "CalculationNode", "ReturnNode" ], explain(query)); + } else { + assertEqual([ "SingletonNode", "EnumerateCollectionNode", "CalculationNode", "FilterNode", "CalculationNode", "ReturnNode" ], explain(query)); + } }, //////////////////////////////////////////////////////////////////////////////// @@ -316,8 +329,12 @@ function ahuacatlHashTestSuite () { var actual = getQueryResults(query); assertEqual(expected, actual); - - assertEqual([ "SingletonNode", "EnumerateCollectionNode", "CalculationNode", "FilterNode", "CalculationNode", "SortNode", "CalculationNode", "ReturnNode" ], explain(query)); + + if (db._engine().name === "rocksdb") { + assertEqual([ "SingletonNode", "IndexNode", "CalculationNode", "FilterNode", "CalculationNode", "SortNode", "CalculationNode", "ReturnNode" ], explain(query)); + } else { + assertEqual([ "SingletonNode", 
"EnumerateCollectionNode", "CalculationNode", "FilterNode", "CalculationNode", "SortNode", "CalculationNode", "ReturnNode" ], explain(query)); + } }, //////////////////////////////////////////////////////////////////////////////// @@ -330,8 +347,12 @@ function ahuacatlHashTestSuite () { var actual = getQueryResults(query); assertEqual(expected, actual); - - assertEqual([ "SingletonNode", "EnumerateCollectionNode", "CalculationNode", "FilterNode", "CalculationNode", "SortNode", "CalculationNode", "ReturnNode" ], explain(query)); + + if (db._engine().name === "rocksdb") { + assertEqual([ "SingletonNode", "IndexNode", "CalculationNode", "FilterNode", "CalculationNode", "SortNode", "CalculationNode", "ReturnNode" ], explain(query)); + } else { + assertEqual([ "SingletonNode", "EnumerateCollectionNode", "CalculationNode", "FilterNode", "CalculationNode", "SortNode", "CalculationNode", "ReturnNode" ], explain(query)); + } }, testInvalidValuesinList : function () { diff --git a/js/server/tests/aql/aql-optimizer-collect-methods.js b/js/server/tests/aql/aql-optimizer-collect-methods.js index 91c0437589..27202a2094 100644 --- a/js/server/tests/aql/aql-optimizer-collect-methods.js +++ b/js/server/tests/aql/aql-optimizer-collect-methods.js @@ -1,5 +1,5 @@ /*jshint globalstrict:false, strict:false, maxlen: 500 */ -/*global assertEqual, AQL_EXECUTE, AQL_EXPLAIN */ +/*global assertEqual, assertFalse, AQL_EXECUTE, AQL_EXPLAIN */ //////////////////////////////////////////////////////////////////////////////// /// @brief tests for COLLECT w/ COUNT @@ -30,6 +30,7 @@ var jsunity = require("jsunity"); var db = require("@arangodb").db; +var internal = require("internal"); //////////////////////////////////////////////////////////////////////////////// /// @brief test suite @@ -147,7 +148,11 @@ function optimizerCollectMethodsTestSuite () { /// @brief expect hash COLLECT //////////////////////////////////////////////////////////////////////////////// - testHashedWithNonSortedIndex : function () { + testHashedWithNonSortedIndexMMFiles : function () { + if (db._engine().name !== "mmfiles") { + return; + } + c.ensureIndex({ type: "hash", fields: [ "group" ] }); c.ensureIndex({ type: "hash", fields: [ "group", "value" ] }); @@ -182,6 +187,53 @@ function optimizerCollectMethodsTestSuite () { assertEqual(query[1], results.json.length); }); }, + + + //////////////////////////////////////////////////////////////////////////////// + /// @brief expect hash COLLECT + //////////////////////////////////////////////////////////////////////////////// + + testHashedWithNonSortedIndexRocksDB : function () { + if (db._engine().name !== "rocksdb") { + return; + } + + c.ensureIndex({ type: "hash", fields: [ "group" ] }); + c.ensureIndex({ type: "hash", fields: [ "group", "value" ] }); + + var queries = [ + [ "FOR j IN " + c.name() + " COLLECT value = j RETURN value", 1500, false], + [ "FOR j IN " + c.name() + " COLLECT value = j._key RETURN value", 1500, false], + [ "FOR j IN " + c.name() + " COLLECT value = j.group RETURN value", 10, true], + [ "FOR j IN " + c.name() + " COLLECT value1 = j.group, value2 = j.value RETURN [ value1, value2 ]", 1500, true ], + [ "FOR j IN " + c.name() + " COLLECT value = j.group WITH COUNT INTO l RETURN [ value, l ]", 10, true ], + [ "FOR j IN " + c.name() + " COLLECT value1 = j.group, value2 = j.value WITH COUNT INTO l RETURN [ value1, value2, l ]", 1500, true ] + ]; + + queries.forEach(function(query) { + var plan = AQL_EXPLAIN(query[0]).plan; + + var aggregateNodes = 0; + var sortNodes = 0; + 
plan.nodes.map(function(node) { + if (node.type === "CollectNode") { + ++aggregateNodes; + assertFalse(query[2] && node.collectOptions.method !== "sorted"); + assertEqual(query[2] ? "sorted" : "hash", + node.collectOptions.method, query[0]); + } + if (node.type === "SortNode") { + ++sortNodes; + } + }); + + assertEqual(1, aggregateNodes); + assertEqual(query[2] ? 0 : 1, sortNodes); + + var results = AQL_EXECUTE(query[0]); + assertEqual(query[1], results.json.length); + }); + }, //////////////////////////////////////////////////////////////////////////////// /// @brief expect sorted COLLECT diff --git a/js/server/tests/aql/aql-optimizer-indexes-multi.js b/js/server/tests/aql/aql-optimizer-indexes-multi.js index 91122c0965..d05e1d5db8 100644 --- a/js/server/tests/aql/aql-optimizer-indexes-multi.js +++ b/js/server/tests/aql/aql-optimizer-indexes-multi.js @@ -1292,8 +1292,10 @@ function optimizerIndexesMultiTestSuite () { // Furthermore, we check the type of expression in the CalcNode // and the number of subnodes: assertEqual("CalculationNode", plan.nodes[2].type, query); - assertEqual("SortNode", plan.nodes[3].type, query); - + if (db._engine().name !== "rocksdb") { + assertEqual("SortNode", plan.nodes[3].type, query); + } + var results = AQL_EXECUTE(query); var correct = makeResult(maker).map(function(x) { return x.a; }); assertEqual(correct, results.json, query); @@ -1350,7 +1352,9 @@ function optimizerIndexesMultiTestSuite () { // Furthermore, we check the type of expression in the CalcNode // and the number of subnodes: assertEqual("CalculationNode", plan.nodes[2].type, query); - assertEqual("SortNode", plan.nodes[3].type, query); + if (db._engine().name !== "rocksdb") { + assertEqual("SortNode", plan.nodes[3].type, query); + } var results = AQL_EXECUTE(query); var correct = makeResult(maker).map(function(x) { return x.a; }); assertEqual(correct, results.json, query); diff --git a/js/server/tests/aql/aql-optimizer-indexes-sort.js b/js/server/tests/aql/aql-optimizer-indexes-sort.js index 0c14d71cd7..39bd36f2a7 100644 --- a/js/server/tests/aql/aql-optimizer-indexes-sort.js +++ b/js/server/tests/aql/aql-optimizer-indexes-sort.js @@ -200,7 +200,11 @@ function optimizerIndexesSortTestSuite () { /// @brief test index usage //////////////////////////////////////////////////////////////////////////////// - testSingleAttributeSortNotOptimizedAway : function () { + testSingleAttributeSortNotOptimizedAwayMMFiles : function () { + if (db._engine().name !== "mmfiles") { + return; + } + AQL_EXECUTE("FOR i IN " + c.name() + " UPDATE i WITH { value2: i.value, value3: i.value } IN " + c.name()); c.ensureHashIndex("value2"); @@ -229,6 +233,50 @@ function optimizerIndexesSortTestSuite () { }); }, + testSingleAttributeSortNotOptimizedAwayRocksDB : function () { + if (db._engine().name !== "rocksdb") { + return; + } + + AQL_EXECUTE("FOR i IN " + c.name() + " UPDATE i WITH { value2: i.value, value3: i.value } IN " + c.name()); + + c.ensureHashIndex("value2"); + c.ensureHashIndex("value3"); + + var queries = [ + "FOR j IN " + c.name() + " FILTER j.value2 == 2 FOR i IN " + c.name() + " FILTER i.value2 == 2 SORT i.value2 RETURN i.value2", + "FOR i IN " + c.name() + " FILTER i.value2 == 2 || i.value2 == 3 SORT i.value3 RETURN i.value2", + "FOR i IN " + c.name() + " FILTER i.value2 == 2 SORT i.value2, i.value3 RETURN i.value2", + "FOR i IN " + c.name() + " FILTER i.value2 == 2 SORT i.value3 RETURN i.value2", + "FOR i IN " + c.name() + " FILTER i.value2 == 2 SORT i.value2, PASSTHRU(1) RETURN i.value2", + "FOR i IN 
" + c.name() + " FILTER i.value3 == 2 SORT i.value2 RETURN i.value2", + "FOR i IN " + c.name() + " FILTER i.value3 == 2 SORT i.value3, i.value2 RETURN i.value2", + "FOR i IN " + c.name() + " FILTER i.value3 == 2 SORT PASSTHRU(1) RETURN i.value2" + ]; + + queries.forEach(function(query) { + var plan = AQL_EXPLAIN(query).plan; + var nodeTypes = plan.nodes.map(function(node) { + return node.type; + }); + + assertNotEqual(-1, nodeTypes.indexOf("IndexNode"), query); + assertNotEqual(-1, nodeTypes.indexOf("SortNode"), query); + }); + + queries = ["FOR i IN " + c.name() + " FILTER i.value2 == 2 || i.value2 == 3 SORT i.value2 RETURN i.value2"]; + + queries.forEach(function(query) { + var plan = AQL_EXPLAIN(query).plan; + var nodeTypes = plan.nodes.map(function(node) { + return node.type; + }); + + assertNotEqual(-1, nodeTypes.indexOf("IndexNode"), query); + assertEqual(-1, nodeTypes.indexOf("SortNode"), query); + }); + }, + //////////////////////////////////////////////////////////////////////////////// /// @brief test index usage //////////////////////////////////////////////////////////////////////////////// @@ -355,7 +403,11 @@ function optimizerIndexesSortTestSuite () { /// @brief test index usage //////////////////////////////////////////////////////////////////////////////// - testCannotUseHashIndexForSortIfConstRangesMore : function () { + testCannotUseHashIndexForSortIfConstRangesMoreMMFiles : function () { + if (db._engine().name !== "mmfiles") { + return; + } + c.ensureIndex({ type: "hash", fields: [ "value2", "value3", "value4" ] }); var queries = [ @@ -396,6 +448,52 @@ function optimizerIndexesSortTestSuite () { } }); }, + + //////////////////////////////////////////////////////////////////////////////// + /// @brief test index usage + //////////////////////////////////////////////////////////////////////////////// + + testCannotUseHashIndexForSortIfConstRangesMoreRocksDB : function () { + if (db._engine().name !== "rocksdb") { + return; + } + + c.ensureIndex({ type: "hash", fields: [ "value2", "value3", "value4" ] }); + + var queries = [ + [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 SORT i.value3 ASC RETURN i.value2", true ], + [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 SORT i.value3 DESC RETURN i.value2", true ], + [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 SORT i.value3 ASC, i.value4 ASC RETURN i.value2", true ], + [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 SORT i.value3 DESC, i.value4 DESC RETURN i.value2", true ], + + [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value4 ASC RETURN i.value2", true ], + [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value4 DESC RETURN i.value2", true ], + [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value3 ASC RETURN i.value2", true ], + [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value3 DESC RETURN i.value2", true ], + [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value3 ASC, i.value4 ASC RETURN i.value2", true ], + [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value3 DESC, i.value4 DESC RETURN i.value2" ,true ], + + [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value3 ASC RETURN i.value2", true ], + [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value3 DESC RETURN i.value2", true ], + [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 
SORT i.value3 ASC, i.value4 ASC RETURN i.value2", true ], + [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value3 DESC, i.value4 DESC RETURN i.value2", true ], + [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value4 ASC RETURN i.value2", true ], + [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value4 DESC RETURN i.value2", true ], + [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value2 ASC, i.value3 ASC, i.value4 ASC RETURN i.value2", true ], + [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value2 DESC, i.value3 DESC, i.value4 DESC RETURN i.value2", true ], + [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value2 ASC, i.value3 ASC, i.value4 DESC RETURN i.value2", true ] + ]; + + queries.forEach(function(query) { + var plan = AQL_EXPLAIN(query[0]).plan; + var nodeTypes = plan.nodes.map(function(node) { + return node.type; + }); + + assertEqual(-1, nodeTypes.indexOf("SortNode"), query[0]); + + }); + }, //////////////////////////////////////////////////////////////////////////////// /// @brief test index usage diff --git a/js/server/tests/aql/aql-optimizer-indexes.js b/js/server/tests/aql/aql-optimizer-indexes.js index 7c5889bf62..30ec3afc1f 100644 --- a/js/server/tests/aql/aql-optimizer-indexes.js +++ b/js/server/tests/aql/aql-optimizer-indexes.js @@ -946,7 +946,11 @@ function optimizerIndexesTestSuite () { walker(plan.nodes, function (node) { if (node.type === "IndexNode") { ++indexNodes; - assertEqual("hash", node.indexes[0].type); + if (db._engine().name === "rocksdb") { + assertNotEqual(["hash", "skiplist", "persistent"].indexOf(node.indexes[0].type), -1); + } else { + assertEqual("hash", node.indexes[0].type); + } } else if (node.type === "EnumerateCollectionNode") { ++collectionNodes; @@ -1032,12 +1036,18 @@ function optimizerIndexesTestSuite () { ++indexNodes; if (indexNodes === 1) { // skiplist must be used for the first FOR - assertEqual("skiplist", node.indexes[0].type); + if (db._engine().name === "rocksdb") { + assertNotEqual(["hash", "skiplist", "persistent"].indexOf(node.indexes[0].type), -1); + } else { + assertEqual("skiplist", node.indexes[0].type); + } assertEqual("i", node.outVariable.name); } else { - // second FOR should use a hash index - assertEqual("hash", node.indexes[0].type); + if (db._engine().name !== "rocksdb") {// all indexes were created equal + // second FOR should use a hash index + assertEqual("hash", node.indexes[0].type); + } assertEqual("j", node.outVariable.name); } } @@ -1111,11 +1121,19 @@ function optimizerIndexesTestSuite () { if (node.type === "IndexNode") { ++indexNodes; if (indexNodes === 1) { - assertEqual("hash", node.indexes[0].type); + if (db._engine().name === "rocksdb") { + assertNotEqual(["hash", "skiplist", "persistent"].indexOf(node.indexes[0].type), -1); + } else { + assertEqual("hash", node.indexes[0].type); + } assertEqual("i", node.outVariable.name); } else if (indexNodes === 2) { - assertEqual("hash", node.indexes[0].type); + if (db._engine().name === "rocksdb") { + assertNotEqual(["hash", "skiplist", "persistent"].indexOf(node.indexes[0].type), -1); + } else { + assertEqual("hash", node.indexes[0].type); + } assertEqual("j", node.outVariable.name); } else { @@ -1173,7 +1191,11 @@ function optimizerIndexesTestSuite () { walker(plan.nodes, function (node) { if (node.type === 
"IndexNode") { ++indexNodes; - assertEqual("hash", node.indexes[0].type); + if (db._engine().name === "rocksdb") { + assertNotEqual(["hash", "skiplist", "persistent"].indexOf(node.indexes[0].type), -1); + } else { + assertEqual("hash", node.indexes[0].type); + } } else if (node.type === "EnumerateCollectionNode") { ++collectionNodes; @@ -1263,7 +1285,11 @@ function optimizerIndexesTestSuite () { var plan = AQL_EXPLAIN(query).plan; var nodeTypes = plan.nodes.map(function(node) { if (node.type === "IndexNode") { - assertEqual("hash", node.indexes[0].type); + if (db._engine().name === "rocksdb") { + assertNotEqual(["hash", "skiplist", "persistent"].indexOf(node.indexes[0].type), -1); + } else { + assertEqual("hash", node.indexes[0].type); + } assertFalse(node.indexes[0].unique); } return node.type; @@ -1644,12 +1670,20 @@ function optimizerIndexesTestSuite () { var nodeTypes = plan.nodes.map(function(node) { return node.type; }); - assertEqual(-1, nodeTypes.indexOf("IndexNode"), query); + // rocksdb supports prefix filtering in the hash index + if (db._engine().name !== "rocksdb") { + assertEqual(-1, nodeTypes.indexOf("IndexNode"), query); + } var results = AQL_EXECUTE(query); assertEqual([ 1, 2 ], results.json.sort(), query); - assertEqual(0, results.stats.scannedIndex); - assertTrue(results.stats.scannedFull > 0); + if (db._engine().name === "rocksdb") { + assertEqual(2, results.stats.scannedIndex); + assertEqual(0, results.stats.scannedFull); + } else { + assertEqual(0, results.stats.scannedIndex); + assertTrue(results.stats.scannedFull > 0); + } }, //////////////////////////////////////////////////////////////////////////////// diff --git a/js/server/tests/aql/aql-optimizer-rule-use-index-for-sort.js b/js/server/tests/aql/aql-optimizer-rule-use-index-for-sort.js index 93890b123d..f009ce7490 100644 --- a/js/server/tests/aql/aql-optimizer-rule-use-index-for-sort.js +++ b/js/server/tests/aql/aql-optimizer-rule-use-index-for-sort.js @@ -29,6 +29,7 @@ //////////////////////////////////////////////////////////////////////////////// var internal = require("internal"); +var db = internal.db; var jsunity = require("jsunity"); var helper = require("@arangodb/aql-helper"); var isEqual = helper.isEqual; @@ -162,9 +163,12 @@ function optimizerRuleTestSuite() { [ "FOR v IN " + colName + " FILTER v.c == 1 SORT v.c RETURN 1", true, false ], [ "FOR v IN " + colName + " FILTER v.c == 1 SORT v.z RETURN 1", false, true ], [ "FOR v IN " + colName + " FILTER v.c == 1 SORT v.f RETURN 1", false, true ], - [ "FOR v IN " + colName + " FILTER v.y == 1 SORT v.z RETURN 1", false, true ], - [ "FOR v IN " + colName + " FILTER v.y == 1 SORT v.y RETURN 1", false, true ], - [ "FOR v IN " + colName + " FILTER v.z == 1 SORT v.y RETURN 1", false, true ], + [ "FOR v IN " + colName + " FILTER v.y == 1 SORT v.z RETURN 1", + db._engine().name === "rocksdb", db._engine().name !== "rocksdb" ], + [ "FOR v IN " + colName + " FILTER v.y == 1 SORT v.y RETURN 1", + db._engine().name === "rocksdb", db._engine().name !== "rocksdb" ], + [ "FOR v IN " + colName + " FILTER v.z == 1 SORT v.y RETURN 1", + db._engine().name === "rocksdb", db._engine().name !== "rocksdb" ], [ "FOR v IN " + colName + " FILTER v.z == 1 SORT v.z RETURN 1", false, true ], [ "FOR v IN " + colName + " FILTER v.y == 1 && v.z == 1 SORT v.y RETURN 1", true, false ], [ "FOR v IN " + colName + " FILTER v.y == 1 && v.z == 1 SORT v.z RETURN 1", true, false ], @@ -213,21 +217,25 @@ function optimizerRuleTestSuite() { var j; var queries = [ - ["FOR v IN " + colName + " SORT 
v.c RETURN [v.a, v.b]", true], + ["FOR v IN " + colName + " SORT v.c RETURN [v.a, v.b]", true, true], ["FOR v IN " + colName + " SORT v.b, v.a RETURN [v.a]", true], - ["FOR v IN " + colName + " SORT v.c RETURN [v.a, v.b]", true], + ["FOR v IN " + colName + " SORT v.c RETURN [v.a, v.b]", true, true], ["FOR v IN " + colName + " SORT v.a + 1 RETURN [v.a]", false], ["FOR v IN " + colName + " SORT CONCAT(TO_STRING(v.a), \"lol\") RETURN [v.a]", true], // TODO: limit blocks sort atm. - ["FOR v IN " + colName + " FILTER v.a > 2 LIMIT 3 SORT v.a RETURN [v.a]", false], + ["FOR v IN " + colName + " FILTER v.a > 2 LIMIT 3 SORT v.a RETURN [v.a]", true], ["FOR v IN " + colName + " FOR w IN " + colNameOther + " SORT v.a RETURN [v.a]", true] ]; queries.forEach(function(query) { var result = AQL_EXPLAIN(query[0], { }, paramIndexFromSort); - assertEqual([], removeAlwaysOnClusterRules(result.plan.rules), query); - if (query[1]) { + if (db._engine().name === "rocksdb" && query.length === 3 && query[2]) { + assertEqual(["use-index-for-sort"], removeAlwaysOnClusterRules(result.plan.rules), query); + } else { + assertEqual([], removeAlwaysOnClusterRules(result.plan.rules), query); + } + if (!query[1]) { var allresults = getQueryMultiplePlansAndExecutions(query[0], {}); for (j = 1; j < allresults.results.length; j++) { assertTrue(isEqual(allresults.results[0], diff --git a/js/server/tests/aql/aql-optimizer-rule-use-index-range.js b/js/server/tests/aql/aql-optimizer-rule-use-index-range.js index 44f630f63b..e4950bddac 100644 --- a/js/server/tests/aql/aql-optimizer-rule-use-index-range.js +++ b/js/server/tests/aql/aql-optimizer-rule-use-index-range.js @@ -32,6 +32,7 @@ var internal = require("internal"); var jsunity = require("jsunity"); var helper = require("@arangodb/aql-helper"); var removeAlwaysOnClusterRules = helper.removeAlwaysOnClusterRules; +var db = internal.db; //////////////////////////////////////////////////////////////////////////////// /// @brief test suite @@ -117,14 +118,16 @@ function optimizerRuleUseIndexRangeTester () { testRuleNoEffect : function () { var queries = [ - "FOR i IN UTUseIndexRangeNoInd FILTER i.a >= 2 RETURN i", - "FOR i IN UTUseIndexRangeNoInd FILTER i.a == 2 RETURN i", - "FOR i IN UTUseIndexRangeHashInd FILTER i.a >= 2 RETURN i" + ["FOR i IN UTUseIndexRangeNoInd FILTER i.a >= 2 RETURN i", true], + ["FOR i IN UTUseIndexRangeNoInd FILTER i.a == 2 RETURN i", true], + ["FOR i IN UTUseIndexRangeHashInd FILTER i.a >= 2 RETURN i", false] ]; queries.forEach(function(query) { - var result = AQL_EXPLAIN(query, { }, paramEnabled); - assertEqual([ ], removeAlwaysOnClusterRules(result.plan.rules), query); + var result = AQL_EXPLAIN(query[0], { }, paramEnabled); + if (db._engine().name !== "rocksdb" || query[1]) { + assertEqual([ ], removeAlwaysOnClusterRules(result.plan.rules), query); + } }); }, diff --git a/js/server/tests/aql/aql-queries-array.js b/js/server/tests/aql/aql-queries-array.js index 4da81a55fb..b4896502d1 100644 --- a/js/server/tests/aql/aql-queries-array.js +++ b/js/server/tests/aql/aql-queries-array.js @@ -443,7 +443,7 @@ function arrayIndexNonArraySuite () { var allIndexes = col.getIndexes(true); assertEqual(allIndexes.length, 2, "We have more than one index!"); var idx = allIndexes[1]; - if (! isCluster) { + if (! 
isCluster && db._engine().name === "mmfiles") { switch (idx.type) { case "hash": assertEqual(idx.figures.totalUsed, count); diff --git a/js/server/tests/aql/aql-queries-optimizer-in-noncluster.js b/js/server/tests/aql/aql-queries-optimizer-in-noncluster.js index cd1e854468..fb433268be 100644 --- a/js/server/tests/aql/aql-queries-optimizer-in-noncluster.js +++ b/js/server/tests/aql/aql-queries-optimizer-in-noncluster.js @@ -32,6 +32,7 @@ var jsunity = require("jsunity"); var internal = require("internal"); var helper = require("@arangodb/aql-helper"); var getQueryResults = helper.getQueryResults; +var db = internal.db; //////////////////////////////////////////////////////////////////////////////// /// @brief test suite @@ -912,7 +913,11 @@ function ahuacatlQueryOptimizerInTestSuite () { } c.ensureHashIndex("value"); var query = "FOR x IN " + cn + " FILTER (x.value > 3 || x.value < 90) RETURN x.value"; - ruleIsNotUsed(query); + if (db._engine().name === "rocksdb") { + ruleIsUsed(query); + } else { + ruleIsNotUsed(query); + } }, testOverlappingRangesListSkiplist2 : function () { @@ -945,7 +950,11 @@ function ahuacatlQueryOptimizerInTestSuite () { } c.ensureHashIndex("value"); var query = "FOR i IN " + cn + " FILTER i.value == 8 || i.value <= 7 RETURN i.value"; - ruleIsNotUsed(query); + if (db._engine().name === "rocksdb") { + ruleIsUsed(query); + } else { + ruleIsNotUsed(query); + } }, testNestedOrHashIndex : function () { diff --git a/js/server/tests/aql/aql-queries-optimizer-limit-noncluster.js b/js/server/tests/aql/aql-queries-optimizer-limit-noncluster.js index dd55765caf..947c934819 100644 --- a/js/server/tests/aql/aql-queries-optimizer-limit-noncluster.js +++ b/js/server/tests/aql/aql-queries-optimizer-limit-noncluster.js @@ -32,6 +32,7 @@ var jsunity = require("jsunity"); var internal = require("internal"); var helper = require("@arangodb/aql-helper"); var getQueryResults = helper.getQueryResults; +var db = internal.db; //////////////////////////////////////////////////////////////////////////////// /// @brief test suite @@ -472,7 +473,11 @@ function ahuacatlQueryOptimizerLimitTestSuite () { assertEqual(21, actual[1].value); assertEqual(29, actual[9].value); - assertEqual([ "SingletonNode", "EnumerateCollectionNode", "CalculationNode", "FilterNode", "LimitNode", "CalculationNode", "SortNode", "ReturnNode" ], explain(query)); + if (db._engine().name === "rocksdb") { + assertEqual([ "SingletonNode", "IndexNode", "CalculationNode", "FilterNode", "LimitNode", "CalculationNode", "SortNode", "ReturnNode" ], explain(query)); + } else { + assertEqual([ "SingletonNode", "EnumerateCollectionNode", "CalculationNode", "FilterNode", "LimitNode", "CalculationNode", "SortNode", "ReturnNode" ], explain(query)); + } }, //////////////////////////////////////////////////////////////////////////////// @@ -490,7 +495,11 @@ function ahuacatlQueryOptimizerLimitTestSuite () { assertEqual(docCount - 11 - i, actual[i].value); } - assertEqual([ "SingletonNode", "EnumerateCollectionNode", "CalculationNode", "SortNode", "LimitNode", "ReturnNode" ], explain(query)); + if (db._engine().name === "rocksdb") { + assertEqual([ "SingletonNode", "IndexNode", "CalculationNode", "LimitNode", "ReturnNode" ], explain(query)); + } else { + assertEqual([ "SingletonNode", "EnumerateCollectionNode", "CalculationNode", "SortNode", "LimitNode", "ReturnNode" ], explain(query)); + } }, //////////////////////////////////////////////////////////////////////////////// @@ -508,7 +517,13 @@ function ahuacatlQueryOptimizerLimitTestSuite 
() { assertEqual(21, actual[1].value); assertEqual(29, actual[9].value); - assertEqual([ "SingletonNode", "EnumerateCollectionNode", "CalculationNode", "FilterNode", "CalculationNode", "FilterNode", "LimitNode", "CalculationNode", "SortNode", "ReturnNode" ], explain(query)); + if (db._engine().name === "rocksdb") { + assertEqual([ "SingletonNode", "IndexNode", "CalculationNode", "FilterNode", "CalculationNode", "FilterNode", + "LimitNode", "CalculationNode", "SortNode", "ReturnNode" ], explain(query)); + } else { + assertEqual([ "SingletonNode", "EnumerateCollectionNode", "CalculationNode", "FilterNode", "CalculationNode", + "FilterNode", "LimitNode", "CalculationNode", "SortNode", "ReturnNode" ], explain(query)); + } }, //////////////////////////////////////////////////////////////////////////////// diff --git a/js/server/tests/recovery/create-collection-tmpfile.js b/js/server/tests/recovery/create-collection-tmpfile-mmfiles.js similarity index 100% rename from js/server/tests/recovery/create-collection-tmpfile.js rename to js/server/tests/recovery/create-collection-tmpfile-mmfiles.js diff --git a/js/server/tests/recovery/die-during-collector.js b/js/server/tests/recovery/die-during-collector-mmfiles.js similarity index 100% rename from js/server/tests/recovery/die-during-collector.js rename to js/server/tests/recovery/die-during-collector-mmfiles.js diff --git a/js/server/tests/recovery/drop-database-only-tmp.js b/js/server/tests/recovery/drop-database-only-tmp-mmfiles.js similarity index 100% rename from js/server/tests/recovery/drop-database-only-tmp.js rename to js/server/tests/recovery/drop-database-only-tmp-mmfiles.js diff --git a/js/server/tests/recovery/empty-logfiles.js b/js/server/tests/recovery/empty-logfiles-mmfiles.js similarity index 100% rename from js/server/tests/recovery/empty-logfiles.js rename to js/server/tests/recovery/empty-logfiles-mmfiles.js diff --git a/js/server/tests/recovery/foxx-directories.js b/js/server/tests/recovery/foxx-directories.js index 5107170d60..389ebf7773 100644 --- a/js/server/tests/recovery/foxx-directories.js +++ b/js/server/tests/recovery/foxx-directories.js @@ -83,7 +83,7 @@ function recoverySuite () { assertTrue(fs.isDirectory(fs.join(appPath, 'UnitTestsRecovery1'))); assertTrue(fs.isFile(fs.join(appPath, 'UnitTestsRecovery1', 'foo.json'))); - assertTrue(fs.isDirectory(fs.join(appPath, 'UnitTestsRecovery2'))); + assertFalse(fs.isDirectory(fs.join(appPath, 'UnitTestsRecovery2'))); assertFalse(fs.isFile(fs.join(appPath, 'UnitTestsRecovery2', 'bar.json'))); } diff --git a/js/server/tests/recovery/leftover-collection-directory.js b/js/server/tests/recovery/leftover-collection-directory-mmfiles.js similarity index 100% rename from js/server/tests/recovery/leftover-collection-directory.js rename to js/server/tests/recovery/leftover-collection-directory-mmfiles.js diff --git a/js/server/tests/recovery/leftover-database-directory.js b/js/server/tests/recovery/leftover-database-directory-mmfiles.js similarity index 100% rename from js/server/tests/recovery/leftover-database-directory.js rename to js/server/tests/recovery/leftover-database-directory-mmfiles.js diff --git a/js/server/tests/replication/replication-ongoing.js b/js/server/tests/replication/replication-ongoing.js index 747fe5ad29..353ca50f75 100644 --- a/js/server/tests/replication/replication-ongoing.js +++ b/js/server/tests/replication/replication-ongoing.js @@ -231,6 +231,69 @@ function ReplicationSuite() { db._drop(cn); db._drop(cn2); }, + + 
//////////////////////////////////////////////////////////////////////////////// + /// @brief test collection creation + //////////////////////////////////////////////////////////////////////////////// + + testCreateCollection: function() { + connectToMaster(); + + compare( + function(state) { + }, + + function(state) { + db._create(cn); + for (var i = 0; i < 100; ++i) { + db._collection(cn).save({ + value: i + }); + } + internal.wal.flush(true, true); + }, + + function(state) { + return true; + }, + + function(state) { + assertTrue(db._collection(cn).count() === 100); + } + ); + }, + + //////////////////////////////////////////////////////////////////////////////// + /// @brief test collection dropping + //////////////////////////////////////////////////////////////////////////////// + + testDropCollection: function() { + connectToMaster(); + + compare( + function(state) { + }, + + function(state) { + db._create(cn); + for (var i = 0; i < 100; ++i) { + db._collection(cn).save({ + value: i + }); + } + db._drop(cn); + internal.wal.flush(true, true); + }, + + function(state) { + return true; + }, + + function(state) { + assertNull(db._collection(cn)); + } + ); + }, //////////////////////////////////////////////////////////////////////////////// /// @brief test require from present diff --git a/lib/ApplicationFeatures/RocksDBOptionFeature.cpp b/lib/ApplicationFeatures/RocksDBOptionFeature.cpp new file mode 100644 index 0000000000..bd9c78f4db --- /dev/null +++ b/lib/ApplicationFeatures/RocksDBOptionFeature.cpp @@ -0,0 +1,177 @@ +//////////////////////////////////////////////////////////////////////////////// +/// DISCLAIMER +/// +/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany +/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. 
+///
+/// Copyright holder is ArangoDB GmbH, Cologne, Germany
+///
+/// @author Jan Christoph Uhde
+////////////////////////////////////////////////////////////////////////////////
+
+#include "RocksDBOptionFeature.h"
+#include "Basics/Exceptions.h"
+#include "Basics/FileUtils.h"
+#include "Basics/tri-strings.h"
+#include "Logger/Logger.h"
+#include "ProgramOptions/ProgramOptions.h"
+#include "ProgramOptions/Section.h"
+#include "RestServer/DatabasePathFeature.h"
+
+using namespace arangodb;
+using namespace arangodb::application_features;
+using namespace arangodb::options;
+
+RocksDBOptionFeature::RocksDBOptionFeature(
+    application_features::ApplicationServer* server)
+    : application_features::ApplicationFeature(server, "RocksDBOption"),
+      _writeBufferSize(0),
+      _maxWriteBufferNumber(2),
+      _delayedWriteRate(2 * 1024 * 1024),
+      _minWriteBufferNumberToMerge(1),
+      _numLevels(4),
+      _maxBytesForLevelBase(256 * 1024 * 1024),
+      _maxBytesForLevelMultiplier(10),
+      _baseBackgroundCompactions(1),
+      _maxBackgroundCompactions(1),
+      _maxLogFileSize(0),
+      _keepLogFileNum(1000),
+      _logFileTimeToRoll(0),
+      _compactionReadaheadSize(0),
+      _verifyChecksumsInCompaction(true),
+      _optimizeFiltersForHits(true)
+{
+  setOptional(true);
+  requiresElevatedPrivileges(false);
+  startsAfter("DatabasePath");
+}
+
+void RocksDBOptionFeature::collectOptions(std::shared_ptr<ProgramOptions> options) {
+  options->addSection("rocksdb", "Configure the RocksDB engine");
+
+  options->addObsoleteOption(
+      "--rocksdb.enabled",
+      "obsolete option - the RocksDB engine for the persistent index is now always active",
+      true);
+
+  options->addOption(
+      "--rocksdb.write-buffer-size",
+      "amount of data to build up in memory before converting to a sorted on-disk file (0 = disabled)",
+      new UInt64Parameter(&_writeBufferSize));
+
+  options->addOption(
+      "--rocksdb.max-write-buffer-number",
+      "maximum number of write buffers that are built up in memory",
+      new UInt64Parameter(&_maxWriteBufferNumber));
+
+  options->addHiddenOption(
+      "--rocksdb.delayed_write_rate",
+      "limit the write rate to the DB (in bytes per second) if we are writing to the last "
+      "mem table allowed and we allow more than 3 mem tables",
+      new UInt64Parameter(&_delayedWriteRate));
+
+  options->addOption(
+      "--rocksdb.min-write-buffer-number-to-merge",
+      "minimum number of write buffers that will be merged together before writing "
+      "to storage",
+      new UInt64Parameter(&_minWriteBufferNumberToMerge));
+
+  options->addOption(
+      "--rocksdb.num-levels",
+      "number of levels for the database",
+      new UInt64Parameter(&_numLevels));
+
+  options->addHiddenOption(
+      "--rocksdb.max-bytes-for-level-base",
+      "maximum total data size for the base level",
+      new UInt64Parameter(&_maxBytesForLevelBase));
+
+  options->addOption(
+      "--rocksdb.max-bytes-for-level-multiplier",
+      "multiplier by which the maximum total data size grows from one level to the next",
+      new UInt64Parameter(&_maxBytesForLevelMultiplier));
+
+  options->addOption(
+      "--rocksdb.verify-checksums-in-compaction",
+      "if true, compaction will verify the checksum on every read that happens "
+      "as part of compaction",
+      new BooleanParameter(&_verifyChecksumsInCompaction));
+
+  options->addOption(
+      "--rocksdb.optimize-filters-for-hits",
+      "this flag specifies that the implementation should optimize the filters "
+      "mainly for cases where keys are found rather than also optimize for keys "
+      "missed. This would be used in cases where the application knows that "
+      "there are very few misses or the performance in the case of misses is not "
+      "important",
+      new BooleanParameter(&_optimizeFiltersForHits));
+
+  options->addOption(
+      "--rocksdb.base-background-compactions",
+      "suggested number of concurrent background compaction jobs",
+      new UInt64Parameter(&_baseBackgroundCompactions));
+
+  options->addOption(
+      "--rocksdb.max-background-compactions",
+      "maximum number of concurrent background compaction jobs",
+      new UInt64Parameter(&_maxBackgroundCompactions));
+
+  options->addOption(
+      "--rocksdb.max-log-file-size",
+      "maximum size of the info log file",
+      new UInt64Parameter(&_maxLogFileSize));
+
+  options->addOption(
+      "--rocksdb.keep-log-file-num",
+      "maximum number of info log files to keep",
+      new UInt64Parameter(&_keepLogFileNum));
+
+  options->addOption(
+      "--rocksdb.log-file-time-to-roll",
+      "time for the info log file to roll (in seconds). "
+      "If specified with a non-zero value, the log file will be rolled "
+      "if it has been active longer than `log_file_time_to_roll`",
+      new UInt64Parameter(&_logFileTimeToRoll));
+
+  options->addOption(
+      "--rocksdb.compaction-read-ahead-size",
+      "if non-zero, bigger reads are performed when doing compaction. If you are "
+      "running RocksDB on spinning disks, you should set this to at least 2MB. "
+      "That way, RocksDB's compaction does sequential instead of random reads.",
+      new UInt64Parameter(&_compactionReadaheadSize));
+}
+
+void RocksDBOptionFeature::validateOptions(std::shared_ptr<ProgramOptions> options) {
+  if (_writeBufferSize > 0 && _writeBufferSize < 1024 * 1024) {
+    LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "invalid value for '--rocksdb.write-buffer-size'";
+    FATAL_ERROR_EXIT();
+  }
+  if (_maxBytesForLevelMultiplier == 0) {
+    LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "invalid value for '--rocksdb.max-bytes-for-level-multiplier'";
+    FATAL_ERROR_EXIT();
+  }
+  if (_numLevels < 1 || _numLevels > 20) {
+    LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "invalid value for '--rocksdb.num-levels'";
+    FATAL_ERROR_EXIT();
+  }
+  if (_baseBackgroundCompactions < 1 || _baseBackgroundCompactions > 64) {
+    LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "invalid value for '--rocksdb.base-background-compactions'";
+    FATAL_ERROR_EXIT();
+  }
+  if (_maxBackgroundCompactions < _baseBackgroundCompactions) {
+    _maxBackgroundCompactions = _baseBackgroundCompactions;
+  }
+}
+
diff --git a/lib/ApplicationFeatures/RocksDBOptionFeature.h b/lib/ApplicationFeatures/RocksDBOptionFeature.h
new file mode 100644
index 0000000000..754fa9ee3d
--- /dev/null
+++ b/lib/ApplicationFeatures/RocksDBOptionFeature.h
@@ -0,0 +1,69 @@
+////////////////////////////////////////////////////////////////////////////////
+/// DISCLAIMER
+///
+/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
+/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
+///
+/// Licensed under the Apache License, Version 2.0 (the "License");
+/// you may not use this file except in compliance with the License.
+/// You may obtain a copy of the License at
+///
+/// http://www.apache.org/licenses/LICENSE-2.0
+///
+/// Unless required by applicable law or agreed to in writing, software
+/// distributed under the License is distributed on an "AS IS" BASIS,
+/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+/// See the License for the specific language governing permissions and
+/// limitations under the License.
+///
+/// Copyright holder is ArangoDB GmbH, Cologne, Germany
+///
+/// @author Jan Christoph Uhde
+////////////////////////////////////////////////////////////////////////////////
+
+#ifndef ARANGODB_APPLICATION_FEATURES_ROCKSDB_OPTION_FEATURE_H
+#define ARANGODB_APPLICATION_FEATURES_ROCKSDB_OPTION_FEATURE_H 1
+
+#include "ApplicationFeatures/ApplicationFeature.h"
+#include "Basics/Common.h"
+#include "VocBase/voc-types.h"
+
+namespace arangodb {
+
+// This feature is used to configure RocksDB in a central place.
+//
+// The RocksDB storage engine and the MMFiles persistent index
+// (which are never active at the same time) both take the options
+// that are set in this feature.
+
+class RocksDBOptionFeature final : public application_features::ApplicationFeature {
+ public:
+  explicit RocksDBOptionFeature(application_features::ApplicationServer* server);
+  ~RocksDBOptionFeature() {}
+
+  void collectOptions(std::shared_ptr<options::ProgramOptions>) override final;
+  void validateOptions(std::shared_ptr<options::ProgramOptions>) override final;
+  void prepare() override final {}
+  void start() override final {}
+  void unprepare() override final {}
+
+  uint64_t _writeBufferSize;
+  uint64_t _maxWriteBufferNumber;
+  uint64_t _delayedWriteRate;
+  uint64_t _minWriteBufferNumberToMerge;
+  uint64_t _numLevels;
+  uint64_t _maxBytesForLevelBase;
+  uint64_t _maxBytesForLevelMultiplier;
+  uint64_t _baseBackgroundCompactions;
+  uint64_t _maxBackgroundCompactions;
+  uint64_t _maxLogFileSize;
+  uint64_t _keepLogFileNum;
+  uint64_t _logFileTimeToRoll;
+  uint64_t _compactionReadaheadSize;
+  bool _verifyChecksumsInCompaction;
+  bool _optimizeFiltersForHits;
+};
+
+}
+
+#endif
diff --git a/lib/Basics/Exceptions.cpp b/lib/Basics/Exceptions.cpp
index 800da5f963..1e8c410360 100644
--- a/lib/Basics/Exceptions.cpp
+++ b/lib/Basics/Exceptions.cpp
@@ -60,7 +60,6 @@ Exception::Exception(arangodb::Result&& result, char const* file, int line)
   appendLocation();
 }
 
-
 /// @brief constructor, for creating an exception with an already created
 /// error message (normally based on error templates containing %s, %d etc.)
Exception::Exception(int code, std::string const& errorMessage, diff --git a/lib/CMakeLists.txt b/lib/CMakeLists.txt index 0721f2d408..6aea494f18 100644 --- a/lib/CMakeLists.txt +++ b/lib/CMakeLists.txt @@ -120,6 +120,7 @@ add_library(${LIB_ARANGO} STATIC ApplicationFeatures/NonceFeature.cpp ApplicationFeatures/PageSizeFeature.cpp ApplicationFeatures/PrivilegeFeature.cpp + ApplicationFeatures/RocksDBOptionFeature.cpp ApplicationFeatures/ShutdownFeature.cpp ApplicationFeatures/TempFeature.cpp ApplicationFeatures/V8PlatformFeature.cpp diff --git a/lib/Logger/LogTopic.cpp b/lib/Logger/LogTopic.cpp index 30edc58b54..8ef7ac4b38 100644 --- a/lib/Logger/LogTopic.cpp +++ b/lib/Logger/LogTopic.cpp @@ -49,7 +49,7 @@ LogTopic Logger::CONFIG("config"); LogTopic Logger::DATAFILES("datafiles", LogLevel::INFO); LogTopic Logger::DEVEL("development", LogLevel::FATAL); LogTopic Logger::ENGINES("engines", LogLevel::INFO); -LogTopic Logger::FIXME("fixme", LogLevel::INFO); +LogTopic Logger::FIXME("general", LogLevel::INFO); LogTopic Logger::GRAPHS("graphs", LogLevel::INFO); LogTopic Logger::HEARTBEAT("heartbeat", LogLevel::INFO); LogTopic Logger::MEMORY("memory", LogLevel::FATAL); // suppress @@ -136,7 +136,7 @@ LogTopic::LogTopic(std::string const& name, LogLevel level) : _id(NEXT_TOPIC_ID.fetch_add(1, std::memory_order_seq_cst)), _name(name), _level(level) { - if (name != "fixme") { + if (name != "fixme" && name != "general") { // "fixme" is a remainder from ArangoDB < 3.2, when it was // allowed to log messages without a topic. From 3.2 onwards, // logging is always topic-based, and all previously topicless diff --git a/lib/Logger/Logger.cpp b/lib/Logger/Logger.cpp index 842cb12b55..96b3be355b 100644 --- a/lib/Logger/Logger.cpp +++ b/lib/Logger/Logger.cpp @@ -111,9 +111,9 @@ void Logger::setLogLevel(std::string const& levelName) { if (isGeneral) { Logger::setLogLevel(level); - // setting the log level for topic "fixme" is required here, too, + // setting the log level for topic "general" is required here, too, // as "fixme" is the previous general log topic... - LogTopic::setLogLevel(std::string("fixme"), level); + LogTopic::setLogLevel(std::string("general"), level); } else { LogTopic::setLogLevel(v[0], level); }
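
Note for reviewers: since the whole point of the new RocksDBOptionFeature is that both consumers read their RocksDB settings from one central place, a minimal sketch of the consuming side may help review. The helper below is hypothetical and not part of this diff; it assumes the ApplicationServer::getFeature<T>(name) lookup used by other features, rocksdb::Options field names as of RocksDB ~4.x headers (verify_checksums_in_compaction and base_background_compactions were dropped in later RocksDB releases), and that the consuming feature declares startsAfter("RocksDBOption") so the values have been validated before use.

#include <rocksdb/options.h>

#include "ApplicationFeatures/ApplicationServer.h"
#include "ApplicationFeatures/RocksDBOptionFeature.h"

// Hypothetical consumer sketch: copy the centrally collected option values
// into a rocksdb::Options instance. The feature exposes its members publicly
// (no getters), so consumers read them directly.
static rocksdb::Options buildRocksDBOptions() {
  auto* f = arangodb::application_features::ApplicationServer::getFeature<
      arangodb::RocksDBOptionFeature>("RocksDBOption");

  rocksdb::Options out;
  out.write_buffer_size = static_cast<size_t>(f->_writeBufferSize);
  out.max_write_buffer_number = static_cast<int>(f->_maxWriteBufferNumber);
  out.delayed_write_rate = f->_delayedWriteRate;
  out.min_write_buffer_number_to_merge =
      static_cast<int>(f->_minWriteBufferNumberToMerge);
  out.num_levels = static_cast<int>(f->_numLevels);
  out.max_bytes_for_level_base = f->_maxBytesForLevelBase;
  out.max_bytes_for_level_multiplier =
      static_cast<int>(f->_maxBytesForLevelMultiplier);  // double in newer RocksDB
  out.base_background_compactions =
      static_cast<int>(f->_baseBackgroundCompactions);
  out.max_background_compactions =
      static_cast<int>(f->_maxBackgroundCompactions);
  out.max_log_file_size = static_cast<size_t>(f->_maxLogFileSize);
  out.keep_log_file_num = static_cast<size_t>(f->_keepLogFileNum);
  out.log_file_time_to_roll = static_cast<size_t>(f->_logFileTimeToRoll);
  out.compaction_readahead_size =
      static_cast<size_t>(f->_compactionReadaheadSize);
  out.verify_checksums_in_compaction = f->_verifyChecksumsInCompaction;
  out.optimize_filters_for_hits = f->_optimizeFiltersForHits;
  return out;
}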