mirror of https://gitee.com/bigwinds/arangodb
Merge branch 'engine-api' of github.com:arangodb/arangodb into engine-api
commit ea58b36bd7
@@ -596,20 +596,26 @@ if test -n "${ENTERPRISE_GIT_URL}" ; then
fi
echo "I'm on Branch: ${GITARGS}"
fi

if test "${EP_GITARGS}" != "${GITARGS}"; then
git checkout master;
fi
git fetch --tags;
git pull --all;
if test "${EP_GITARGS}" != "${GITARGS}"; then
if git pull --all; then
if test "${EP_GITARGS}" != "${GITARGS}"; then
git checkout ${GITARGS};
fi
else
git checkout master;
git pull --all;
git fetch --tags;
git checkout ${GITARGS};
fi
${FINAL_PULL}
)
fi

if test ${DOWNLOAD_STARTER} == 1; then
if test "${DOWNLOAD_STARTER}" == 1; then
# we utilize https://developer.github.com/v3/repos/ to get the newest release:
STARTER_REV=`curl -s https://api.github.com/repos/arangodb-helper/ArangoDBStarter/releases |grep tag_name |head -n 1 |${SED} -e "s;.*: ;;" -e 's;";;g' -e 's;,;;'`
STARTER_URL=`curl -s https://api.github.com/repos/arangodb-helper/ArangoDBStarter/releases/tags/${STARTER_REV} |grep browser_download_url |grep "${OSNAME}" |${SED} -e "s;.*: ;;" -e 's;";;g' -e 's;,;;'`
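The two assignments above scrape the GitHub releases API with grep and sed, as the comment explains. A hedged equivalent of the same lookup using jq follows; jq is not part of the build script, this is only an illustrative sketch:

# Illustrative only: same release lookup expressed with jq instead of grep/sed.
STARTER_REV=$(curl -s https://api.github.com/repos/arangodb-helper/ArangoDBStarter/releases | jq -r '.[0].tag_name')
STARTER_URL=$(curl -s "https://api.github.com/repos/arangodb-helper/ArangoDBStarter/releases/tags/${STARTER_REV}" \
  | jq -r --arg os "${OSNAME}" '.assets[] | select(.browser_download_url | contains($os)) | .browser_download_url')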
@@ -284,16 +284,16 @@ valgrind could look like this. Options are passed as regular long values in the
syntax --option value --sub:option value. Using Valgrind could look like this:

./scripts/unittest single_server --test js/server/tests/aql/aql-escaping.js \
--extraargs:server.threads 1 \
--extraargs:scheduler.threads 1 \
--extraargs:javascript.gc-frequency 1000000 \
--extraargs:javascript.gc-interval 65536 \
--extraArgs:server.threads 1 \
--extraArgs:scheduler.threads 1 \
--extraArgs:javascript.gc-frequency 1000000 \
--extraArgs:javascript.gc-interval 65536 \
--javascript.v8-contexts 2 \
--valgrind /usr/bin/valgrind \
--valgrindargs:log-file /tmp/valgrindlog.%p

- we specify the test to execute
- we specify some arangod arguments via --extraargs which increase the server performance
- we specify some arangod arguments via --extraArgs which increase the server performance
- we specify to run using valgrind (this is supported by all facilities)
- we specify some valgrind commandline arguments
@@ -178,7 +178,8 @@ function main(argv) {
options = internal.parseArgv(argv, 0); // parse option with parseArgv function
}
} catch (x) {
print("failed to parse the json options: " + x.message);
print("failed to parse the json options: " + x.message + "\n" + String(x.stack));
print("argv: ", argv);
return -1;
}
}
@@ -192,9 +192,9 @@ void BaseTraverserEngine::getVertexData(VPackSlice vertex, VPackBuilder& builder
auto shards = _vertexShards.find(name);
if (shards == _vertexShards.end()) {
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_QUERY_COLLECTION_LOCK_FAILED,
"Collection not known to Traversal " +
name + " please add 'WITH " + name +
"' as the first line in your AQL");
"collection not known to traversal: '" +
name + "'. please add 'WITH " + name +
"' as the first line in your AQL query");
// The collection is not known here!
// Maybe handle differently
}

@@ -245,9 +245,9 @@ void BaseTraverserEngine::getVertexData(VPackSlice vertex, size_t depth,
auto shards = _vertexShards.find(name);
if (shards == _vertexShards.end()) {
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_QUERY_COLLECTION_LOCK_FAILED,
"Collection not known to Traversal " +
name + " please add 'WITH " + name +
"' as the first line in your AQL");
"collection not known to traversal: '" +
name + "'. please add 'WITH " + name +
"' as the first line in your AQL query");
}
builder.add(v);
for (std::string const& shard : shards->second) {
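The reworded message asks the user to declare the vertex collection with WITH at the start of the query. A hedged illustration of an AQL query that satisfies it (collection names here are made up):

WITH vertices
FOR v, e, p IN 1..2 OUTBOUND 'vertices/start' edges
  RETURN v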
@@ -206,7 +206,7 @@ class AttributeWeightShortestPathFinder : public ShortestPathFinder {
/// @brief create the PathFinder
//////////////////////////////////////////////////////////////////////////////

AttributeWeightShortestPathFinder(ShortestPathOptions* options);
explicit AttributeWeightShortestPathFinder(ShortestPathOptions* options);

~AttributeWeightShortestPathFinder();

@@ -58,7 +58,7 @@ class ConstantWeightShortestPathFinder : public ShortestPathFinder {
};

public:
ConstantWeightShortestPathFinder(ShortestPathOptions* options);
explicit ConstantWeightShortestPathFinder(ShortestPathOptions* options);

~ConstantWeightShortestPathFinder();
@@ -21,16 +21,17 @@
/// @author Jan Steemann
////////////////////////////////////////////////////////////////////////////////

#include "MMFilesPersistentIndexFeature.h"
#include "ApplicationFeatures/RocksDBOptionFeature.h"
#include "Basics/Exceptions.h"
#include "Basics/FileUtils.h"
#include "Basics/tri-strings.h"
#include "Logger/Logger.h"
#include "MMFiles/MMFilesPersistentIndexFeature.h"
#include "MMFiles/MMFilesPersistentIndexKeyComparator.h"
#include "ProgramOptions/ProgramOptions.h"
#include "ProgramOptions/Section.h"
#include "RestServer/DatabasePathFeature.h"
#include "MMFiles/MMFilesPersistentIndexKeyComparator.h"

#include <rocksdb/db.h>
#include <rocksdb/convenience.h>
#include <rocksdb/env.h>

@@ -55,18 +56,11 @@ static MMFilesPersistentIndexFeature* Instance = nullptr;
MMFilesPersistentIndexFeature::MMFilesPersistentIndexFeature(
application_features::ApplicationServer* server)
: application_features::ApplicationFeature(server, "MMFilesPersistentIndex"),
_db(nullptr), _comparator(nullptr), _path(), _active(true),
_writeBufferSize(0), _maxWriteBufferNumber(2),
_delayedWriteRate(2 * 1024 * 1024), _minWriteBufferNumberToMerge(1),
_numLevels(4), _maxBytesForLevelBase(256 * 1024 * 1024),
_maxBytesForLevelMultiplier(10), _verifyChecksumsInCompaction(true),
_optimizeFiltersForHits(true), _baseBackgroundCompactions(1),
_maxBackgroundCompactions(1), _maxLogFileSize(0),
_keepLogFileNum(1000), _logFileTimeToRoll(0), _compactionReadaheadSize(0) {
_db(nullptr), _comparator(nullptr), _path()
{
setOptional(true);
requiresElevatedPrivileges(false);
startsAfter("DatabasePath");
startsAfter("RocksDBOption");
onlyEnabledWith("MMFilesEngine");
}
@ -82,124 +76,9 @@ MMFilesPersistentIndexFeature::~MMFilesPersistentIndexFeature() {
|
|||
}
|
||||
|
||||
void MMFilesPersistentIndexFeature::collectOptions(std::shared_ptr<ProgramOptions> options) {
|
||||
options->addSection("rocksdb", "Configure the RocksDB engine");
|
||||
|
||||
options->addOption(
|
||||
"--rocksdb.enabled",
|
||||
"Whether or not the RocksDB engine is enabled",
|
||||
new BooleanParameter(&_active));
|
||||
|
||||
options->addOption(
|
||||
"--rocksdb.write-buffer-size",
|
||||
"amount of data to build up in memory before converting to a sorted on-disk file (0 = disabled)",
|
||||
new UInt64Parameter(&_writeBufferSize));
|
||||
|
||||
options->addOption(
|
||||
"--rocksdb.max-write-buffer-number",
|
||||
"maximum number of write buffers that built up in memory",
|
||||
new UInt64Parameter(&_maxWriteBufferNumber));
|
||||
|
||||
options->addHiddenOption(
|
||||
"--rocksdb.delayed_write_rate",
|
||||
"limited write rate to DB (in bytes per second) if we are writing to the last "
|
||||
"mem table allowed and we allow more than 3 mem tables",
|
||||
new UInt64Parameter(&_delayedWriteRate));
|
||||
|
||||
options->addOption(
|
||||
"--rocksdb.min-write-buffer-number-to-merge",
|
||||
"minimum number of write buffers that will be merged together before writing "
|
||||
"to storage",
|
||||
new UInt64Parameter(&_minWriteBufferNumberToMerge));
|
||||
|
||||
options->addOption(
|
||||
"--rocksdb.num-levels",
|
||||
"number of levels for the database",
|
||||
new UInt64Parameter(&_numLevels));
|
||||
|
||||
options->addHiddenOption(
|
||||
"--rocksdb.max-bytes-for-level-base",
|
||||
"control maximum total data size for a level",
|
||||
new UInt64Parameter(&_maxBytesForLevelBase));
|
||||
|
||||
options->addOption(
|
||||
"--rocksdb.max-bytes-for-level-multiplier",
|
||||
"control maximum total data size for a level",
|
||||
new UInt64Parameter(&_maxBytesForLevelMultiplier));
|
||||
|
||||
options->addOption(
|
||||
"--rocksdb.verify-checksums-in-compation",
|
||||
"if true, compaction will verify checksum on every read that happens "
|
||||
"as part of compaction",
|
||||
new BooleanParameter(&_verifyChecksumsInCompaction));
|
||||
|
||||
options->addOption(
|
||||
"--rocksdb.optimize-filters-for-hits",
|
||||
"this flag specifies that the implementation should optimize the filters "
|
||||
"mainly for cases where keys are found rather than also optimize for keys "
|
||||
"missed. This would be used in cases where the application knows that "
|
||||
"there are very few misses or the performance in the case of misses is not "
|
||||
"important",
|
||||
new BooleanParameter(&_optimizeFiltersForHits));
|
||||
|
||||
options->addOption(
|
||||
"--rocksdb.base-background-compactions",
|
||||
"suggested number of concurrent background compaction jobs",
|
||||
new UInt64Parameter(&_baseBackgroundCompactions));
|
||||
|
||||
options->addOption(
|
||||
"--rocksdb.max-background-compactions",
|
||||
"maximum number of concurrent background compaction jobs",
|
||||
new UInt64Parameter(&_maxBackgroundCompactions));
|
||||
|
||||
options->addOption(
|
||||
"--rocksdb.max-log-file-size",
|
||||
"specify the maximal size of the info log file",
|
||||
new UInt64Parameter(&_maxLogFileSize));
|
||||
|
||||
options->addOption(
|
||||
"--rocksdb.keep-log-file-num",
|
||||
"maximal info log files to be kept",
|
||||
new UInt64Parameter(&_keepLogFileNum));
|
||||
|
||||
options->addOption(
|
||||
"--rocksdb.log-file-time-to-roll",
|
||||
"time for the info log file to roll (in seconds). "
|
||||
"If specified with non-zero value, log file will be rolled "
|
||||
"if it has been active longer than `log_file_time_to_roll`",
|
||||
new UInt64Parameter(&_logFileTimeToRoll));
|
||||
|
||||
options->addOption(
|
||||
"--rocksdb.compaction-read-ahead-size",
|
||||
"if non-zero, we perform bigger reads when doing compaction. If you're "
|
||||
"running RocksDB on spinning disks, you should set this to at least 2MB. "
|
||||
"that way RocksDB's compaction is doing sequential instead of random reads.",
|
||||
new UInt64Parameter(&_compactionReadaheadSize));
|
||||
}
|
||||
|
||||
void MMFilesPersistentIndexFeature::validateOptions(std::shared_ptr<ProgramOptions> options) {
|
||||
if (!_active) {
|
||||
forceDisable();
|
||||
} else {
|
||||
if (_writeBufferSize > 0 && _writeBufferSize < 1024 * 1024) {
|
||||
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "invalid value for '--rocksdb.write-buffer-size'";
|
||||
FATAL_ERROR_EXIT();
|
||||
}
|
||||
if (_maxBytesForLevelMultiplier == 0) {
|
||||
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "invalid value for '--rocksdb.max-bytes-for-level-multiplier'";
|
||||
FATAL_ERROR_EXIT();
|
||||
}
|
||||
if (_numLevels < 1 || _numLevels > 20) {
|
||||
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "invalid value for '--rocksdb.num-levels'";
|
||||
FATAL_ERROR_EXIT();
|
||||
}
|
||||
if (_baseBackgroundCompactions < 1 || _baseBackgroundCompactions > 64) {
|
||||
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "invalid value for '--rocksdb.base-background-compactions'";
|
||||
FATAL_ERROR_EXIT();
|
||||
}
|
||||
if (_maxBackgroundCompactions < _baseBackgroundCompactions) {
|
||||
_maxBackgroundCompactions = _baseBackgroundCompactions;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void MMFilesPersistentIndexFeature::start() {
|
||||
|
@ -209,45 +88,47 @@ void MMFilesPersistentIndexFeature::start() {
|
|||
return;
|
||||
}
|
||||
|
||||
auto* opts = ApplicationServer::getFeature<arangodb::RocksDBOptionFeature>("RocksDBOption");
|
||||
|
||||
// set the database sub-directory for RocksDB
|
||||
auto database = ApplicationServer::getFeature<DatabasePathFeature>("DatabasePath");
|
||||
_path = database->subdirectoryName("rocksdb");
|
||||
|
||||
|
||||
LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "initializing rocksdb, path: " << _path;
|
||||
|
||||
|
||||
_comparator = new MMFilesPersistentIndexKeyComparator();
|
||||
|
||||
|
||||
rocksdb::BlockBasedTableOptions tableOptions;
|
||||
tableOptions.cache_index_and_filter_blocks = true;
|
||||
tableOptions.filter_policy.reset(rocksdb::NewBloomFilterPolicy(12, false));
|
||||
|
||||
|
||||
// TODO: using the prefix extractor will lead to the comparator being
|
||||
// called with just the key prefix (which the comparator currently cannot handle)
|
||||
// _options.prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(MMFilesPersistentIndex::minimalPrefixSize()));
|
||||
// _options.table_factory.reset(rocksdb::NewBlockBasedTableFactory(tableOptions));
|
||||
|
||||
|
||||
_options.create_if_missing = true;
|
||||
_options.max_open_files = -1;
|
||||
_options.comparator = _comparator;
|
||||
|
||||
_options.write_buffer_size = static_cast<size_t>(_writeBufferSize);
|
||||
_options.max_write_buffer_number = static_cast<int>(_maxWriteBufferNumber);
|
||||
_options.delayed_write_rate = _delayedWriteRate;
|
||||
_options.min_write_buffer_number_to_merge = static_cast<int>(_minWriteBufferNumberToMerge);
|
||||
_options.num_levels = static_cast<int>(_numLevels);
|
||||
_options.max_bytes_for_level_base = _maxBytesForLevelBase;
|
||||
_options.max_bytes_for_level_multiplier = static_cast<int>(_maxBytesForLevelMultiplier);
|
||||
_options.verify_checksums_in_compaction = _verifyChecksumsInCompaction;
|
||||
_options.optimize_filters_for_hits = _optimizeFiltersForHits;
|
||||
|
||||
_options.base_background_compactions = static_cast<int>(_baseBackgroundCompactions);
|
||||
_options.max_background_compactions = static_cast<int>(_maxBackgroundCompactions);
|
||||
|
||||
_options.max_log_file_size = static_cast<size_t>(_maxLogFileSize);
|
||||
_options.keep_log_file_num = static_cast<size_t>(_keepLogFileNum);
|
||||
_options.log_file_time_to_roll = static_cast<size_t>(_logFileTimeToRoll);
|
||||
_options.compaction_readahead_size = static_cast<size_t>(_compactionReadaheadSize);
|
||||
|
||||
_options.write_buffer_size = static_cast<size_t>(opts->_writeBufferSize);
|
||||
_options.max_write_buffer_number = static_cast<int>(opts->_maxWriteBufferNumber);
|
||||
_options.delayed_write_rate = opts->_delayedWriteRate;
|
||||
_options.min_write_buffer_number_to_merge = static_cast<int>(opts->_minWriteBufferNumberToMerge);
|
||||
_options.num_levels = static_cast<int>(opts->_numLevels);
|
||||
_options.max_bytes_for_level_base = opts->_maxBytesForLevelBase;
|
||||
_options.max_bytes_for_level_multiplier = static_cast<int>(opts->_maxBytesForLevelMultiplier);
|
||||
_options.verify_checksums_in_compaction = opts->_verifyChecksumsInCompaction;
|
||||
_options.optimize_filters_for_hits = opts->_optimizeFiltersForHits;
|
||||
|
||||
_options.base_background_compactions = static_cast<int>(opts->_baseBackgroundCompactions);
|
||||
_options.max_background_compactions = static_cast<int>(opts->_maxBackgroundCompactions);
|
||||
|
||||
_options.max_log_file_size = static_cast<size_t>(opts->_maxLogFileSize);
|
||||
_options.keep_log_file_num = static_cast<size_t>(opts->_keepLogFileNum);
|
||||
_options.log_file_time_to_roll = static_cast<size_t>(opts->_logFileTimeToRoll);
|
||||
_options.compaction_readahead_size = static_cast<size_t>(opts->_compactionReadaheadSize);
|
||||
|
||||
if (_options.base_background_compactions > 1 || _options.max_background_compactions > 1) {
|
||||
_options.env->SetBackgroundThreads(
|
||||
(std::max)(_options.base_background_compactions, _options.max_background_compactions),
|
||||
|
@ -258,7 +139,7 @@ void MMFilesPersistentIndexFeature::start() {
|
|||
//options.block_cache_compressed = rocksdb::NewLRUCache(100 * 1048576); // 100MB compressed cache
|
||||
|
||||
rocksdb::Status status = rocksdb::OptimisticTransactionDB::Open(_options, _path, &_db);
|
||||
|
||||
|
||||
if (! status.ok()) {
|
||||
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "unable to initialize RocksDB: " << status.ToString();
|
||||
FATAL_ERROR_EXIT();
|
||||
|
@ -276,14 +157,14 @@ void MMFilesPersistentIndexFeature::unprepare() {
|
|||
rocksdb::FlushOptions options;
|
||||
options.wait = true;
|
||||
rocksdb::Status status = _db->GetBaseDB()->Flush(options);
|
||||
|
||||
|
||||
if (! status.ok()) {
|
||||
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "error flushing data to RocksDB: " << status.ToString();
|
||||
}
|
||||
|
||||
syncWal();
|
||||
}
|
||||
|
||||
|
||||
MMFilesPersistentIndexFeature* MMFilesPersistentIndexFeature::instance() {
|
||||
return Instance;
|
||||
}
|
||||
|
@ -335,7 +216,7 @@ int MMFilesPersistentIndexFeature::dropPrefix(std::string const& prefix) {
|
|||
if (!isEnabled()) {
|
||||
return TRI_ERROR_NO_ERROR;
|
||||
}
|
||||
|
||||
|
||||
TRI_ASSERT(Instance != nullptr);
|
||||
|
||||
try {
|
||||
|
@ -344,7 +225,7 @@ int MMFilesPersistentIndexFeature::dropPrefix(std::string const& prefix) {
|
|||
// create lower and upper bound for deletion
|
||||
builder.openArray();
|
||||
builder.add(VPackSlice::minKeySlice());
|
||||
builder.close();
|
||||
builder.close();
|
||||
|
||||
std::string l;
|
||||
l.reserve(prefix.size() + builder.slice().byteSize());
|
||||
|
@ -355,12 +236,12 @@ int MMFilesPersistentIndexFeature::dropPrefix(std::string const& prefix) {
|
|||
l.append(reinterpret_cast<char const*>(&value), sizeof(uint64_t));
|
||||
}
|
||||
l.append(builder.slice().startAs<char const>(), builder.slice().byteSize());
|
||||
|
||||
|
||||
builder.clear();
|
||||
builder.openArray();
|
||||
builder.add(VPackSlice::maxKeySlice());
|
||||
builder.close();
|
||||
|
||||
builder.close();
|
||||
|
||||
std::string u;
|
||||
u.reserve(prefix.size() + builder.slice().byteSize());
|
||||
u.append(prefix);
|
||||
|
@ -370,8 +251,8 @@ int MMFilesPersistentIndexFeature::dropPrefix(std::string const& prefix) {
|
|||
u.append(reinterpret_cast<char const*>(&value), sizeof(uint64_t));
|
||||
}
|
||||
u.append(builder.slice().startAs<char const>(), builder.slice().byteSize());
|
||||
|
||||
#if 0
|
||||
|
||||
#if 0
|
||||
for (size_t i = 0; i < prefix.size(); i += sizeof(TRI_idx_iid_t)) {
|
||||
char const* x = prefix.c_str() + i;
|
||||
size_t o;
|
||||
|
@ -381,7 +262,7 @@ int MMFilesPersistentIndexFeature::dropPrefix(std::string const& prefix) {
|
|||
TRI_FreeString(TRI_CORE_MEM_ZONE, q);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "dropping RocksDB range: " << VPackSlice(l.c_str() + MMFilesPersistentIndex::keyPrefixSize()).toJson() << " - " << VPackSlice(u.c_str() + MMFilesPersistentIndex::keyPrefixSize()).toJson();
|
||||
#endif
|
||||
|
||||
|
@ -398,15 +279,15 @@ int MMFilesPersistentIndexFeature::dropPrefix(std::string const& prefix) {
|
|||
LOG_TOPIC(WARN, arangodb::Logger::FIXME) << "RocksDB file deletion failed";
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// go on and delete the remaining keys (delete files in range does not necessarily
|
||||
// find them all, just complete files)
|
||||
|
||||
|
||||
auto comparator = MMFilesPersistentIndexFeature::instance()->comparator();
|
||||
rocksdb::DB* db = _db->GetBaseDB();
|
||||
|
||||
rocksdb::WriteBatch batch;
|
||||
|
||||
|
||||
std::unique_ptr<rocksdb::Iterator> it(db->NewIterator(rocksdb::ReadOptions()));
|
||||
|
||||
it->Seek(lower);
|
||||
|
@ -416,12 +297,12 @@ int MMFilesPersistentIndexFeature::dropPrefix(std::string const& prefix) {
|
|||
if (res >= 0) {
|
||||
break;
|
||||
}
|
||||
|
||||
|
||||
batch.Delete(it->key());
|
||||
|
||||
it->Next();
|
||||
}
|
||||
|
||||
|
||||
// now apply deletion batch
|
||||
rocksdb::Status status = db->Write(rocksdb::WriteOptions(), &batch);
|
||||
|
||||
|
@ -442,4 +323,3 @@ int MMFilesPersistentIndexFeature::dropPrefix(std::string const& prefix) {
|
|||
return TRI_ERROR_INTERNAL;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -62,27 +62,10 @@ class MMFilesPersistentIndexFeature final : public application_features::Applica
|
|||
int dropPrefix(std::string const& prefix);
|
||||
|
||||
private:
|
||||
|
||||
rocksdb::OptimisticTransactionDB* _db;
|
||||
rocksdb::Options _options;
|
||||
MMFilesPersistentIndexKeyComparator* _comparator;
|
||||
std::string _path;
|
||||
bool _active;
|
||||
uint64_t _writeBufferSize;
|
||||
uint64_t _maxWriteBufferNumber;
|
||||
uint64_t _delayedWriteRate;
|
||||
uint64_t _minWriteBufferNumberToMerge;
|
||||
uint64_t _numLevels;
|
||||
uint64_t _maxBytesForLevelBase;
|
||||
uint64_t _maxBytesForLevelMultiplier;
|
||||
bool _verifyChecksumsInCompaction;
|
||||
bool _optimizeFiltersForHits;
|
||||
uint64_t _baseBackgroundCompactions;
|
||||
uint64_t _maxBackgroundCompactions;
|
||||
uint64_t _maxLogFileSize;
|
||||
uint64_t _keepLogFileNum;
|
||||
uint64_t _logFileTimeToRoll;
|
||||
uint64_t _compactionReadaheadSize;
|
||||
};
|
||||
|
||||
}
|
||||
|
|
|
@ -1342,12 +1342,10 @@ bool MMFilesWalRecoverState::ReplayMarker(MMFilesMarker const* marker,
|
|||
LOG_TOPIC(TRACE, arangodb::Logger::FIXME)
|
||||
<< "found drop database marker. databaseId: " << databaseId;
|
||||
|
||||
TRI_vocbase_t* vocbase = state->releaseDatabase(databaseId);
|
||||
/*TRI_vocbase_t* vocbase = */ state->releaseDatabase(databaseId);
|
||||
|
||||
if (vocbase != nullptr) {
|
||||
// ignore any potential error returned by this call
|
||||
state->databaseFeature->dropDatabase(databaseId, true, false);
|
||||
}
|
||||
// ignore any potential error returned by this call
|
||||
state->databaseFeature->dropDatabase(databaseId, true, state->isDropped(databaseId));
|
||||
|
||||
MMFilesPersistentIndexFeature::dropDatabase(databaseId);
|
||||
break;
|
||||
|
|
|
@@ -834,7 +834,10 @@ int ContinuousSyncer::applyLogMarker(VPackSlice const& slice,
}

else if (type == REPLICATION_COLLECTION_CREATE) {
return createCollection(slice.get("collection"), nullptr);
if (slice.get("collection").isObject()) {
return createCollection(slice.get("collection"), nullptr);
}
return createCollection(slice.get("data"), nullptr);
}

else if (type == REPLICATION_COLLECTION_DROP) {

@@ -938,9 +941,9 @@ int ContinuousSyncer::applyLog(SimpleHttpResult* response,
}

if (ignoreCount == 0) {
if (lineLength > 256) {
if (lineLength > 1024) {
errorMsg +=
", offending marker: " + std::string(lineStart, 256) + "...";
", offending marker: " + std::string(lineStart, 1024) + "...";
} else {
errorMsg +=
", offending marker: " + std::string(lineStart, lineLength);
@ -34,6 +34,7 @@
|
|||
#include "ApplicationFeatures/LanguageFeature.h"
|
||||
#include "ApplicationFeatures/NonceFeature.h"
|
||||
#include "ApplicationFeatures/PageSizeFeature.h"
|
||||
#include "ApplicationFeatures/RocksDBOptionFeature.h"
|
||||
#include "Pregel/PregelFeature.h"
|
||||
#include "ApplicationFeatures/PrivilegeFeature.h"
|
||||
#include "ApplicationFeatures/ShutdownFeature.h"
|
||||
|
@ -173,6 +174,7 @@ static int runServer(int argc, char** argv) {
|
|||
server.addFeature(new VersionFeature(&server));
|
||||
server.addFeature(new ViewTypesFeature(&server));
|
||||
server.addFeature(new WorkMonitorFeature(&server));
|
||||
server.addFeature(new RocksDBOptionFeature(&server));
|
||||
|
||||
#ifdef ARANGODB_HAVE_FORK
|
||||
server.addFeature(new DaemonFeature(&server));
|
||||
|
|
|
@@ -48,34 +48,31 @@ namespace rocksdb {class TransactionDB;
namespace arangodb {

class RocksDBOperationResult : public Result {
public:
public:
RocksDBOperationResult()
:Result()
,_keySize(0)
,_commitRequired(false)
{}
: Result(),
_keySize(0),
_commitRequired(false) {}

RocksDBOperationResult(Result const& other)
: _keySize(0)
,_commitRequired(false)
{
: _keySize(0),
_commitRequired(false) {
cloneData(other);
}

RocksDBOperationResult(Result&& other)
: _keySize(0)
,_commitRequired(false)
{
: _keySize(0),
_commitRequired(false) {
cloneData(std::move(other));
}

uint64_t keySize(){ return _keySize; }
uint64_t keySize(uint64_t s ) { _keySize = s; return _keySize; }
uint64_t keySize() const { return _keySize; }
void keySize(uint64_t s) { _keySize = s; }

bool commitRequired(){ return _commitRequired; }
bool commitRequired(bool cr ) { _commitRequired = cr; return _commitRequired; }
bool commitRequired() const { return _commitRequired; }
void commitRequired(bool cr) { _commitRequired = cr; }

protected:
protected:
uint64_t _keySize;
bool _commitRequired;
};
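The hunk above replaces the combined getter/setter overloads with a const getter and a void setter. A minimal usage sketch of the revised interface; this is illustrative only and not part of the commit:

// Illustrative sketch of the new accessor pair.
RocksDBOperationResult res;
res.keySize(128);            // setter no longer returns the stored value
res.commitRequired(true);
if (res.commitRequired()) {  // const getter
  // caller would trigger an intermediate commit here
}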
@ -342,7 +342,7 @@ bool RocksDBCounterManager::parseRocksWAL() {
|
|||
iterator->Next();
|
||||
}
|
||||
|
||||
LOG_TOPIC(INFO, Logger::ENGINES) << "Finished WAL scan with "
|
||||
LOG_TOPIC(TRACE, Logger::ENGINES) << "finished WAL scan with "
|
||||
<< handler->deltas.size();
|
||||
for (std::pair<uint64_t, RocksDBCounterManager::CounterAdjustment> pair :
|
||||
handler->deltas) {
|
||||
|
@ -352,7 +352,7 @@ bool RocksDBCounterManager::parseRocksWAL() {
|
|||
it->second._count += pair.second.added();
|
||||
it->second._count -= pair.second.removed();
|
||||
it->second._revisionId = pair.second._revisionId;
|
||||
LOG_TOPIC(INFO, Logger::ENGINES)
|
||||
LOG_TOPIC(TRACE, Logger::ENGINES)
|
||||
<< "WAL recovered " << pair.second.added() << " PUTs and "
|
||||
<< pair.second.removed() << " DELETEs for a total of "
|
||||
<< it->second._count;
|
||||
|
|
|
@ -22,6 +22,7 @@
|
|||
/// @author Jan Christoph Uhde
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#include "ApplicationFeatures/RocksDBOptionFeature.h"
|
||||
#include "RocksDBEngine.h"
|
||||
#include "Basics/Exceptions.h"
|
||||
#include "Basics/FileUtils.h"
|
||||
|
@@ -78,8 +79,14 @@ std::string const RocksDBEngine::FeatureName("RocksDBEngine");
RocksDBEngine::RocksDBEngine(application_features::ApplicationServer* server)
: StorageEngine(server, EngineName, FeatureName, new RocksDBIndexFactory()),
_db(nullptr),
_cmp(new RocksDBComparator()) {
// inherits order from StorageEngine
_cmp(new RocksDBComparator()),
_maxTransactionSize((std::numeric_limits<uint64_t>::max)()),
_intermediateTransactionCommitSize(32 * 1024 * 1024),
_intermediateTransactionCommitCount(100000),
_intermediateTransactionCommitEnabled(false) {
// inherits order from StorageEngine but requires RocksDBOption that are used
// to configure this Engine and the MMFiles PesistentIndexFeature
startsAfter("RocksDBOption");
}

RocksDBEngine::~RocksDBEngine() { delete _db; }
@@ -93,29 +100,24 @@ void RocksDBEngine::collectOptions(
options->addSection("rocksdb", "RocksDB engine specific configuration");

// control transaction size for RocksDB engine
_maxTransactionSize =
std::numeric_limits<uint64_t>::max(); // set sensible default value here
options->addOption("--rocksdb.max-transaction-size",
"transaction size limit (in bytes)",
new UInt64Parameter(&_maxTransactionSize));

// control intermediate transactions in RocksDB
_intermediateTransactionSize = _maxTransactionSize * 0.8;
options->addOption(
"--rocksdb.intermediate-transaction-count",
"an intermediate commit will be triend if this count is reached",
new UInt64Parameter(&_intermediateTransactionSize));
"an intermediate commit will be tried when a transaction has accumulated operations of this size (in bytes)",
new UInt64Parameter(&_intermediateTransactionCommitSize));

options->addOption(
"--rocksdb.intermediate-transaction-count",
"an intermediate commit will be triend if this count is reached",
new UInt64Parameter(&_intermediateTransactionCount));
_intermediateTransactionCount = 100 * 1000;
"an intermediate commit will be tried when this number of operations is reached in a transaction",
new UInt64Parameter(&_intermediateTransactionCommitCount));
_intermediateTransactionCommitCount = 100 * 1000;

_intermediateTransactionEnabled = false;
options->addOption("--rocksdb.intermediate-transaction",
"enable intermediate transactions",
new BooleanParameter(&_intermediateTransactionEnabled));
new BooleanParameter(&_intermediateTransactionCommitEnabled));
}

// validate the storage engine's specific options
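A hedged example of the resulting startup options; the values are arbitrary and the names simply follow the registrations in the hunk above:

arangod \
  --rocksdb.max-transaction-size 268435456 \
  --rocksdb.intermediate-transaction true \
  --rocksdb.intermediate-transaction-count 100000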
@ -140,16 +142,36 @@ void RocksDBEngine::start() {
|
|||
}
|
||||
|
||||
// set the database sub-directory for RocksDB
|
||||
auto databasePathFeature =
|
||||
auto* databasePathFeature =
|
||||
ApplicationServer::getFeature<DatabasePathFeature>("DatabasePath");
|
||||
_path = databasePathFeature->subdirectoryName("engine-rocksdb");
|
||||
|
||||
LOG_TOPIC(TRACE, arangodb::Logger::STARTUP) << "initializing rocksdb, path: "
|
||||
<< _path;
|
||||
|
||||
double counter_sync_seconds = 2.5;
|
||||
rocksdb::TransactionDBOptions transactionOptions;
|
||||
|
||||
double counter_sync_seconds = 2.5;
|
||||
//options imported set by RocksDBOptionFeature
|
||||
auto* opts = ApplicationServer::getFeature<arangodb::RocksDBOptionFeature>("RocksDBOption");
|
||||
_options.write_buffer_size = static_cast<size_t>(opts->_writeBufferSize);
|
||||
_options.max_write_buffer_number = static_cast<int>(opts->_maxWriteBufferNumber);
|
||||
_options.delayed_write_rate = opts->_delayedWriteRate;
|
||||
_options.min_write_buffer_number_to_merge = static_cast<int>(opts->_minWriteBufferNumberToMerge);
|
||||
_options.num_levels = static_cast<int>(opts->_numLevels);
|
||||
_options.max_bytes_for_level_base = opts->_maxBytesForLevelBase;
|
||||
_options.max_bytes_for_level_multiplier = static_cast<int>(opts->_maxBytesForLevelMultiplier);
|
||||
_options.verify_checksums_in_compaction = opts->_verifyChecksumsInCompaction;
|
||||
_options.optimize_filters_for_hits = opts->_optimizeFiltersForHits;
|
||||
|
||||
_options.base_background_compactions = static_cast<int>(opts->_baseBackgroundCompactions);
|
||||
_options.max_background_compactions = static_cast<int>(opts->_maxBackgroundCompactions);
|
||||
|
||||
_options.max_log_file_size = static_cast<size_t>(opts->_maxLogFileSize);
|
||||
_options.keep_log_file_num = static_cast<size_t>(opts->_keepLogFileNum);
|
||||
_options.log_file_time_to_roll = static_cast<size_t>(opts->_logFileTimeToRoll);
|
||||
_options.compaction_readahead_size = static_cast<size_t>(opts->_compactionReadaheadSize);
|
||||
|
||||
_options.create_if_missing = true;
|
||||
_options.max_open_files = -1;
|
||||
_options.comparator = _cmp.get();
|
||||
|
@ -207,8 +229,8 @@ transaction::ContextData* RocksDBEngine::createTransactionContextData() {
|
|||
TransactionState* RocksDBEngine::createTransactionState(
|
||||
TRI_vocbase_t* vocbase) {
|
||||
return new RocksDBTransactionState(
|
||||
vocbase, _maxTransactionSize, _intermediateTransactionEnabled,
|
||||
_intermediateTransactionSize, _intermediateTransactionCount);
|
||||
vocbase, _maxTransactionSize, _intermediateTransactionCommitEnabled,
|
||||
_intermediateTransactionCommitSize, _intermediateTransactionCommitCount);
|
||||
}
|
||||
|
||||
TransactionCollection* RocksDBEngine::createTransactionCollection(
|
||||
|
@ -624,18 +646,6 @@ void RocksDBEngine::createIndex(TRI_vocbase_t* vocbase,
|
|||
TRI_voc_cid_t collectionId,
|
||||
TRI_idx_iid_t indexId,
|
||||
arangodb::velocypack::Slice const& data) {
|
||||
/*
|
||||
rocksdb::WriteOptions options; // TODO: check which options would make sense
|
||||
auto key = RocksDBKey::Index(vocbase->id(), collectionId, indexId);
|
||||
auto value = RocksDBValue::Index(data);
|
||||
|
||||
rocksdb::Status res = _db->Put(options, key.string(), value.string());
|
||||
auto result = rocksutils::convertStatus(res);
|
||||
if (!result.ok()) {
|
||||
THROW_ARANGO_EXCEPTION(result.errorNumber());
|
||||
}
|
||||
*/
|
||||
// THROW_ARANGO_NOT_YET_IMPLEMENTED();
|
||||
}
|
||||
|
||||
void RocksDBEngine::dropIndex(TRI_vocbase_t* vocbase,
|
||||
|
|
|
@ -266,9 +266,9 @@ class RocksDBEngine final : public StorageEngine {
|
|||
|
||||
std::unique_ptr<RocksDBCounterManager> _counterManager; // tracks the count of documents in collections
|
||||
uint64_t _maxTransactionSize; // maximum allowed size for a transaction
|
||||
uint64_t _intermediateTransactionSize; // maximum size for a transaction before a intermediate commit will be tried
|
||||
uint64_t _intermediateTransactionCount; // limit of transaction count for intermediate commit
|
||||
bool _intermediateTransactionEnabled; // allow usage of intermediate commits
|
||||
uint64_t _intermediateTransactionCommitSize; // maximum size for a transaction before a intermediate commit will be tried
|
||||
uint64_t _intermediateTransactionCommitCount; // limit of transaction count for intermediate commit
|
||||
bool _intermediateTransactionCommitEnabled; // allow usage of intermediate commits
|
||||
};
|
||||
}
|
||||
#endif
|
||||
|
|
|
@ -43,7 +43,7 @@ class RocksDBHashIndex final : public RocksDBVPackIndex {
|
|||
|
||||
bool matchesDefinition(VPackSlice const& info) const override;
|
||||
|
||||
bool isSorted() const override { return false; }
|
||||
bool isSorted() const override { return true; }
|
||||
};
|
||||
}
|
||||
|
||||
|
|
|
@ -264,12 +264,14 @@ int RocksDBIndexFactory::enhanceIndexDefinition(VPackSlice const definition,
|
|||
enhanced.add("objectId",
|
||||
VPackValue(std::to_string(TRI_NewTickServer())));
|
||||
}
|
||||
} else {
|
||||
}
|
||||
// breaks lookupIndex()
|
||||
/*else {
|
||||
if (!definition.hasKey("objectId")) {
|
||||
// objectId missing, but must be present
|
||||
return TRI_ERROR_INTERNAL;
|
||||
}
|
||||
}
|
||||
}*/
|
||||
|
||||
enhanced.add("type", VPackValue(Index::oldtypeName(type)));
|
||||
|
||||
|
|
|
@ -191,7 +191,9 @@ RocksDBAnyIndexIterator::RocksDBAnyIndexIterator(
|
|||
ManagedDocumentResult* mmdr, RocksDBPrimaryIndex const* index)
|
||||
: IndexIterator(collection, trx, mmdr, index),
|
||||
_cmp(index->_cmp),
|
||||
_bounds(RocksDBKeyBounds::PrimaryIndex(index->objectId())) {
|
||||
_bounds(RocksDBKeyBounds::PrimaryIndex(index->objectId())),
|
||||
_total(0),
|
||||
_returned(0) {
|
||||
// acquire rocksdb transaction
|
||||
RocksDBTransactionState* state = rocksutils::toRocksTransactionState(trx);
|
||||
rocksdb::Transaction* rtrx = state->rocksTransaction();
|
||||
|
|
|
@ -119,7 +119,8 @@ class RocksDBAnyIndexIterator final : public IndexIterator {
|
|||
RocksDBComparator const* _cmp;
|
||||
std::unique_ptr<rocksdb::Iterator> _iterator;
|
||||
RocksDBKeyBounds _bounds;
|
||||
uint64_t _total, _returned;
|
||||
uint64_t _total;
|
||||
uint64_t _returned;
|
||||
};
|
||||
|
||||
class RocksDBPrimaryIndex final : public RocksDBIndex {
|
||||
|
|
|
@ -189,8 +189,6 @@ Result RocksDBTransactionState::commitTransaction(
|
|||
_cacheTx = nullptr;
|
||||
}
|
||||
|
||||
// LOG_TOPIC(ERR, Logger::FIXME) << "#" << _id << " COMMIT";
|
||||
|
||||
rocksdb::Snapshot const* snap = this->_rocksReadOptions.snapshot;
|
||||
TRI_ASSERT(snap != nullptr);
|
||||
|
||||
|
@ -220,12 +218,6 @@ Result RocksDBTransactionState::commitTransaction(
|
|||
}
|
||||
|
||||
updateStatus(transaction::Status::COMMITTED);
|
||||
|
||||
// if a write query, clear the query cache for the participating collections
|
||||
if (AccessMode::isWriteOrExclusive(_type) && !_collections.empty() &&
|
||||
arangodb::aql::QueryCache::instance()->mayBeActive()) {
|
||||
clearQueryCache();
|
||||
}
|
||||
}
|
||||
|
||||
unuseCollections(_nestingLevel);
|
||||
|
@ -254,8 +246,6 @@ Result RocksDBTransactionState::abortTransaction(
|
|||
_cacheTx = nullptr;
|
||||
}
|
||||
|
||||
// LOG_TOPIC(ERR, Logger::FIXME) << "#" << _id << " ABORT";
|
||||
|
||||
updateStatus(transaction::Status::ABORTED);
|
||||
|
||||
if (hasOperations()) {
|
||||
|
@ -280,9 +270,9 @@ RocksDBOperationResult RocksDBTransactionState::addOperation(
|
|||
uint64_t newSize = _transactionSize + operationSize + keySize;
|
||||
if (_maxTransactionSize < newSize) {
|
||||
// we hit the transaction size limit
|
||||
std::string message = "maximal transaction size limit of " +
|
||||
std::to_string(_maxTransactionSize) +
|
||||
" bytes reached!";
|
||||
std::string message =
|
||||
"aborting transaction because maximal transaction size limit of " +
|
||||
std::to_string(_maxTransactionSize) + " bytes is reached";
|
||||
res.reset(TRI_ERROR_RESOURCE_LIMIT, message);
|
||||
return res;
|
||||
}
|
||||
|
@ -299,6 +289,12 @@ RocksDBOperationResult RocksDBTransactionState::addOperation(
|
|||
// should not fail or fail with exception
|
||||
collection->addOperation(operationType, operationSize, revisionId);
|
||||
|
||||
// clear the query cache for this collection
|
||||
if (arangodb::aql::QueryCache::instance()->mayBeActive()) {
|
||||
arangodb::aql::QueryCache::instance()->invalidate(
|
||||
_vocbase, collection->collectionName());
|
||||
}
|
||||
|
||||
switch (operationType) {
|
||||
case TRI_VOC_DOCUMENT_OPERATION_UNKNOWN:
|
||||
break;
|
||||
|
|
|
@ -835,11 +835,11 @@ bool RocksDBVPackIndex::supportsFilterCondition(
|
|||
arangodb::aql::Variable const* reference, size_t itemsInIndex,
|
||||
size_t& estimatedItems, double& estimatedCost) const {
|
||||
// HashIndex has different semantics
|
||||
if (this->type() == Index::TRI_IDX_TYPE_HASH_INDEX) {
|
||||
/*if (this->type() == Index::TRI_IDX_TYPE_HASH_INDEX) {
|
||||
SimpleAttributeEqualityMatcher matcher(_fields);
|
||||
return matcher.matchAll(this, node, reference, itemsInIndex, estimatedItems,
|
||||
estimatedCost);
|
||||
}
|
||||
}*/
|
||||
|
||||
std::unordered_map<size_t, std::vector<arangodb::aql::AstNode const*>> found;
|
||||
std::unordered_set<std::string> nonNullAttributes;
|
||||
|
@ -1206,10 +1206,10 @@ arangodb::aql::AstNode* RocksDBVPackIndex::specializeCondition(
|
|||
arangodb::aql::AstNode* node,
|
||||
arangodb::aql::Variable const* reference) const {
|
||||
// HashIndex uses slightly different semantics
|
||||
if (this->type() == Index::TRI_IDX_TYPE_HASH_INDEX) {
|
||||
/*if (this->type() == Index::TRI_IDX_TYPE_HASH_INDEX) {
|
||||
SimpleAttributeEqualityMatcher matcher(_fields);
|
||||
return matcher.specializeAll(this, node, reference);
|
||||
}
|
||||
}*/
|
||||
|
||||
std::unordered_map<size_t, std::vector<arangodb::aql::AstNode const*>> found;
|
||||
std::unordered_set<std::string> nonNullAttributes;
|
||||
|
|
|
@ -991,6 +991,7 @@
|
|||
template: templateEngine.createTemplate('dashboardView.ejs'),
|
||||
|
||||
render: function (modalView) {
|
||||
this.delegateEvents(this.events);
|
||||
var callback = function (enabled, modalView) {
|
||||
if (!modalView) {
|
||||
$(this.el).html(this.template.render());
|
||||
|
|
|
@@ -511,11 +511,13 @@ function unitTest (cases, options) {
print("not cleaning up since we didn't start the server ourselves\n");
}

try {
yaml.safeDump(JSON.parse(JSON.stringify(results)));
} catch (err) {
print(RED + 'cannot dump results: ' + String(err) + RESET);
print(RED + require('internal').inspect(results) + RESET);
if (options.extremeVerbosity === true) {
try {
print(yaml.safeDump(JSON.parse(JSON.stringify(results))));
} catch (err) {
print(RED + 'cannot dump results: ' + String(err) + RESET);
print(RED + require('internal').inspect(results) + RESET);
}
}

if (jsonReply === true) {
@@ -515,20 +515,26 @@ ArangoCollection.prototype.lookupFulltextIndex = function (field, minLength) {
};

// //////////////////////////////////////////////////////////////////////////////
// / @brief getIndex() wrapper to ensure consistency between mmfiles on rocksdb
// / @brief getIndex() wrapper to ensure consistency between mmfiles and rocksdb
// //////////////////////////////////////////////////////////////////////////////

ArangoCollection.prototype.getIndexes = function (withFigures) {
'use strict';
var indexes = this.getIndexesPrivate(withFigures);
if (this.type() === 3) {
// edge collections
var result = [];
for (var i = 0; i < indexes.length; i++) {
if(indexes[i].type === "edge") {
if (indexes[i].type === "edge") {
if (indexes[i].fields.length === 1
&& indexes[i].fields[0] === "_from") {
// we got two edge indexes. now pretend we only have one, and
// make it claim it is created on _from and _to
indexes[i].fields.push("_to");
result.push(indexes[i]);
} else if (indexes[i].fields.length === 2) {
// we have an edge index with two attributes
result.push(indexes[i]);
}
} else {
result.push(indexes[i]);
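As a hedged illustration of the wrapper's effect (the collection name is made up), an edge collection would report a single combined edge index regardless of the storage engine:

// Illustrative only: engines that keep two single-attribute edge indexes internally
// are merged by getIndexes() above into one entry on ["_from", "_to"].
var edges = db._collection("myEdges");   // hypothetical edge collection
edges.getIndexes().forEach(function (idx) {
  if (idx.type === "edge") {
    print(idx.fields);   // expected: ["_from", "_to"]
  }
});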
@ -29,6 +29,7 @@
|
|||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
var internal = require("internal");
|
||||
var db = internal.db;
|
||||
var jsunity = require("jsunity");
|
||||
var helper = require("@arangodb/aql-helper");
|
||||
var getQueryResults = helper.getQueryResults;
|
||||
|
@ -275,7 +276,11 @@ function ahuacatlHashTestSuite () {
|
|||
|
||||
assertEqual(expected, actual);
|
||||
|
||||
assertEqual([ "SingletonNode", "EnumerateCollectionNode", "CalculationNode", "FilterNode", "CalculationNode", "SortNode", "CalculationNode", "ReturnNode" ], explain(query));
|
||||
if (db._engine().name === "rocksdb") {
|
||||
assertEqual([ "SingletonNode", "IndexNode", "CalculationNode", "FilterNode", "CalculationNode", "SortNode", "CalculationNode", "ReturnNode" ], explain(query));
|
||||
} else {
|
||||
assertEqual([ "SingletonNode", "EnumerateCollectionNode", "CalculationNode", "FilterNode", "CalculationNode", "SortNode", "CalculationNode", "ReturnNode" ], explain(query));
|
||||
}
|
||||
},
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -288,8 +293,12 @@ function ahuacatlHashTestSuite () {
|
|||
var actual = getQueryResults(query);
|
||||
|
||||
assertEqual(expected, actual);
|
||||
|
||||
assertEqual([ "SingletonNode", "EnumerateCollectionNode", "CalculationNode", "FilterNode", "CalculationNode", "SortNode", "CalculationNode", "ReturnNode" ], explain(query));
|
||||
|
||||
if (db._engine().name === "rocksdb") {
|
||||
assertEqual([ "SingletonNode", "IndexNode", "CalculationNode", "FilterNode", "CalculationNode", "SortNode", "CalculationNode", "ReturnNode" ], explain(query));
|
||||
} else {
|
||||
assertEqual([ "SingletonNode", "EnumerateCollectionNode", "CalculationNode", "FilterNode", "CalculationNode", "SortNode", "CalculationNode", "ReturnNode" ], explain(query));
|
||||
}
|
||||
},
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -303,7 +312,11 @@ function ahuacatlHashTestSuite () {
|
|||
|
||||
assertEqual(expected, actual);
|
||||
|
||||
assertEqual([ "SingletonNode", "EnumerateCollectionNode", "CalculationNode", "FilterNode", "CalculationNode", "ReturnNode" ], explain(query));
|
||||
if (db._engine().name === "rocksdb") {
|
||||
assertEqual([ "SingletonNode", "IndexNode", "CalculationNode", "FilterNode", "CalculationNode", "ReturnNode" ], explain(query));
|
||||
} else {
|
||||
assertEqual([ "SingletonNode", "EnumerateCollectionNode", "CalculationNode", "FilterNode", "CalculationNode", "ReturnNode" ], explain(query));
|
||||
}
|
||||
},
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -316,8 +329,12 @@ function ahuacatlHashTestSuite () {
|
|||
var actual = getQueryResults(query);
|
||||
|
||||
assertEqual(expected, actual);
|
||||
|
||||
assertEqual([ "SingletonNode", "EnumerateCollectionNode", "CalculationNode", "FilterNode", "CalculationNode", "SortNode", "CalculationNode", "ReturnNode" ], explain(query));
|
||||
|
||||
if (db._engine().name === "rocksdb") {
|
||||
assertEqual([ "SingletonNode", "IndexNode", "CalculationNode", "FilterNode", "CalculationNode", "SortNode", "CalculationNode", "ReturnNode" ], explain(query));
|
||||
} else {
|
||||
assertEqual([ "SingletonNode", "EnumerateCollectionNode", "CalculationNode", "FilterNode", "CalculationNode", "SortNode", "CalculationNode", "ReturnNode" ], explain(query));
|
||||
}
|
||||
},
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -330,8 +347,12 @@ function ahuacatlHashTestSuite () {
|
|||
var actual = getQueryResults(query);
|
||||
|
||||
assertEqual(expected, actual);
|
||||
|
||||
assertEqual([ "SingletonNode", "EnumerateCollectionNode", "CalculationNode", "FilterNode", "CalculationNode", "SortNode", "CalculationNode", "ReturnNode" ], explain(query));
|
||||
|
||||
if (db._engine().name === "rocksdb") {
|
||||
assertEqual([ "SingletonNode", "IndexNode", "CalculationNode", "FilterNode", "CalculationNode", "SortNode", "CalculationNode", "ReturnNode" ], explain(query));
|
||||
} else {
|
||||
assertEqual([ "SingletonNode", "EnumerateCollectionNode", "CalculationNode", "FilterNode", "CalculationNode", "SortNode", "CalculationNode", "ReturnNode" ], explain(query));
|
||||
}
|
||||
},
|
||||
|
||||
testInvalidValuesinList : function () {
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*jshint globalstrict:false, strict:false, maxlen: 500 */
|
||||
/*global assertEqual, AQL_EXECUTE, AQL_EXPLAIN */
|
||||
/*global assertEqual, assertFalse, AQL_EXECUTE, AQL_EXPLAIN */
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief tests for COLLECT w/ COUNT
|
||||
|
@ -30,6 +30,7 @@
|
|||
|
||||
var jsunity = require("jsunity");
|
||||
var db = require("@arangodb").db;
|
||||
var internal = require("internal");
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief test suite
|
||||
|
@ -147,7 +148,11 @@ function optimizerCollectMethodsTestSuite () {
|
|||
/// @brief expect hash COLLECT
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
testHashedWithNonSortedIndex : function () {
|
||||
testHashedWithNonSortedIndexMMFiles : function () {
|
||||
if (db._engine().name !== "mmfiles") {
|
||||
return;
|
||||
}
|
||||
|
||||
c.ensureIndex({ type: "hash", fields: [ "group" ] });
|
||||
c.ensureIndex({ type: "hash", fields: [ "group", "value" ] });
|
||||
|
||||
|
@ -182,6 +187,53 @@ function optimizerCollectMethodsTestSuite () {
|
|||
assertEqual(query[1], results.json.length);
|
||||
});
|
||||
},
|
||||
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief expect hash COLLECT
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
testHashedWithNonSortedIndexRocksDB : function () {
|
||||
if (db._engine().name !== "rocksdb") {
|
||||
return;
|
||||
}
|
||||
|
||||
c.ensureIndex({ type: "hash", fields: [ "group" ] });
|
||||
c.ensureIndex({ type: "hash", fields: [ "group", "value" ] });
|
||||
|
||||
var queries = [
|
||||
[ "FOR j IN " + c.name() + " COLLECT value = j RETURN value", 1500, false],
|
||||
[ "FOR j IN " + c.name() + " COLLECT value = j._key RETURN value", 1500, false],
|
||||
[ "FOR j IN " + c.name() + " COLLECT value = j.group RETURN value", 10, true],
|
||||
[ "FOR j IN " + c.name() + " COLLECT value1 = j.group, value2 = j.value RETURN [ value1, value2 ]", 1500, true ],
|
||||
[ "FOR j IN " + c.name() + " COLLECT value = j.group WITH COUNT INTO l RETURN [ value, l ]", 10, true ],
|
||||
[ "FOR j IN " + c.name() + " COLLECT value1 = j.group, value2 = j.value WITH COUNT INTO l RETURN [ value1, value2, l ]", 1500, true ]
|
||||
];
|
||||
|
||||
queries.forEach(function(query) {
|
||||
var plan = AQL_EXPLAIN(query[0]).plan;
|
||||
|
||||
var aggregateNodes = 0;
|
||||
var sortNodes = 0;
|
||||
plan.nodes.map(function(node) {
|
||||
if (node.type === "CollectNode") {
|
||||
++aggregateNodes;
|
||||
assertFalse(query[2] && node.collectOptions.method !== "sorted");
|
||||
assertEqual(query[2] ? "sorted" : "hash",
|
||||
node.collectOptions.method, query[0]);
|
||||
}
|
||||
if (node.type === "SortNode") {
|
||||
++sortNodes;
|
||||
}
|
||||
});
|
||||
|
||||
assertEqual(1, aggregateNodes);
|
||||
assertEqual(query[2] ? 0 : 1, sortNodes);
|
||||
|
||||
var results = AQL_EXECUTE(query[0]);
|
||||
assertEqual(query[1], results.json.length);
|
||||
});
|
||||
},
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief expect sorted COLLECT
|
||||
|
|
|
@ -1292,8 +1292,10 @@ function optimizerIndexesMultiTestSuite () {
|
|||
// Furthermore, we check the type of expression in the CalcNode
|
||||
// and the number of subnodes:
|
||||
assertEqual("CalculationNode", plan.nodes[2].type, query);
|
||||
assertEqual("SortNode", plan.nodes[3].type, query);
|
||||
|
||||
if (db._engine().name !== "rocksdb") {
|
||||
assertEqual("SortNode", plan.nodes[3].type, query);
|
||||
}
|
||||
|
||||
var results = AQL_EXECUTE(query);
|
||||
var correct = makeResult(maker).map(function(x) { return x.a; });
|
||||
assertEqual(correct, results.json, query);
|
||||
|
@ -1350,7 +1352,9 @@ function optimizerIndexesMultiTestSuite () {
|
|||
// Furthermore, we check the type of expression in the CalcNode
|
||||
// and the number of subnodes:
|
||||
assertEqual("CalculationNode", plan.nodes[2].type, query);
|
||||
assertEqual("SortNode", plan.nodes[3].type, query);
|
||||
if (db._engine().name !== "rocksdb") {
|
||||
assertEqual("SortNode", plan.nodes[3].type, query);
|
||||
}
|
||||
var results = AQL_EXECUTE(query);
|
||||
var correct = makeResult(maker).map(function(x) { return x.a; });
|
||||
assertEqual(correct, results.json, query);
|
||||
|
|
|
@ -200,7 +200,11 @@ function optimizerIndexesSortTestSuite () {
|
|||
/// @brief test index usage
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
testSingleAttributeSortNotOptimizedAway : function () {
|
||||
testSingleAttributeSortNotOptimizedAwayMMFiles : function () {
|
||||
if (db._engine().name !== "mmfiles") {
|
||||
return;
|
||||
}
|
||||
|
||||
AQL_EXECUTE("FOR i IN " + c.name() + " UPDATE i WITH { value2: i.value, value3: i.value } IN " + c.name());
|
||||
|
||||
c.ensureHashIndex("value2");
|
||||
|
@ -229,6 +233,50 @@ function optimizerIndexesSortTestSuite () {
|
|||
});
|
||||
},
|
||||
|
||||
testSingleAttributeSortNotOptimizedAwayRocksDB : function () {
|
||||
if (db._engine().name !== "rocksdb") {
|
||||
return;
|
||||
}
|
||||
|
||||
AQL_EXECUTE("FOR i IN " + c.name() + " UPDATE i WITH { value2: i.value, value3: i.value } IN " + c.name());
|
||||
|
||||
c.ensureHashIndex("value2");
|
||||
c.ensureHashIndex("value3");
|
||||
|
||||
var queries = [
|
||||
"FOR j IN " + c.name() + " FILTER j.value2 == 2 FOR i IN " + c.name() + " FILTER i.value2 == 2 SORT i.value2 RETURN i.value2",
|
||||
"FOR i IN " + c.name() + " FILTER i.value2 == 2 || i.value2 == 3 SORT i.value3 RETURN i.value2",
|
||||
"FOR i IN " + c.name() + " FILTER i.value2 == 2 SORT i.value2, i.value3 RETURN i.value2",
|
||||
"FOR i IN " + c.name() + " FILTER i.value2 == 2 SORT i.value3 RETURN i.value2",
|
||||
"FOR i IN " + c.name() + " FILTER i.value2 == 2 SORT i.value2, PASSTHRU(1) RETURN i.value2",
|
||||
"FOR i IN " + c.name() + " FILTER i.value3 == 2 SORT i.value2 RETURN i.value2",
|
||||
"FOR i IN " + c.name() + " FILTER i.value3 == 2 SORT i.value3, i.value2 RETURN i.value2",
|
||||
"FOR i IN " + c.name() + " FILTER i.value3 == 2 SORT PASSTHRU(1) RETURN i.value2"
|
||||
];
|
||||
|
||||
queries.forEach(function(query) {
|
||||
var plan = AQL_EXPLAIN(query).plan;
|
||||
var nodeTypes = plan.nodes.map(function(node) {
|
||||
return node.type;
|
||||
});
|
||||
|
||||
assertNotEqual(-1, nodeTypes.indexOf("IndexNode"), query);
|
||||
assertNotEqual(-1, nodeTypes.indexOf("SortNode"), query);
|
||||
});
|
||||
|
||||
queries = ["FOR i IN " + c.name() + " FILTER i.value2 == 2 || i.value2 == 3 SORT i.value2 RETURN i.value2"];
|
||||
|
||||
queries.forEach(function(query) {
|
||||
var plan = AQL_EXPLAIN(query).plan;
|
||||
var nodeTypes = plan.nodes.map(function(node) {
|
||||
return node.type;
|
||||
});
|
||||
|
||||
assertNotEqual(-1, nodeTypes.indexOf("IndexNode"), query);
|
||||
assertEqual(-1, nodeTypes.indexOf("SortNode"), query);
|
||||
      });
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief test index usage
////////////////////////////////////////////////////////////////////////////////

@ -355,7 +403,11 @@ function optimizerIndexesSortTestSuite () {
/// @brief test index usage
////////////////////////////////////////////////////////////////////////////////

    testCannotUseHashIndexForSortIfConstRangesMore : function () {
    testCannotUseHashIndexForSortIfConstRangesMoreMMFiles : function () {
      if (db._engine().name !== "mmfiles") {
        return;
      }

      c.ensureIndex({ type: "hash", fields: [ "value2", "value3", "value4" ] });

      var queries = [

@ -396,6 +448,52 @@ function optimizerIndexesSortTestSuite () {
        }
      });
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief test index usage
////////////////////////////////////////////////////////////////////////////////

    testCannotUseHashIndexForSortIfConstRangesMoreRocksDB : function () {
      if (db._engine().name !== "rocksdb") {
        return;
      }

      c.ensureIndex({ type: "hash", fields: [ "value2", "value3", "value4" ] });

      var queries = [
        [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 SORT i.value3 ASC RETURN i.value2", true ],
        [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 SORT i.value3 DESC RETURN i.value2", true ],
        [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 SORT i.value3 ASC, i.value4 ASC RETURN i.value2", true ],
        [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 SORT i.value3 DESC, i.value4 DESC RETURN i.value2", true ],

        [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value4 ASC RETURN i.value2", true ],
        [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value4 DESC RETURN i.value2", true ],
        [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value3 ASC RETURN i.value2", true ],
        [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value3 DESC RETURN i.value2", true ],
        [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value3 ASC, i.value4 ASC RETURN i.value2", true ],
        [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value3 DESC, i.value4 DESC RETURN i.value2", true ],

        [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value3 ASC RETURN i.value2", true ],
        [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value3 DESC RETURN i.value2", true ],
        [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value3 ASC, i.value4 ASC RETURN i.value2", true ],
        [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value3 DESC, i.value4 DESC RETURN i.value2", true ],
        [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value4 ASC RETURN i.value2", true ],
        [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value4 DESC RETURN i.value2", true ],
        [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value2 ASC, i.value3 ASC, i.value4 ASC RETURN i.value2", true ],
        [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value2 DESC, i.value3 DESC, i.value4 DESC RETURN i.value2", true ],
        [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value2 ASC, i.value3 ASC, i.value4 DESC RETURN i.value2", true ]
      ];

      queries.forEach(function(query) {
        var plan = AQL_EXPLAIN(query[0]).plan;
        var nodeTypes = plan.nodes.map(function(node) {
          return node.type;
        });

        assertEqual(-1, nodeTypes.indexOf("SortNode"), query[0]);
      });
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief test index usage

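Note on the split above: the original testCannotUseHashIndexForSortIfConstRangesMore case is duplicated into an MMFiles and a RocksDB variant because the two storage engines disagree on whether a hash index can satisfy a SORT. Under the RocksDB engine the hash, skiplist and persistent index types are all backed by the same sorted structure, so the optimizer may remove the SortNode, while the MMFiles hash index is unordered. A minimal sketch of the guard pattern used throughout these suites (arangosh-style; db and the test collection c are assumed to be set up as in the surrounding suite):

    // sketch: run engine-specific assertions only on the matching storage engine
    testOnlyOnRocksDB : function () {
      if (db._engine().name !== "rocksdb") {
        return;   // skip silently under MMFiles
      }
      // RocksDB-specific assertions go here
    },
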
@ -946,7 +946,11 @@ function optimizerIndexesTestSuite () {
      walker(plan.nodes, function (node) {
        if (node.type === "IndexNode") {
          ++indexNodes;
          assertEqual("hash", node.indexes[0].type);
          if (db._engine().name === "rocksdb") {
            assertNotEqual(["hash", "skiplist", "persistent"].indexOf(node.indexes[0].type), -1);
          } else {
            assertEqual("hash", node.indexes[0].type);
          }
        }
        else if (node.type === "EnumerateCollectionNode") {
          ++collectionNodes;

@ -1032,12 +1036,18 @@ function optimizerIndexesTestSuite () {
          ++indexNodes;
          if (indexNodes === 1) {
            // skiplist must be used for the first FOR
            assertEqual("skiplist", node.indexes[0].type);
            if (db._engine().name === "rocksdb") {
              assertNotEqual(["hash", "skiplist", "persistent"].indexOf(node.indexes[0].type), -1);
            } else {
              assertEqual("skiplist", node.indexes[0].type);
            }
            assertEqual("i", node.outVariable.name);
          }
          else {
            // second FOR should use a hash index
            assertEqual("hash", node.indexes[0].type);
            if (db._engine().name !== "rocksdb") { // all indexes were created equal
              // second FOR should use a hash index
              assertEqual("hash", node.indexes[0].type);
            }
            assertEqual("j", node.outVariable.name);
          }
        }

@ -1111,11 +1121,19 @@ function optimizerIndexesTestSuite () {
        if (node.type === "IndexNode") {
          ++indexNodes;
          if (indexNodes === 1) {
            assertEqual("hash", node.indexes[0].type);
            if (db._engine().name === "rocksdb") {
              assertNotEqual(["hash", "skiplist", "persistent"].indexOf(node.indexes[0].type), -1);
            } else {
              assertEqual("hash", node.indexes[0].type);
            }
            assertEqual("i", node.outVariable.name);
          }
          else if (indexNodes === 2) {
            assertEqual("hash", node.indexes[0].type);
            if (db._engine().name === "rocksdb") {
              assertNotEqual(["hash", "skiplist", "persistent"].indexOf(node.indexes[0].type), -1);
            } else {
              assertEqual("hash", node.indexes[0].type);
            }
            assertEqual("j", node.outVariable.name);
          }
          else {

@ -1173,7 +1191,11 @@ function optimizerIndexesTestSuite () {
      walker(plan.nodes, function (node) {
        if (node.type === "IndexNode") {
          ++indexNodes;
          assertEqual("hash", node.indexes[0].type);
          if (db._engine().name === "rocksdb") {
            assertNotEqual(["hash", "skiplist", "persistent"].indexOf(node.indexes[0].type), -1);
          } else {
            assertEqual("hash", node.indexes[0].type);
          }
        }
        else if (node.type === "EnumerateCollectionNode") {
          ++collectionNodes;

@ -1263,7 +1285,11 @@ function optimizerIndexesTestSuite () {
      var plan = AQL_EXPLAIN(query).plan;
      var nodeTypes = plan.nodes.map(function(node) {
        if (node.type === "IndexNode") {
          assertEqual("hash", node.indexes[0].type);
          if (db._engine().name === "rocksdb") {
            assertNotEqual(["hash", "skiplist", "persistent"].indexOf(node.indexes[0].type), -1);
          } else {
            assertEqual("hash", node.indexes[0].type);
          }
          assertFalse(node.indexes[0].unique);
        }
        return node.type;

@ -1644,12 +1670,20 @@ function optimizerIndexesTestSuite () {
      var nodeTypes = plan.nodes.map(function(node) {
        return node.type;
      });
      assertEqual(-1, nodeTypes.indexOf("IndexNode"), query);
      // rocksdb supports prefix filtering in the hash index
      if (db._engine().name !== "rocksdb") {
        assertEqual(-1, nodeTypes.indexOf("IndexNode"), query);
      }

      var results = AQL_EXECUTE(query);
      assertEqual([ 1, 2 ], results.json.sort(), query);
      assertEqual(0, results.stats.scannedIndex);
      assertTrue(results.stats.scannedFull > 0);
      if (db._engine().name === "rocksdb") {
        assertEqual(2, results.stats.scannedIndex);
        assertEqual(0, results.stats.scannedFull);
      } else {
        assertEqual(0, results.stats.scannedIndex);
        assertTrue(results.stats.scannedFull > 0);
      }
    },

////////////////////////////////////////////////////////////////////////////////

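The assertions in the suites above all follow the same explain-based pattern: obtain the execution plan, map its nodes to their type names, and then check whether an IndexNode appears and whether the SortNode has been removed. A minimal sketch of that pattern, assuming a collection named test with an index on value (AQL_EXPLAIN and AQL_EXECUTE are the helpers the test framework already uses above):

    var db = require("internal").db;
    var plan = AQL_EXPLAIN("FOR doc IN test FILTER doc.value == 1 SORT doc.value RETURN doc").plan;
    // collect the node types of the optimized plan
    var nodeTypes = plan.nodes.map(function(node) { return node.type; });
    // if an index can cover both filter and sort, the optimizer inserts an
    // IndexNode and removes the SortNode
    print(nodeTypes.indexOf("IndexNode") !== -1, nodeTypes.indexOf("SortNode") === -1);
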
@ -29,6 +29,7 @@
////////////////////////////////////////////////////////////////////////////////

var internal = require("internal");
var db = internal.db;
var jsunity = require("jsunity");
var helper = require("@arangodb/aql-helper");
var isEqual = helper.isEqual;

@ -162,9 +163,12 @@ function optimizerRuleTestSuite() {
        [ "FOR v IN " + colName + " FILTER v.c == 1 SORT v.c RETURN 1", true, false ],
        [ "FOR v IN " + colName + " FILTER v.c == 1 SORT v.z RETURN 1", false, true ],
        [ "FOR v IN " + colName + " FILTER v.c == 1 SORT v.f RETURN 1", false, true ],
        [ "FOR v IN " + colName + " FILTER v.y == 1 SORT v.z RETURN 1", false, true ],
        [ "FOR v IN " + colName + " FILTER v.y == 1 SORT v.y RETURN 1", false, true ],
        [ "FOR v IN " + colName + " FILTER v.z == 1 SORT v.y RETURN 1", false, true ],
        [ "FOR v IN " + colName + " FILTER v.y == 1 SORT v.z RETURN 1",
          db._engine().name === "rocksdb", db._engine().name !== "rocksdb" ],
        [ "FOR v IN " + colName + " FILTER v.y == 1 SORT v.y RETURN 1",
          db._engine().name === "rocksdb", db._engine().name !== "rocksdb" ],
        [ "FOR v IN " + colName + " FILTER v.z == 1 SORT v.y RETURN 1",
          db._engine().name === "rocksdb", db._engine().name !== "rocksdb" ],
        [ "FOR v IN " + colName + " FILTER v.z == 1 SORT v.z RETURN 1", false, true ],
        [ "FOR v IN " + colName + " FILTER v.y == 1 && v.z == 1 SORT v.y RETURN 1", true, false ],
        [ "FOR v IN " + colName + " FILTER v.y == 1 && v.z == 1 SORT v.z RETURN 1", true, false ],

@ -213,21 +217,25 @@ function optimizerRuleTestSuite() {
      var j;
      var queries = [

        ["FOR v IN " + colName + " SORT v.c RETURN [v.a, v.b]", true],
        ["FOR v IN " + colName + " SORT v.c RETURN [v.a, v.b]", true, true],
        ["FOR v IN " + colName + " SORT v.b, v.a RETURN [v.a]", true],
        ["FOR v IN " + colName + " SORT v.c RETURN [v.a, v.b]", true],
        ["FOR v IN " + colName + " SORT v.c RETURN [v.a, v.b]", true, true],
        ["FOR v IN " + colName + " SORT v.a + 1 RETURN [v.a]", false],
        ["FOR v IN " + colName + " SORT CONCAT(TO_STRING(v.a), \"lol\") RETURN [v.a]", true],
        // TODO: limit blocks sort atm.
        ["FOR v IN " + colName + " FILTER v.a > 2 LIMIT 3 SORT v.a RETURN [v.a]", false],
        ["FOR v IN " + colName + " FILTER v.a > 2 LIMIT 3 SORT v.a RETURN [v.a]", true],
        ["FOR v IN " + colName + " FOR w IN " + colNameOther + " SORT v.a RETURN [v.a]", true]
      ];

      queries.forEach(function(query) {

        var result = AQL_EXPLAIN(query[0], { }, paramIndexFromSort);
        assertEqual([], removeAlwaysOnClusterRules(result.plan.rules), query);
        if (query[1]) {
        if (db._engine().name === "rocksdb" && query.length === 3 && query[2]) {
          assertEqual(["use-index-for-sort"], removeAlwaysOnClusterRules(result.plan.rules), query);
        } else {
          assertEqual([], removeAlwaysOnClusterRules(result.plan.rules), query);
        }
        if (!query[1]) {
          var allresults = getQueryMultiplePlansAndExecutions(query[0], {});
          for (j = 1; j < allresults.results.length; j++) {
            assertTrue(isEqual(allresults.results[0],

@ -32,6 +32,7 @@ var internal = require("internal");
var jsunity = require("jsunity");
var helper = require("@arangodb/aql-helper");
var removeAlwaysOnClusterRules = helper.removeAlwaysOnClusterRules;
var db = internal.db;

////////////////////////////////////////////////////////////////////////////////
/// @brief test suite

@ -117,14 +118,16 @@ function optimizerRuleUseIndexRangeTester () {

    testRuleNoEffect : function () {
      var queries = [
        "FOR i IN UTUseIndexRangeNoInd FILTER i.a >= 2 RETURN i",
        "FOR i IN UTUseIndexRangeNoInd FILTER i.a == 2 RETURN i",
        "FOR i IN UTUseIndexRangeHashInd FILTER i.a >= 2 RETURN i"
        ["FOR i IN UTUseIndexRangeNoInd FILTER i.a >= 2 RETURN i", true],
        ["FOR i IN UTUseIndexRangeNoInd FILTER i.a == 2 RETURN i", true],
        ["FOR i IN UTUseIndexRangeHashInd FILTER i.a >= 2 RETURN i", false]
      ];

      queries.forEach(function(query) {
        var result = AQL_EXPLAIN(query, { }, paramEnabled);
        assertEqual([ ], removeAlwaysOnClusterRules(result.plan.rules), query);
        var result = AQL_EXPLAIN(query[0], { }, paramEnabled);
        if (db._engine().name !== "rocksdb" || query[1]) {
          assertEqual([ ], removeAlwaysOnClusterRules(result.plan.rules), query);
        }
      });
    },

@ -443,7 +443,7 @@ function arrayIndexNonArraySuite () {
      var allIndexes = col.getIndexes(true);
      assertEqual(allIndexes.length, 2, "We have more than one index!");
      var idx = allIndexes[1];
      if (! isCluster) {
      if (! isCluster && db._engine().name === "mmfiles") {
        switch (idx.type) {
          case "hash":
            assertEqual(idx.figures.totalUsed, count);

@ -32,6 +32,7 @@ var jsunity = require("jsunity");
var internal = require("internal");
var helper = require("@arangodb/aql-helper");
var getQueryResults = helper.getQueryResults;
var db = internal.db;

////////////////////////////////////////////////////////////////////////////////
/// @brief test suite

@ -912,7 +913,11 @@ function ahuacatlQueryOptimizerInTestSuite () {
      }
      c.ensureHashIndex("value");
      var query = "FOR x IN " + cn + " FILTER (x.value > 3 || x.value < 90) RETURN x.value";
      ruleIsNotUsed(query);
      if (db._engine().name === "rocksdb") {
        ruleIsUsed(query);
      } else {
        ruleIsNotUsed(query);
      }
    },

    testOverlappingRangesListSkiplist2 : function () {

@ -945,7 +950,11 @@ function ahuacatlQueryOptimizerInTestSuite () {
      }
      c.ensureHashIndex("value");
      var query = "FOR i IN " + cn + " FILTER i.value == 8 || i.value <= 7 RETURN i.value";
      ruleIsNotUsed(query);
      if (db._engine().name === "rocksdb") {
        ruleIsUsed(query);
      } else {
        ruleIsNotUsed(query);
      }
    },

    testNestedOrHashIndex : function () {

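The two hunks above switch ruleIsNotUsed to ruleIsUsed for RocksDB only: with the RocksDB engine a hash index is a sorted persistent index, so range and OR conditions on the indexed attribute can be answered from the index, whereas the MMFiles hash index supports only equality lookups. A short sketch of what these cases effectively verify (arangosh-style; the collection c and its hash index are assumed as in the tests above):

    c.ensureHashIndex("value");
    var plan = AQL_EXPLAIN("FOR x IN " + c.name() + " FILTER x.value <= 7 RETURN x.value").plan;
    var nodeTypes = plan.nodes.map(function(node) { return node.type; });
    // RocksDB: an IndexNode is expected; MMFiles: a full scan via EnumerateCollectionNode
    print(nodeTypes.indexOf("IndexNode") !== -1);
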
@ -32,6 +32,7 @@ var jsunity = require("jsunity");
var internal = require("internal");
var helper = require("@arangodb/aql-helper");
var getQueryResults = helper.getQueryResults;
var db = internal.db;

////////////////////////////////////////////////////////////////////////////////
/// @brief test suite

@ -472,7 +473,11 @@ function ahuacatlQueryOptimizerLimitTestSuite () {
      assertEqual(21, actual[1].value);
      assertEqual(29, actual[9].value);

      assertEqual([ "SingletonNode", "EnumerateCollectionNode", "CalculationNode", "FilterNode", "LimitNode", "CalculationNode", "SortNode", "ReturnNode" ], explain(query));
      if (db._engine().name === "rocksdb") {
        assertEqual([ "SingletonNode", "IndexNode", "CalculationNode", "FilterNode", "LimitNode", "CalculationNode", "SortNode", "ReturnNode" ], explain(query));
      } else {
        assertEqual([ "SingletonNode", "EnumerateCollectionNode", "CalculationNode", "FilterNode", "LimitNode", "CalculationNode", "SortNode", "ReturnNode" ], explain(query));
      }
    },

////////////////////////////////////////////////////////////////////////////////

@ -490,7 +495,11 @@ function ahuacatlQueryOptimizerLimitTestSuite () {
        assertEqual(docCount - 11 - i, actual[i].value);
      }

      assertEqual([ "SingletonNode", "EnumerateCollectionNode", "CalculationNode", "SortNode", "LimitNode", "ReturnNode" ], explain(query));
      if (db._engine().name === "rocksdb") {
        assertEqual([ "SingletonNode", "IndexNode", "CalculationNode", "LimitNode", "ReturnNode" ], explain(query));
      } else {
        assertEqual([ "SingletonNode", "EnumerateCollectionNode", "CalculationNode", "SortNode", "LimitNode", "ReturnNode" ], explain(query));
      }
    },

////////////////////////////////////////////////////////////////////////////////

@ -508,7 +517,13 @@ function ahuacatlQueryOptimizerLimitTestSuite () {
      assertEqual(21, actual[1].value);
      assertEqual(29, actual[9].value);

      assertEqual([ "SingletonNode", "EnumerateCollectionNode", "CalculationNode", "FilterNode", "CalculationNode", "FilterNode", "LimitNode", "CalculationNode", "SortNode", "ReturnNode" ], explain(query));
      if (db._engine().name === "rocksdb") {
        assertEqual([ "SingletonNode", "IndexNode", "CalculationNode", "FilterNode", "CalculationNode", "FilterNode",
                      "LimitNode", "CalculationNode", "SortNode", "ReturnNode" ], explain(query));
      } else {
        assertEqual([ "SingletonNode", "EnumerateCollectionNode", "CalculationNode", "FilterNode", "CalculationNode",
                      "FilterNode", "LimitNode", "CalculationNode", "SortNode", "ReturnNode" ], explain(query));
      }
    },

////////////////////////////////////////////////////////////////////////////////

@ -83,7 +83,7 @@ function recoverySuite () {
      assertTrue(fs.isDirectory(fs.join(appPath, 'UnitTestsRecovery1')));
      assertTrue(fs.isFile(fs.join(appPath, 'UnitTestsRecovery1', 'foo.json')));

      assertTrue(fs.isDirectory(fs.join(appPath, 'UnitTestsRecovery2')));
      assertFalse(fs.isDirectory(fs.join(appPath, 'UnitTestsRecovery2')));
      assertFalse(fs.isFile(fs.join(appPath, 'UnitTestsRecovery2', 'bar.json')));
    }

@ -231,6 +231,69 @@ function ReplicationSuite() {
      db._drop(cn);
      db._drop(cn2);
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief test collection creation
////////////////////////////////////////////////////////////////////////////////

    testCreateCollection: function() {
      connectToMaster();

      compare(
        function(state) {
        },

        function(state) {
          db._create(cn);
          for (var i = 0; i < 100; ++i) {
            db._collection(cn).save({
              value: i
            });
          }
          internal.wal.flush(true, true);
        },

        function(state) {
          return true;
        },

        function(state) {
          assertTrue(db._collection(cn).count() === 100);
        }
      );
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief test collection dropping
////////////////////////////////////////////////////////////////////////////////

    testDropCollection: function() {
      connectToMaster();

      compare(
        function(state) {
        },

        function(state) {
          db._create(cn);
          for (var i = 0; i < 100; ++i) {
            db._collection(cn).save({
              value: i
            });
          }
          db._drop(cn);
          internal.wal.flush(true, true);
        },

        function(state) {
          return true;
        },

        function(state) {
          assertNull(db._collection(cn));
        }
      );
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief test require from present

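The new testCreateCollection and testDropCollection cases reuse the compare() harness defined earlier in this suite. Judging from how the new tests call it, it takes four callbacks: a setup step on the master, the action whose changes are to be replicated, a predicate that decides when to stop waiting, and the final checks against the replicated state. A hypothetical further test in the same shape (cn, connectToMaster() and compare() are the helpers from this file; the truncate scenario is only an illustration):

    testTruncateCollection: function() {
      connectToMaster();

      compare(
        function(state) {
        },

        function(state) {
          // changes performed on the master
          db._create(cn);
          db._collection(cn).save({ value: 1 });
          db._collection(cn).truncate();
          internal.wal.flush(true, true);
        },

        function(state) {
          return true;
        },

        function(state) {
          // checks against the replicated state
          assertTrue(db._collection(cn).count() === 0);
        }
      );
    },
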
@ -0,0 +1,177 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Jan Christoph Uhde
////////////////////////////////////////////////////////////////////////////////

#include "RocksDBOptionFeature.h"
#include "Basics/Exceptions.h"
#include "Basics/FileUtils.h"
#include "Basics/tri-strings.h"
#include "Logger/Logger.h"
#include "ProgramOptions/ProgramOptions.h"
#include "ProgramOptions/Section.h"
#include "RestServer/DatabasePathFeature.h"

using namespace arangodb;
using namespace arangodb::application_features;
using namespace arangodb::options;

RocksDBOptionFeature::RocksDBOptionFeature(
    application_features::ApplicationServer* server)
    : application_features::ApplicationFeature(server, "RocksDBOption"),
      _writeBufferSize(0),
      _maxWriteBufferNumber(2),
      _delayedWriteRate(2 * 1024 * 1024),
      _minWriteBufferNumberToMerge(1),
      _numLevels(4),
      _maxBytesForLevelBase(256 * 1024 * 1024),
      _maxBytesForLevelMultiplier(10),
      _baseBackgroundCompactions(1),
      _maxBackgroundCompactions(1),
      _maxLogFileSize(0),
      _keepLogFileNum(1000),
      _logFileTimeToRoll(0),
      _compactionReadaheadSize(0),
      _verifyChecksumsInCompaction(true),
      _optimizeFiltersForHits(true) {
  setOptional(true);
  requiresElevatedPrivileges(false);
  startsAfter("DatabasePath");
}

void RocksDBOptionFeature::collectOptions(std::shared_ptr<ProgramOptions> options) {
  options->addSection("rocksdb", "Configure the RocksDB engine");

  options->addObsoleteOption(
      "--rocksdb.enabled",
      "obsolete option (always active) - whether or not the RocksDB engine is enabled for the persistent index",
      true);

  options->addOption(
      "--rocksdb.write-buffer-size",
      "amount of data to build up in memory before converting to a sorted on-disk file (0 = disabled)",
      new UInt64Parameter(&_writeBufferSize));

  options->addOption(
      "--rocksdb.max-write-buffer-number",
      "maximum number of write buffers that are built up in memory",
      new UInt64Parameter(&_maxWriteBufferNumber));

  options->addHiddenOption(
      "--rocksdb.delayed_write_rate",
      "limited write rate to DB (in bytes per second) if we are writing to the last "
      "mem table allowed and we allow more than 3 mem tables",
      new UInt64Parameter(&_delayedWriteRate));

  options->addOption(
      "--rocksdb.min-write-buffer-number-to-merge",
      "minimum number of write buffers that will be merged together before writing "
      "to storage",
      new UInt64Parameter(&_minWriteBufferNumberToMerge));

  options->addOption(
      "--rocksdb.num-levels",
      "number of levels for the database",
      new UInt64Parameter(&_numLevels));

  options->addHiddenOption(
      "--rocksdb.max-bytes-for-level-base",
      "control maximum total data size for a level",
      new UInt64Parameter(&_maxBytesForLevelBase));

  options->addOption(
      "--rocksdb.max-bytes-for-level-multiplier",
      "control the multiplier for the maximum total data size between consecutive levels",
      new UInt64Parameter(&_maxBytesForLevelMultiplier));

  options->addOption(
      "--rocksdb.verify-checksums-in-compaction",
      "if true, compaction will verify checksums on every read that happens "
      "as part of compaction",
      new BooleanParameter(&_verifyChecksumsInCompaction));

  options->addOption(
      "--rocksdb.optimize-filters-for-hits",
      "this flag specifies that the implementation should optimize the filters "
      "mainly for cases where keys are found rather than also optimize for keys "
      "missed. This would be used in cases where the application knows that "
      "there are very few misses or the performance in the case of misses is not "
      "important",
      new BooleanParameter(&_optimizeFiltersForHits));

  options->addOption(
      "--rocksdb.base-background-compactions",
      "suggested number of concurrent background compaction jobs",
      new UInt64Parameter(&_baseBackgroundCompactions));

  options->addOption(
      "--rocksdb.max-background-compactions",
      "maximum number of concurrent background compaction jobs",
      new UInt64Parameter(&_maxBackgroundCompactions));

  options->addOption(
      "--rocksdb.max-log-file-size",
      "specify the maximal size of the info log file",
      new UInt64Parameter(&_maxLogFileSize));

  options->addOption(
      "--rocksdb.keep-log-file-num",
      "maximal number of info log files to be kept",
      new UInt64Parameter(&_keepLogFileNum));

  options->addOption(
      "--rocksdb.log-file-time-to-roll",
      "time for the info log file to roll (in seconds). "
      "If specified with a non-zero value, the log file will be rolled "
      "if it has been active longer than `log_file_time_to_roll`",
      new UInt64Parameter(&_logFileTimeToRoll));

  options->addOption(
      "--rocksdb.compaction-read-ahead-size",
      "if non-zero, bigger reads are performed when doing compaction. If you're "
      "running RocksDB on spinning disks, you should set this to at least 2MB. "
      "That way RocksDB's compaction does sequential instead of random reads.",
      new UInt64Parameter(&_compactionReadaheadSize));
}

void RocksDBOptionFeature::validateOptions(std::shared_ptr<ProgramOptions> options) {
  if (_writeBufferSize > 0 && _writeBufferSize < 1024 * 1024) {
    LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "invalid value for '--rocksdb.write-buffer-size'";
    FATAL_ERROR_EXIT();
  }
  if (_maxBytesForLevelMultiplier == 0) {
    LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "invalid value for '--rocksdb.max-bytes-for-level-multiplier'";
    FATAL_ERROR_EXIT();
  }
  if (_numLevels < 1 || _numLevels > 20) {
    LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "invalid value for '--rocksdb.num-levels'";
    FATAL_ERROR_EXIT();
  }
  if (_baseBackgroundCompactions < 1 || _baseBackgroundCompactions > 64) {
    LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "invalid value for '--rocksdb.base-background-compactions'";
    FATAL_ERROR_EXIT();
  }
  if (_maxBackgroundCompactions < _baseBackgroundCompactions) {
    _maxBackgroundCompactions = _baseBackgroundCompactions;
  }
}

@ -0,0 +1,69 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Jan Christoph Uhde
////////////////////////////////////////////////////////////////////////////////

#ifndef ARANGODB_APPLICATION_FEATURES_ROCKSDB_OPTION_FEATURE_H
#define ARANGODB_APPLICATION_FEATURES_ROCKSDB_OPTION_FEATURE_H 1

#include "ApplicationFeatures/ApplicationFeature.h"
#include "Basics/Common.h"
#include "VocBase/voc-types.h"

namespace arangodb {

// This feature is used to configure RocksDB in a central place.
//
// The RocksDB storage engine and the MMFiles persistent index
// (which are never active at the same time) both take the options
// set in this feature.

class RocksDBOptionFeature final : public application_features::ApplicationFeature {
 public:
  explicit RocksDBOptionFeature(application_features::ApplicationServer* server);
  ~RocksDBOptionFeature() {}

  void collectOptions(std::shared_ptr<options::ProgramOptions>) override final;
  void validateOptions(std::shared_ptr<options::ProgramOptions>) override final;
  void prepare() override final {}
  void start() override final {}
  void unprepare() override final {}

  uint64_t _writeBufferSize;
  uint64_t _maxWriteBufferNumber;
  uint64_t _delayedWriteRate;
  uint64_t _minWriteBufferNumberToMerge;
  uint64_t _numLevels;
  uint64_t _maxBytesForLevelBase;
  uint64_t _maxBytesForLevelMultiplier;
  uint64_t _baseBackgroundCompactions;
  uint64_t _maxBackgroundCompactions;
  uint64_t _maxLogFileSize;
  uint64_t _keepLogFileNum;
  uint64_t _logFileTimeToRoll;
  uint64_t _compactionReadaheadSize;
  bool _verifyChecksumsInCompaction;
  bool _optimizeFiltersForHits;
};

}

#endif

@ -60,7 +60,6 @@ Exception::Exception(arangodb::Result&& result, char const* file, int line)
  appendLocation();
}


/// @brief constructor, for creating an exception with an already created
/// error message (normally based on error templates containing %s, %d etc.)
Exception::Exception(int code, std::string const& errorMessage,

@ -120,6 +120,7 @@ add_library(${LIB_ARANGO} STATIC
  ApplicationFeatures/NonceFeature.cpp
  ApplicationFeatures/PageSizeFeature.cpp
  ApplicationFeatures/PrivilegeFeature.cpp
  ApplicationFeatures/RocksDBOptionFeature.cpp
  ApplicationFeatures/ShutdownFeature.cpp
  ApplicationFeatures/TempFeature.cpp
  ApplicationFeatures/V8PlatformFeature.cpp

@ -49,7 +49,7 @@ LogTopic Logger::CONFIG("config");
LogTopic Logger::DATAFILES("datafiles", LogLevel::INFO);
LogTopic Logger::DEVEL("development", LogLevel::FATAL);
LogTopic Logger::ENGINES("engines", LogLevel::INFO);
LogTopic Logger::FIXME("fixme", LogLevel::INFO);
LogTopic Logger::FIXME("general", LogLevel::INFO);
LogTopic Logger::GRAPHS("graphs", LogLevel::INFO);
LogTopic Logger::HEARTBEAT("heartbeat", LogLevel::INFO);
LogTopic Logger::MEMORY("memory", LogLevel::FATAL); // suppress

@ -136,7 +136,7 @@ LogTopic::LogTopic(std::string const& name, LogLevel level)
    : _id(NEXT_TOPIC_ID.fetch_add(1, std::memory_order_seq_cst)),
      _name(name),
      _level(level) {
  if (name != "fixme") {
  if (name != "fixme" && name != "general") {
    // "fixme" is a remainder from ArangoDB < 3.2, when it was
    // allowed to log messages without a topic. From 3.2 onwards,
    // logging is always topic-based, and all previously topicless

@ -111,9 +111,9 @@ void Logger::setLogLevel(std::string const& levelName) {

  if (isGeneral) {
    Logger::setLogLevel(level);
    // setting the log level for topic "fixme" is required here, too,
    // setting the log level for topic "general" is required here, too,
    // as "fixme" is the previous general log topic...
    LogTopic::setLogLevel(std::string("fixme"), level);
    LogTopic::setLogLevel(std::string("general"), level);
  } else {
    LogTopic::setLogLevel(v[0], level);
  }