
Merge remote-tracking branch 'origin' into jwt

Andreas Streichardt 2016-06-02 14:45:21 +02:00
commit a3d5ce8d16
13 changed files with 383 additions and 91 deletions


@@ -178,7 +178,7 @@ the [amount of documents](Miscellaneous.md#length) in a collection.
Check whether the pattern *search* is contained in the string *text*,
using wildcard matching.
- **text** (string): a string
- **text** (string): the string to search in
- **search** (string): a search pattern that can contain the wildcard characters
*%* (meaning any sequence of characters, including none) and *_* (any single
character). Literal *%* and *_* must be escaped with two backslashes.
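A few illustrative calls, consistent with the wildcard semantics described
above (results shown as comments):

```js
LIKE("cart", "ca_t")    // true, the underscore matches exactly one character
LIKE("carrot", "ca_t")  // false, "rro" is more than one character
LIKE("carrot", "ca%t")  // true, the percent sign matches any sequence
LIKE("10%", "10\\%")    // true, escaped literal percent sign
```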
@@ -250,6 +250,63 @@ RANDOM_TOKEN(8) // "zGl09z42"
RANDOM_TOKEN(8) // "m9w50Ft9"
```
!SUBSECTION REGEX()
`REGEX(text, search, caseInsensitive) → bool`
Check whether the pattern *search* is contained in the string *text*,
using regular expression matching.
- **text** (string): the string to search in
- **search** (string): a regular expression search pattern
- returns **bool** (bool): *true* if the pattern is contained in *text*,
and *false* otherwise
The regular expression may consist of literal characters and the following
characters and sequences:
- *.*: the dot matches any single character except line terminators
- *\d*: matches a single digit, equivalent to *[0-9]*
- *\s*: matches a single whitespace character
- *\t*: matches a tab character
- *\r*: matches a carriage return
- *\n*: matches a line-feed character
- *[xyz]*: set of characters. Matches any of the enclosed characters (i.e.
*x*, *y* or *z* in this case)
- *[^xyz]*: negated set of characters. Matches any other character than the
enclosed ones (i.e. anything but *x*, *y* or *z* in this case)
- *[x-z]*: range of characters. Matches any of the characters in the
specified range
- *[^x-z]*: negated range of characters. Matches any other character than the
ones specified in the range
- *(x|y)*: matches either *x* or *y*
- *^*: matches the beginning of the string
- *$*: matches the end of the string
Note that the characters *.*, *\**, *?*, *[*, *]*, *(*, *)*, *{*, *}*, *^*,
and *$* have a special meaning in regular expressions and may need to be
escaped using a backslash (*\\*). A literal backslash should also be escaped
using another backslash, i.e. *\\\\*.
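For example, to match a literal dot (an illustrative sketch following the
escaping rules above):

```js
REGEX("a.c", "a\.c")  // true, the escaped dot only matches a literal dot
REGEX("abc", "a\.c")  // false
REGEX("abc", "a.c")   // true, the unescaped dot matches any character
```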
Characters and sequences may optionally be repeated using the following
quantifiers:
- *x\**: matches zero or more occurrences of *x*
- *x+*: matches one or more occurrences of *x*
- *x?*: matches one or zero occurrences of *x*
- *x{y}*: matches exactly *y* occurrences of *x*
- *x{y,z}*: matches between *y* and *z* occurrences of *x*
- *x{y,}*: matches at least *y* occurrences of *x*
If the regular expression in *search* is invalid, a warning will be raised
and the function will return *false*.
```js
REGEX("the quick brown fox", "the.*fox") // true
REGEX("the quick brown fox", "^(a|the)\s+(quick|slow).*f.x$") // true
REGEX("the\nquick\nbrown\nfox", "^the(\n[a-w]+)+\nfox$") // true
```
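The quantifiers can be combined with the characters and sequences above, for
example (illustrative results):

```js
REGEX("aaabc", "a{3}b")    // true
REGEX("abbbc", "ab{2,3}c") // true
REGEX("ac", "ab?c")        // true
REGEX("ac", "ab+c")        // false
```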
!SUBSECTION REVERSE()
`REVERSE(value) → reversedString`


@@ -42,6 +42,11 @@
* License: PCRE [BSD 3-Clause license](https://github.com/v8/v8/blob/5.0.71.39/test/mjsunit/third_party/regexp-pcre.js)
* License: object-keys [BSD-style 3-Clause license](https://github.com/v8/v8/blob/5.0.71.39/test/mjsunit/third_party/object-keys.js)
### RocksDB 4.8.0
* Project Home: https://github.com/facebook/rocksdb
* License: [BSD License](https://github.com/facebook/rocksdb/blob/master/LICENSE)
### ICU 54.1
* Project Home: http://site.icu-project.org/


@@ -35,13 +35,21 @@ CleanOutServer::CleanOutServer (
std::string const& server) :
Job(snapshot, agent, jobId, creator, prefix), _server(server) {
if (exists()) {
if (_server == "") {
try {
_server = _snapshot(pendingPrefix + _jobId + "/server").getString();
} catch (...) {
LOG_TOPIC(ERR, Logger::AGENCY) << "CleanOutServer job with id " <<
jobId << " failed catastrophically.";
}
}
if (_server != "") {
if (exists()) {
if (status() == TODO) {
start();
}
}
if (status() == TODO) {
start();
}
}
}
@@ -138,6 +146,25 @@ bool CleanOutServer::start() const {
}
bool CleanOutServer::scheduleMoveShards() const {
Node::Children const& dbservers = _snapshot("/Plan/DBServers").children();
Node::Children const& databases = _snapshot("/Plan/Collections").children();
for (auto const& database : databases) {
for (auto const& collptr : database.second->children()) {
Node const& collection = *(collptr.second);
for (auto const& shard : collection("shards").children()) {
VPackArrayIterator dbsit(shard.second->slice());
for (auto const& dbserver : dbsit) {
if (dbserver.copyString() == _server) {
MoveShard (_snapshot, _agent, _jobId, _creator, database.first,
collptr.first, shard.first, _server, "DBServer1");
}
}
}
}
}
return true;
}
@@ -156,23 +183,32 @@ bool CleanOutServer::checkFeasibility () const {
// Determine number of available servers
Node::Children const& dbservers = _snapshot("/Plan/DBServers").children();
uint64_t nservers = dbservers.size() - cleanedServers.size() - 1;
uint64_t nservers = dbservers.size() - cleanedServers.size() - 1,
maxReplFact = 1;
// See if available servers after cleanout satisfy all replication factors
std::vector<std::string> tooLargeShards;
// Find conflicting shards
Node::Children const& databases = _snapshot("/Plan/Collections").children();
for (auto const& database : databases) {
for (auto const& collptr : database.second->children()) {
try {
uint64_t replFactor = (*collptr.second)("replicationFactor").getUInt();
if (replFactor > nservers) {
LOG_TOPIC(ERR, Logger::AGENCY) <<
"Cannot house all shard replics after cleaning out " << _server;
return false;
uint64_t replFact = (*collptr.second)("replicationFactor").getUInt();
if (replFact > maxReplFact) {
maxReplFact = replFact;
}
} catch (...) {}
}
}
// Report problem
if (maxReplFact > nservers) {
LOG_TOPIC(ERR, Logger::AGENCY) <<
"Cannot accomodate all shard replics after cleaning out " << _server;
return false;
}
return true;
}


@@ -34,7 +34,7 @@ struct CleanOutServer : public Job {
CleanOutServer(Node const& snapshot, Agent* agent, std::string const& jobId,
std::string const& creator, std::string const& prefix,
std::string const& server);
std::string const& server = std::string());
virtual ~CleanOutServer();


@@ -29,34 +29,45 @@
using namespace arangodb::consensus;
FailedServer::FailedServer(Node const& snapshot, Agent* agent, std::string const& jobId,
std::string const& creator, std::string const& agencyPrefix,
std::string const& failed) :
Job(snapshot, agent, jobId, creator, agencyPrefix), _server(failed) {
FailedServer::FailedServer(Node const& snapshot,
Agent* agent,
std::string const& jobId,
std::string const& creator,
std::string const& agencyPrefix,
std::string const& server) :
Job(snapshot, agent, jobId, creator, agencyPrefix),
_server(server) {
try {
if (exists()) {
if (status() == TODO) {
start();
}
} else {
create();
start();
}
} catch (...) {
if (_server == "") {
try {
_server = _snapshot(pendingPrefix + _jobId + "/server").getString();
} catch (...) {
LOG_TOPIC(ERR, Logger::AGENCY) << "FailedServer job with id " <<
jobId << " failed catastrophically.";
}
}
if (_server != "") {
try {
if (exists()) {
if (status() == TODO) {
start();
}
} else {
create();
start();
}
} catch (...) {
finish("DBServers/" + _server, false);
}
finish("DBServers/" + _server, false);
}
}
FailedServer::~FailedServer () {}
bool FailedServer::start() const {
// Copy todo to pending
Builder todo, pending;


@@ -34,7 +34,7 @@ struct FailedServer : public Job {
FailedServer(Node const& snapshot, Agent* agent, std::string const& jobId,
std::string const& creator, std::string const& agencyPrefix,
std::string const& failed);
std::string const& failed = std::string());
virtual ~FailedServer ();


@@ -81,33 +81,42 @@ bool MoveShard::create () const {
LOG_TOPIC(INFO, Logger::AGENCY)
<< "Todo: Move shard " + _shard + " from " + _from + " to " << _to;
std::string path, now(timepointToString(std::chrono::system_clock::now()));
Builder job;
job.openArray();
job.openObject();
std::string path = _agencyPrefix + toDoPrefix + _jobId;
if (_from == _to) {
path = _agencyPrefix + failedPrefix + _jobId;
job.add("timeFinished", VPackValue(now));
job.add("result",
VPackValue("Source and destination of moveShard must be different"));
} else {
path = _agencyPrefix + toDoPrefix + _jobId;
}
Builder todo;
todo.openArray();
todo.openObject();
todo.add(path, VPackValue(VPackValueType::Object));
todo.add("creator", VPackValue(_creator));
todo.add("type", VPackValue("failedLeader"));
todo.add("database", VPackValue(_database));
todo.add("collection", VPackValue(_collection));
todo.add("shard", VPackValue(_shard));
todo.add("fromServer", VPackValue(_from));
todo.add("toServer", VPackValue(_to));
todo.add("isLeader", VPackValue(true));
todo.add("jobId", VPackValue(_jobId));
todo.add("timeCreated",
VPackValue(timepointToString(std::chrono::system_clock::now())));
todo.close(); todo.close(); todo.close();
write_ret_t res = transact(_agent, todo);
job.add(path, VPackValue(VPackValueType::Object));
job.add("creator", VPackValue(_creator));
job.add("type", VPackValue("failedLeader"));
job.add("database", VPackValue(_database));
job.add("collection", VPackValue(_collection));
job.add("shard", VPackValue(_shard));
job.add("fromServer", VPackValue(_from));
job.add("toServer", VPackValue(_to));
job.add("isLeader", VPackValue(true));
job.add("jobId", VPackValue(_jobId));
job.add("timeCreated", VPackValue(now));
job.close(); job.close(); job.close();
write_ret_t res = transact(_agent, job);
if (res.accepted && res.indices.size()==1 && res.indices[0]) {
return true;
}
LOG_TOPIC(INFO, Logger::AGENCY) << "Failed to insert job " + _jobId;
return false;
@@ -119,6 +128,8 @@ bool MoveShard::start() const {
LOG_TOPIC(INFO, Logger::AGENCY)
<< "Pending: Move shard " + _shard + " from " + _from + " to " << _to;
Builder todo, pending;
// Copy todo to pending
/* Builder todo, pending;


@@ -153,8 +153,8 @@ void Supervision::run() {
// We do a try/catch around everything to prevent agency crashes until
// debugging of the Supervision is finished:
try {
// try {
CONDITION_LOCKER(guard, _cv);
TRI_ASSERT(_agent != nullptr);
bool timedout = false;
@@ -192,32 +192,46 @@
}
}
/*}
catch (std::exception const& e) {
LOG_TOPIC(ERR, Logger::AGENCY)
<< "Supervision thread has caught an exception and is terminated: "
<< e.what();
}
}*/
}
void Supervision::workJobs() {
for (auto const& todoEnt : _snapshot(toDoPrefix).children()) {
Node const& todo = *todoEnt.second;
if (todo("type").toJson() == "failedServer") {
FailedServer fs (
_snapshot, _agent, todo("jobId").toJson(), todo("creator").toJson(),
_agencyPrefix, todo("server").toJson());
Node const& job = *todoEnt.second;
std::string jobType = job("type").getString(),
jobId = job("jobId").getString(),
creator = job("creator").getString();
if (jobType == "failedServer") {
FailedServer fs(_snapshot, _agent, jobId, creator, _agencyPrefix);
} else if (jobType == "cleanOutServer") {
CleanOutServer cos(_snapshot, _agent, jobId, creator, _agencyPrefix);
}
}
for (auto const& todoEnt : _snapshot(pendingPrefix).children()) {
Node const& todo = *todoEnt.second;
if (todo("type").toJson() == "failedServer") {
FailedServer fs (
_snapshot, _agent, todo("jobId").toJson(), todo("creator").toJson(),
_agencyPrefix, todo("server").toJson());
for (auto const& pendEnt : _snapshot(pendingPrefix).children()) {
Node const& job = *pendEnt.second;
std::string jobType = job("type").getString(),
jobId = job("jobId").getString(),
creator = job("creator").getString();
if (jobType == "failedServer") {
FailedServer fs(_snapshot, _agent, jobId, creator, _agencyPrefix);
} else if (jobType == "cleanOutServer") {
CleanOutServer cos(_snapshot, _agent, jobId, creator, _agencyPrefix);
}
}
}


@@ -32,6 +32,7 @@
#include <rocksdb/db.h>
#include <rocksdb/convenience.h>
#include <rocksdb/env.h>
#include <rocksdb/filter_policy.h>
#include <rocksdb/iterator.h>
#include <rocksdb/options.h>
@@ -53,7 +54,14 @@ static RocksDBFeature* Instance = nullptr;
RocksDBFeature::RocksDBFeature(
application_features::ApplicationServer* server)
: application_features::ApplicationFeature(server, "RocksDB"),
_db(nullptr), _comparator(nullptr), _path(), _active(true) {
_db(nullptr), _comparator(nullptr), _path(), _active(true),
_writeBufferSize(0), _maxWriteBufferNumber(2),
_delayedWriteRate(2 * 1024 * 1024), _minWriteBufferNumberToMerge(1),
_numLevels(4), _maxBytesForLevelBase(256 * 1024 * 1024),
_maxBytesForLevelMultiplier(10), _verifyChecksumsInCompaction(true),
_optimizeFiltersForHits(true), _baseBackgroundCompactions(1),
_maxBackgroundCompactions(1), _maxLogFileSize(0),
_keepLogFileNum(1000), _logFileTimeToRoll(0), _compactionReadaheadSize(0) {
setOptional(true);
requiresElevatedPrivileges(false);
startsAfter("LogfileManager");
@@ -71,11 +79,117 @@ void RocksDBFeature::collectOptions(std::shared_ptr<ProgramOptions> options) {
"--rocksdb.enabled",
"Whether or not the RocksDB engine is enabled",
new BooleanParameter(&_active));
options->addOption(
"--rocksdb.write-buffer-size",
"amount of data to build up in memory before converting to a sorted on-disk file (0 = disabled)",
new UInt64Parameter(&_writeBufferSize));
options->addOption(
"--rocksdb.max-write-buffer-number",
"maximum number of write buffers that built up in memory",
new UInt64Parameter(&_maxWriteBufferNumber));
options->addHiddenOption(
"--rocksdb.delayed_write_rate",
"limited write rate to DB (in bytes per second) if we are writing to the last "
"mem table allowed and we allow more than 3 mem tables",
new UInt64Parameter(&_delayedWriteRate));
options->addOption(
"--rocksdb.min-write-buffer-number-to-merge",
"minimum number of write buffers that will be merged together before writing "
"to storage",
new UInt64Parameter(&_minWriteBufferNumberToMerge));
options->addOption(
"--rocksdb.num-levels",
"number of levels for the database",
new UInt64Parameter(&_numLevels));
options->addHiddenOption(
"--rocksdb.max-bytes-for-level-base",
"control maximum total data size for a level",
new UInt64Parameter(&_maxBytesForLevelBase));
options->addOption(
"--rocksdb.max-bytes-for-level-multiplier",
"control maximum total data size for a level",
new UInt64Parameter(&_maxBytesForLevelMultiplier));
options->addOption(
"--rocksdb.verify-checksums-in-compation",
"if true, compaction will verify checksum on every read that happens "
"as part of compaction",
new BooleanParameter(&_verifyChecksumsInCompaction));
options->addOption(
"--rocksdb.optimize-filters-for-hits",
"this flag specifies that the implementation should optimize the filters "
"mainly for cases where keys are found rather than also optimize for keys "
"missed. This would be used in cases where the application knows that "
"there are very few misses or the performance in the case of misses is not "
"important",
new BooleanParameter(&_optimizeFiltersForHits));
options->addOption(
"--rocksdb.base-background-compactions",
"suggested number of concurrent background compaction jobs",
new UInt64Parameter(&_baseBackgroundCompactions));
options->addOption(
"--rocksdb.max-background-compactions",
"maximum number of concurrent background compaction jobs",
new UInt64Parameter(&_maxBackgroundCompactions));
options->addOption(
"--rocksdb.max-log-file-size",
"specify the maximal size of the info log file",
new UInt64Parameter(&_maxLogFileSize));
options->addOption(
"--rocksdb.keep-log-file-num",
"maximal info log files to be kept",
new UInt64Parameter(&_keepLogFileNum));
options->addOption(
"--rocksdb.log-file-time-to-roll",
"time for the info log file to roll (in seconds). "
"If specified with non-zero value, log file will be rolled "
"if it has been active longer than `log_file_time_to_roll`",
new UInt64Parameter(&_logFileTimeToRoll));
options->addOption(
"--rocksdb.compaction-read-ahead-size",
"if non-zero, we perform bigger reads when doing compaction. If you're "
"running RocksDB on spinning disks, you should set this to at least 2MB. "
"that way RocksDB's compaction is doing sequential instead of random reads.",
new UInt64Parameter(&_compactionReadaheadSize));
}
void RocksDBFeature::validateOptions(std::shared_ptr<ProgramOptions> options) {
if (!_active) {
forceDisable();
} else {
if (_writeBufferSize > 0 && _writeBufferSize < 1024 * 1024) {
LOG(FATAL) << "invalid value for '--rocksdb.write-buffer-size'";
FATAL_ERROR_EXIT();
}
if (_maxBytesForLevelMultiplier == 0) {
LOG(FATAL) << "invalid value for '--rocksdb.max-bytes-for-level-multiplier'";
FATAL_ERROR_EXIT();
}
if (_numLevels < 1 || _numLevels > 20) {
LOG(FATAL) << "invalid value for '--rocksdb.num-levels'";
FATAL_ERROR_EXIT();
}
if (_baseBackgroundCompactions < 1 || _baseBackgroundCompactions > 64) {
LOG(FATAL) << "invalid value for '--rocksdb.base-background-compactions'";
FATAL_ERROR_EXIT();
}
if (_maxBackgroundCompactions < _baseBackgroundCompactions) {
_maxBackgroundCompactions = _baseBackgroundCompactions;
}
}
}
@@ -107,23 +221,38 @@ void RocksDBFeature::start() {
_options.create_if_missing = true;
_options.max_open_files = -1;
_options.comparator = _comparator;
_options.write_buffer_size = static_cast<size_t>(_writeBufferSize);
_options.max_write_buffer_number = static_cast<int>(_maxWriteBufferNumber);
_options.delayed_write_rate = _delayedWriteRate;
_options.min_write_buffer_number_to_merge = static_cast<int>(_minWriteBufferNumberToMerge);
_options.num_levels = static_cast<int>(_numLevels);
_options.max_bytes_for_level_base = _maxBytesForLevelBase;
_options.max_bytes_for_level_multiplier = static_cast<int>(_maxBytesForLevelMultiplier);
_options.verify_checksums_in_compaction = _verifyChecksumsInCompaction;
_options.optimize_filters_for_hits = _optimizeFiltersForHits;
_options.base_background_compactions = static_cast<int>(_baseBackgroundCompactions);
_options.max_background_compactions = static_cast<int>(_maxBackgroundCompactions);
_options.max_log_file_size = static_cast<size_t>(_maxLogFileSize);
_options.keep_log_file_num = static_cast<size_t>(_keepLogFileNum);
_options.log_file_time_to_roll = static_cast<size_t>(_logFileTimeToRoll);
_options.compaction_readahead_size = static_cast<size_t>(_compactionReadaheadSize);
if (_options.base_background_compactions > 1 || _options.max_background_compactions > 1) {
_options.env->SetBackgroundThreads(
(std::max)(_options.base_background_compactions, _options.max_background_compactions),
rocksdb::Env::Priority::LOW);
}
//options.block_cache = rocksdb::NewLRUCache(100 * 1048576); // 100MB uncompressed cache
//options.block_cache_compressed = rocksdb::NewLRUCache(100 * 1048576); // 100MB compressed cache
//options.compression = rocksdb::kLZ4Compression;
//options.write_buffer_size = 32 << 20;
//options.max_write_buffer_number = 2;
//options.min_write_buffer_number_to_merge = 1;
//options.disableDataSync = 1;
//options.bytes_per_sync = 2 << 20;
//options.env->SetBackgroundThreads(num_threads, Env::Priority::HIGH);
//options.env->SetBackgroundThreads(num_threads, Env::Priority::LOW);
rocksdb::Status status = rocksdb::OptimisticTransactionDB::Open(_options, _path, &_db);
if (! status.ok()) {
LOG(FATAL) << "unable to initialize rocksdb: " << status.ToString();
LOG(FATAL) << "unable to initialize RocksDB: " << status.ToString();
FATAL_ERROR_EXIT();
}
}
@@ -133,7 +262,7 @@ void RocksDBFeature::stop() {
return;
}
LOG(TRACE) << "shutting down rocksdb";
LOG(TRACE) << "shutting down RocksDB";
// flush
rocksdb::FlushOptions options;
@@ -141,7 +270,7 @@ void RocksDBFeature::stop() {
rocksdb::Status status = _db->GetBaseDB()->Flush(options);
if (! status.ok()) {
LOG(ERR) << "error flushing rocksdb: " << status.ToString();
LOG(ERR) << "error flushing data to RocksDB: " << status.ToString();
}
syncWal();
@@ -158,12 +287,12 @@ int RocksDBFeature::syncWal() {
return TRI_ERROR_NO_ERROR;
}
LOG(TRACE) << "syncing rocksdb WAL";
LOG(TRACE) << "syncing RocksDB WAL";
rocksdb::Status status = Instance->db()->GetBaseDB()->SyncWAL();
if (! status.ok()) {
LOG(ERR) << "error syncing rocksdb WAL: " << status.ToString();
LOG(ERR) << "error syncing RocksDB WAL: " << status.ToString();
return TRI_ERROR_INTERNAL;
}
#endif
@@ -250,7 +379,7 @@ int RocksDBFeature::dropPrefix(std::string const& prefix) {
if (!status.ok()) {
// if file deletion failed, we will still iterate over the remaining keys, so we
// don't need to abort and raise an error here
LOG(WARN) << "rocksdb file deletion failed";
LOG(WARN) << "RocksDB file deletion failed";
}
}
@@ -281,19 +410,19 @@ int RocksDBFeature::dropPrefix(std::string const& prefix) {
rocksdb::Status status = db->Write(rocksdb::WriteOptions(), &batch);
if (!status.ok()) {
LOG(WARN) << "rocksdb key deletion failed";
LOG(WARN) << "RocksDB key deletion failed: " << status.ToString();
return TRI_ERROR_INTERNAL;
}
return TRI_ERROR_NO_ERROR;
} catch (arangodb::basics::Exception const& ex) {
LOG(ERR) << "caught exception during prefix deletion: " << ex.what();
LOG(ERR) << "caught exception during RocksDB key prefix deletion: " << ex.what();
return ex.code();
} catch (std::exception const& ex) {
LOG(ERR) << "caught exception during prefix deletion: " << ex.what();
LOG(ERR) << "caught exception during RocksDB key prefix deletion: " << ex.what();
return TRI_ERROR_INTERNAL;
} catch (...) {
LOG(ERR) << "caught unknown exception during prefix deletion";
LOG(ERR) << "caught unknown exception during RocksDB key prefix deletion";
return TRI_ERROR_INTERNAL;
}
}
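Taken together, the new startup options could be combined as follows. This is
a hypothetical invocation with illustrative values only; they are chosen to
satisfy the validation rules above (write buffer size of at least 1 MB,
between 1 and 64 base background compactions, and a compaction read-ahead of
at least 2 MB for spinning disks):

```
arangod \
  --database.directory /var/lib/arangodb \
  --rocksdb.write-buffer-size 67108864 \
  --rocksdb.max-write-buffer-number 4 \
  --rocksdb.min-write-buffer-number-to-merge 2 \
  --rocksdb.num-levels 4 \
  --rocksdb.base-background-compactions 2 \
  --rocksdb.max-background-compactions 4 \
  --rocksdb.compaction-read-ahead-size 2097152
```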


@@ -68,6 +68,21 @@ class RocksDBFeature final : public application_features::ApplicationFeature {
RocksDBKeyComparator* _comparator;
std::string _path;
bool _active;
uint64_t _writeBufferSize;
uint64_t _maxWriteBufferNumber;
uint64_t _delayedWriteRate;
uint64_t _minWriteBufferNumberToMerge;
uint64_t _numLevels;
uint64_t _maxBytesForLevelBase;
uint64_t _maxBytesForLevelMultiplier;
bool _verifyChecksumsInCompaction;
bool _optimizeFiltersForHits;
uint64_t _baseBackgroundCompactions;
uint64_t _maxBackgroundCompactions;
uint64_t _maxLogFileSize;
uint64_t _keepLogFileNum;
uint64_t _logFileTimeToRoll;
uint64_t _compactionReadaheadSize;
};
}


@@ -28,6 +28,8 @@
#include "Cluster/ServerState.h"
#include "HttpServer/HttpHandlerFactory.h"
#include "Logger/Logger.h"
#include "ProgramOptions/Parameters.h"
#include "ProgramOptions/ProgramOptions.h"
#include "Rest/GeneralResponse.h"
#include "Rest/Version.h"
#include "RestServer/DatabaseFeature.h"
@@ -40,7 +42,7 @@ using namespace arangodb::application_features;
using namespace arangodb::options;
BootstrapFeature::BootstrapFeature(application_features::ApplicationServer* server)
: ApplicationFeature(server, "Bootstrap"), _isReady(false) {
: ApplicationFeature(server, "Bootstrap"), _isReady(false), _bark(false) {
startsAfter("Dispatcher");
startsAfter("Endpoint");
startsAfter("Scheduler");
@@ -53,6 +55,11 @@ BootstrapFeature::BootstrapFeature(application_features::ApplicationServer* serv
startsAfter("RestServer");
}
void BootstrapFeature::collectOptions(
std::shared_ptr<ProgramOptions> options) {
options->addHiddenOption("hund", "make ArangoDB bark on startup", new BooleanParameter(&_bark));
}
static void raceForClusterBootstrap() {
AgencyComm agency;
auto ci = ClusterInfo::instance();
@@ -148,6 +155,10 @@ void BootstrapFeature::start() {
LOG(INFO) << "ArangoDB (version " << ARANGODB_VERSION_FULL
<< ") is ready for business. Have fun!";
if (_bark) {
LOG(INFO) << "der Hund so: wau wau!";
}
_isReady = true;
}


@@ -31,6 +31,7 @@ class BootstrapFeature final : public application_features::ApplicationFeature {
explicit BootstrapFeature(application_features::ApplicationServer*);
public:
void collectOptions(std::shared_ptr<options::ProgramOptions>) override final;
void start() override final;
void stop() override final;
bool isReady() const {
@@ -39,6 +40,7 @@ class BootstrapFeature final : public application_features::ApplicationFeature {
private:
bool _isReady;
bool _bark;
};
}


@@ -94,6 +94,7 @@ var AqlHighlightRules = function() {
var builtinFunctions = (
"(to_bool|to_number|to_string|to_list|is_null|is_bool|is_number|is_string|is_list|is_document|typename|" +
"concat|concat_separator|char_length|lower|upper|substring|left|right|trim|reverse|contains|" +
"log|log2|log10|exp|exp2|sin|cos|tan|asin|acos|atan|atan2|radians|degrees|regex|" +
"like|floor|ceil|round|abs|rand|sqrt|pow|length|min|max|average|sum|median|variance_population|" +
"variance_sample|first|last|unique|matches|merge|merge_recursive|has|attributes|values|unset|unset_recursive|keep|" +
"near|within|within_rectangle|is_in_polygon|fulltext|paths|traversal|traversal_tree|edges|stddev_sample|stddev_population|" +