mirror of https://gitee.com/bigwinds/arangodb

commit 33ed35fa1f
Merge branch 'devel' of https://github.com/arangodb/arangodb into devel

@@ -1,6 +1,8 @@
 devel
 -----
 
+* fixed issue #2459: compile success but can not run with rocksdb
+
 * `--server.maximal-queue-size` is now an absolute maximum. If the queue is
   full, then 503 is returned. Setting it to 0 means "no limit".
 

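The `--server.maximal-queue-size` entry above describes a hard cap on the server's request queue: once the queue is full, new requests are answered with HTTP 503, and a value of 0 disables the limit. A minimal sketch of those semantics, with illustrative names only (this is not the actual arangod scheduler code):

#include <cstddef>
#include <iostream>

// Illustration of the documented behaviour: a request is only queued while
// the queue is below the configured maximum, a full queue is reported back
// as HTTP 503, and a maximum of 0 means "no limit".
bool tryQueueRequest(std::size_t currentQueueLength,
                     std::size_t maximalQueueSize,  // --server.maximal-queue-size
                     int& httpStatus) {
  if (maximalQueueSize != 0 && currentQueueLength >= maximalQueueSize) {
    httpStatus = 503;  // Service Unavailable: queue is full
    return false;
  }
  httpStatus = 200;
  return true;  // request may be queued
}

int main() {
  int status = 0;
  std::cout << tryQueueRequest(128, 128, status) << " " << status << "\n";  // 0 503
  std::cout << tryQueueRequest(128, 0, status) << " " << status << "\n";    // 1 200 (no limit)
}
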
@@ -16,7 +16,7 @@ Memory management
 This will also lower the chances of the arangod process being killed by the
 operation system's OOM killer.
 
-Note: these options are only available in all builds and environments.
+Note: these options are not available in all builds and environments.
 
 * make arangod start with less V8 JavaScript contexts
 

@@ -25,6 +25,7 @@
 #include "Basics/StaticStrings.h"
 #include "Logger/Logger.h"
 #include "RocksDBEngine/RocksDBCommon.h"
+#include "RocksDBEngine/RocksDBLogValue.h"
 #include "VocBase/replication-common.h"
 #include "VocBase/ticks.h"
 

@@ -87,6 +88,58 @@ class WBReader : public rocksdb::WriteBatch::Handler {
 
   void SingleDelete(rocksdb::Slice const& key) override { handleDeletion(key); }
 
+  void PutLogData(rocksdb::Slice const& blob) {
+    auto type = RocksDBLogValue::type(blob);
+    switch (type) {
+      case RocksDBLogType::BeginTransaction: {
+        break;
+      }
+      case RocksDBLogType::DatabaseCreate: {
+        break;
+      }
+      case RocksDBLogType::DatabaseDrop: {
+        break;
+      }
+      case RocksDBLogType::CollectionCreate: {
+        break;
+      }
+      case RocksDBLogType::CollectionDrop: {
+        break;
+      }
+      case RocksDBLogType::CollectionRename: {
+        break;
+      }
+      case RocksDBLogType::CollectionChange: {
+        break;
+      }
+      case RocksDBLogType::IndexCreate: {
+        break;
+      }
+      case RocksDBLogType::IndexDrop: {
+        break;
+      }
+      case RocksDBLogType::ViewCreate: {
+        break;
+      }
+      case RocksDBLogType::ViewDrop: {
+        break;
+      }
+      case RocksDBLogType::ViewChange: {
+        break;
+      }
+      case RocksDBLogType::DocumentRemove: {
+        break;
+      }
+      default:
+        break;
+    }
+  }
+
+  void startNewBatch() {
+    // starting new write batch
+    // TODO: reset state?
+  }
+
  private:
   bool shouldHandleKey(rocksdb::Slice const& key) {
     if (_limit == 0) {

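For context: WBReader derives from rocksdb::WriteBatch::Handler, and the tailWal hunk below calls batch.writeBatchPtr->Iterate(handler.get()). Iterate replays every operation recorded in a WAL write batch as one virtual callback on the handler, and the marker blobs ArangoDB appends to a batch are what a log-data hook such as the new PutLogData is meant to receive (the RocksDB base class itself names this callback LogData, so how PutLogData is reached depends on wiring not shown in this hunk). A self-contained sketch of the dispatch pattern with stand-in types, not the real RocksDB API:

#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Stand-in handler interface in the spirit of rocksdb::WriteBatch::Handler.
struct Handler {
  virtual ~Handler() = default;
  virtual void Put(std::string const& key, std::string const& value) = 0;
  virtual void LogData(std::string const& blob) = 0;  // marker blobs end up here
};

// Stand-in write batch that records operations and marker blobs.
class WriteBatch {
 public:
  void Put(std::string key, std::string value) {
    _ops.push_back([key, value](Handler& h) { h.Put(key, value); });
  }
  void PutLogData(std::string blob) {
    _ops.push_back([blob](Handler& h) { h.LogData(blob); });
  }
  // Iterate replays the batch: one virtual callback per recorded operation.
  void Iterate(Handler& h) const {
    for (auto const& op : _ops) op(h);
  }
 private:
  std::vector<std::function<void(Handler&)>> _ops;
};

struct PrintingHandler : Handler {
  void Put(std::string const& key, std::string const& value) override {
    std::cout << "Put " << key << " -> " << value << "\n";
  }
  void LogData(std::string const& blob) override {
    std::cout << "marker: " << blob << "\n";  // e.g. a collection-create marker
  }
};

int main() {
  WriteBatch batch;
  batch.PutLogData("CollectionCreate");  // marker written before the data ops
  batch.Put("key", "value");
  PrintingHandler handler;
  batch.Iterate(handler);  // replays: marker first, then the Put
}
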
@@ -203,8 +256,8 @@ RocksDBReplicationResult rocksutils::tailWal(TRI_vocbase_t* vocbase,
         fromTickIncluded = true;
       }
       s = batch.writeBatchPtr->Iterate(handler.get());
-    }
-    if (!s.ok()) {
+      handler->startNewBatch();
+    } else {
       LOG_TOPIC(ERR, Logger::ENGINES) << "error during WAL scan";
       auto converted = convertStatus(s);
       auto result = RocksDBReplicationResult(converted.errorNumber(), lastTick);

@@ -147,9 +147,7 @@ AuthInfo::AuthInfo()
     : _outdated(true),
       _authJwtCache(16384),
       _jwtSecret(""),
-      _queryRegistry(nullptr),
-      _authenticationHandler(nullptr) {
-}
+      _queryRegistry(nullptr) {}
 
 void AuthInfo::setJwtSecret(std::string const& jwtSecret) {
   WRITE_LOCKER(writeLocker, _authJwtLock);

@@ -245,7 +243,7 @@ void AuthInfo::reload() {
 
   // TODO: is this correct?
   if (_authenticationHandler == nullptr) {
-    _authenticationHandler = application_features::ApplicationServer::getFeature<AuthenticationFeature>("Authentication")->getHandler();
+    _authenticationHandler.reset(application_features::ApplicationServer::getFeature<AuthenticationFeature>("Authentication")->getHandler());
   }
 
   {

@@ -383,6 +381,7 @@ AuthResult AuthInfo::checkPassword(std::string const& username,
 
 
   if (it == _authInfo.end() || (it->second.source() == AuthSource::LDAP)) { // && it->second.created() < TRI_microtime() - 60)) {
+    TRI_ASSERT(_authenticationHandler != nullptr);
     AuthenticationResult authResult = _authenticationHandler->authenticate(username, password);
 
     if (!authResult.ok()) {

@@ -205,7 +205,7 @@ class AuthInfo {
   arangodb::basics::LruCache<std::string, arangodb::AuthJwtResult> _authJwtCache;
   std::string _jwtSecret;
   aql::QueryRegistry* _queryRegistry;
-  AuthenticationHandler* _authenticationHandler;
+  std::unique_ptr<AuthenticationHandler> _authenticationHandler;
 };
 }
 

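The AuthInfo hunks above belong together: the raw AuthenticationHandler* member becomes a std::unique_ptr, so the constructor no longer needs the _authenticationHandler(nullptr) initializer, reload() adopts the pointer returned by getHandler() with reset() instead of a plain assignment, and checkPassword() asserts the handler is set before using it. A minimal sketch of that ownership pattern with placeholder types (not the actual ArangoDB classes):

#include <iostream>
#include <memory>
#include <string>

// Placeholder for ArangoDB's AuthenticationHandler.
struct AuthenticationHandler {
  virtual ~AuthenticationHandler() { std::cout << "handler destroyed\n"; }
  virtual bool authenticate(std::string const&, std::string const&) { return true; }
};

// Placeholder factory in the spirit of AuthenticationFeature::getHandler(),
// which hands back a newly allocated handler the caller must own.
AuthenticationHandler* getHandler() { return new AuthenticationHandler(); }

class AuthInfo {
 public:
  void reload() {
    if (_authenticationHandler == nullptr) {
      // reset() takes ownership; any previously held handler is deleted.
      _authenticationHandler.reset(getHandler());
    }
  }
  bool check(std::string const& user, std::string const& pass) {
    return _authenticationHandler != nullptr &&
           _authenticationHandler->authenticate(user, pass);
  }
 private:
  // A default-constructed unique_ptr is already nullptr, so no explicit
  // _authenticationHandler(nullptr) initializer or manual delete is needed.
  std::unique_ptr<AuthenticationHandler> _authenticationHandler;
};

int main() {
  AuthInfo info;
  info.reload();
  std::cout << info.check("root", "secret") << "\n";
}  // the unique_ptr frees the handler automatically here
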
@@ -528,6 +528,11 @@ function runArangoDumpRestore (options, instanceInfo, which, database, rootDir)
     exe = ARANGORESTORE_BIN;
   }
 
+  if(options.extremeVerbosity === true){
+    print(exe);
+    print(args);
+  }
+
   return executeAndWait(exe, toArgv(args), options, 'arangorestore', instanceInfo.rootDir);
 }
 

@@ -515,8 +515,7 @@ class ProgramOptions {
   }
 
   void failNotice(std::string const& message) {
-    // only allowed to call if we already failed
-    TRI_ASSERT(_processingResult.failed());
+    _processingResult.failed(true);
     std::cerr << " " << message << std::endl;
   }
 

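The final hunk changes ProgramOptions::failNotice so that it marks option processing as failed itself, rather than asserting that a failure has already been recorded. A simplified sketch of the new behaviour with stand-in types (not the actual ProgramOptions implementation):

#include <iostream>
#include <string>

// Simplified stand-in for the processing result owned by ProgramOptions.
struct ProcessingResult {
  void failed(bool value) { _failed = value; }
  bool failed() const { return _failed; }
  bool _failed = false;
};

class ProgramOptions {
 public:
  // After the change, failNotice can be the first reporter of an error:
  // it marks the result as failed instead of asserting that it already is.
  void failNotice(std::string const& message) {
    _processingResult.failed(true);
    std::cerr << "  " << message << std::endl;
  }
  bool failed() const { return _processingResult.failed(); }
 private:
  ProcessingResult _processingResult;
};

int main() {
  ProgramOptions options;
  options.failNotice("unknown option '--foo'");
  std::cout << options.failed() << "\n";  // 1
}
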
|