
Merge branch 'devel' of github.com:arangodb/arangodb into devel

commit 178e8aedf9
Michael Hackstein, 2016-11-22 17:45:25 +01:00
11 changed files with 119 additions and 39 deletions


@@ -340,6 +340,11 @@ while [ $# -gt 0 ]; do
             shift
             ;;

+        --staticOpenSSL)
+            shift
+            CONFIGURE_OPTIONS="${CONFIGURE_OPTIONS} -DOPENSSL_USE_STATIC_LIBS=TRUE"
+            ;;
+
         --enterprise)
             shift


@@ -230,23 +230,55 @@ JOB_STATUS FailedServer::status() {
   }

   if (status == PENDING) {
+    auto const& serverHealth = _snapshot(healthPrefix + _server + "/Status").getString();
+    // mop: ohhh...server is healthy again!
+    bool serverHealthy = serverHealth == Supervision::HEALTH_STATUS_GOOD;
+
+    std::shared_ptr<Builder> deleteTodos;
+
     Node::Children const todos = _snapshot(toDoPrefix).children();
     Node::Children const pends = _snapshot(pendingPrefix).children();
-    size_t found = 0;
+    bool hasOpenChildTasks = false;

     for (auto const& subJob : todos) {
       if (!subJob.first.compare(0, _jobId.size() + 1, _jobId + "-")) {
-        found++;
+        if (serverHealthy) {
+          if (!deleteTodos) {
+            deleteTodos.reset(new Builder());
+            deleteTodos->openArray();
+            deleteTodos->openObject();
+          }
+          deleteTodos->add(_agencyPrefix + toDoPrefix + subJob.first, VPackValue(VPackValueType::Object));
+          deleteTodos->add("op", VPackValue("delete"));
+          deleteTodos->close();
+        } else {
+          hasOpenChildTasks = true;
+        }
       }
     }

     for (auto const& subJob : pends) {
       if (!subJob.first.compare(0, _jobId.size() + 1, _jobId + "-")) {
-        found++;
+        hasOpenChildTasks = true;
       }
     }

-    if (!found) {
+    if (deleteTodos) {
+      LOG_TOPIC(INFO, Logger::AGENCY)
+        << "Server " << _server
+        << " is healthy again. Will try to delete any jobs which have not yet started!";
+      deleteTodos->close();
+      deleteTodos->close();
+      // Transact to agency
+      write_ret_t res = transact(_agent, *deleteTodos);
+      if (!res.accepted || res.indices.size() != 1 || !res.indices[0]) {
+        LOG_TOPIC(WARN, Logger::AGENCY)
+          << "Server was healthy. Tried deleting subjobs but failed :(";
+        return status;
+      }
+    }
+
+    if (!hasOpenChildTasks) {
       if (finish("DBServers/" + _server)) {
         return FINISHED;
       }
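A note on the transaction built above: the Builder calls produce one agency write transaction, an outer array holding a single object that maps each not-yet-started ToDo key to an {"op": "delete"} operation. A minimal stand-alone sketch of that shape, assuming only the open-source arangodb/velocypack library; the job key shown is hypothetical, standing in for _agencyPrefix + toDoPrefix + subJob.first:

#include <velocypack/Builder.h>
#include <velocypack/Value.h>
#include <iostream>

using namespace arangodb::velocypack;

int main() {
  // Mirrors the patch: openArray (the transaction list), openObject (one
  // write transaction), then one delete operation per sub-job key.
  Builder deleteTodos;
  deleteTodos.openArray();
  deleteTodos.openObject();
  deleteTodos.add("/arango/Target/ToDo/1-0", Value(ValueType::Object));  // hypothetical key
  deleteTodos.add("op", Value("delete"));
  deleteTodos.close();  // the op object
  deleteTodos.close();  // the transaction object
  deleteTodos.close();  // the transaction list
  std::cout << deleteTodos.slice().toJson() << std::endl;
  // Prints: [{"/arango/Target/ToDo/1-0":{"op":"delete"}}]
}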


@@ -54,6 +54,7 @@ static std::string const blockedShardsPrefix = "/Supervision/Shards/";
 static std::string const serverStatePrefix = "/Sync/ServerStates/";
 static std::string const planVersion = "/Plan/Version";
 static std::string const plannedServers = "/Plan/DBServers";
+static std::string const healthPrefix = "/Supervision/Health/";

 inline arangodb::consensus::write_ret_t transact(Agent* _agent,
                                                  Builder const& transaction,


@@ -111,11 +111,12 @@ class Supervision : public arangodb::Thread {
   /// @brief Upgrade agency
   void upgradeAgency();

- private:
   static constexpr const char* HEALTH_STATUS_GOOD = "GOOD";
   static constexpr const char* HEALTH_STATUS_BAD = "BAD";
   static constexpr const char* HEALTH_STATUS_FAILED = "FAILED";

+ private:
+
   /// @brief Update agency prefix from agency itself
   bool updateAgencyPrefix(size_t nTries = 10, int intervalSec = 1);
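The point of this move is visibility: FailedServer::status() above now compares against Supervision::HEALTH_STATUS_GOOD from outside the class, and members listed after `private:` are not reachable there. A stripped-down illustration of the access rule (not the real Supervision class):

struct Supervision {
 public:
  static constexpr const char* HEALTH_STATUS_GOOD = "GOOD";  // usable by other code
 private:
  static constexpr const char* INTERNAL_ONLY = "hidden";     // class-internal only
};

int main() {
  const char* ok = Supervision::HEALTH_STATUS_GOOD;  // compiles
  // const char* bad = Supervision::INTERNAL_ONLY;   // error: 'INTERNAL_ONLY' is private
  return ok == nullptr;
}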


@@ -40,6 +40,59 @@ thread_local TRI_request_statistics_t* TRI_request_statistics_t::STATS =

 static size_t const QUEUE_SIZE = 1000;

+std::string TRI_request_statistics_t::to_string() {
+  std::stringstream ss;
+  ss << std::boolalpha << std::setprecision(20) << "statistics "
+     << std::endl
+     << "_readStart " << _readStart << std::endl
+     << "_readEnd " << _readEnd << std::endl
+     << "_queueStart " << _queueStart << std::endl
+     << "_queueEnd " << _queueEnd << std::endl
+     << "_requestStart " << _requestStart << std::endl
+     << "_requestEnd " << _requestEnd << std::endl
+     << "_writeStart " << _writeStart << std::endl
+     << "_writeEnd " << _writeEnd << std::endl
+     << "_receivedBytes " << _receivedBytes << std::endl
+     << "_sentBytes " << _sentBytes << std::endl
+     << "_async " << _async << std::endl
+     << "_tooLarge " << _tooLarge << std::endl
+     << "_executeError " << _executeError << std::endl
+     << "_ignore " << _ignore << std::endl;
+  return ss.str();
+}
+
+void TRI_request_statistics_t::trace_log() {
+  LOG_TOPIC(TRACE, Logger::REQUESTS) << std::boolalpha << std::setprecision(20)
+                                     << "_readStart " << _readStart;
+  LOG_TOPIC(TRACE, Logger::REQUESTS) << std::boolalpha << std::setprecision(20)
+                                     << "_readEnd " << _readEnd;
+  LOG_TOPIC(TRACE, Logger::REQUESTS) << std::boolalpha << std::setprecision(20)
+                                     << "_queueStart " << _queueStart;
+  LOG_TOPIC(TRACE, Logger::REQUESTS) << std::boolalpha << std::setprecision(20)
+                                     << "_queueEnd " << _queueEnd;
+  LOG_TOPIC(TRACE, Logger::REQUESTS) << std::boolalpha << std::setprecision(20)
+                                     << "_requestStart " << _requestStart;
+  LOG_TOPIC(TRACE, Logger::REQUESTS) << std::boolalpha << std::setprecision(20)
+                                     << "_requestEnd " << _requestEnd;
+  LOG_TOPIC(TRACE, Logger::REQUESTS) << std::boolalpha << std::setprecision(20)
+                                     << "_writeStart " << _writeStart;
+  LOG_TOPIC(TRACE, Logger::REQUESTS) << std::boolalpha << std::setprecision(20)
+                                     << "_writeEnd " << _writeEnd;
+  LOG_TOPIC(TRACE, Logger::REQUESTS) << std::boolalpha << std::setprecision(20)
+                                     << "_receivedBytes " << _receivedBytes;
+  LOG_TOPIC(TRACE, Logger::REQUESTS) << std::boolalpha << std::setprecision(20)
+                                     << "_sentBytes " << _sentBytes;
+  LOG_TOPIC(TRACE, Logger::REQUESTS) << std::boolalpha << std::setprecision(20)
+                                     << "_async " << _async;
+  LOG_TOPIC(TRACE, Logger::REQUESTS) << std::boolalpha << std::setprecision(20)
+                                     << "_tooLarge " << _tooLarge;
+  LOG_TOPIC(TRACE, Logger::REQUESTS) << std::boolalpha << std::setprecision(20)
+                                     << "_executeError " << _executeError;
+  LOG_TOPIC(TRACE, Logger::REQUESTS) << std::boolalpha << std::setprecision(20)
+                                     << "_ignore " << _ignore;
+}
+
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief lock for request statistics data
 ////////////////////////////////////////////////////////////////////////////////


@@ -80,27 +80,8 @@ struct TRI_request_statistics_t {
 #endif
   }

-  std::string to_string() {
-    std::stringstream ss;
-    ss << std::boolalpha << std::setprecision(20) << "statistics "
-       << std::endl
-       << "_readStart " << _readStart << std::endl
-       << "_readEnd " << _readEnd << std::endl
-       << "_queueStart " << _queueStart << std::endl
-       << "_queueEnd " << _queueEnd << std::endl
-       << "_requestStart " << _requestStart << std::endl
-       << "_requestEnd " << _requestEnd << std::endl
-       << "_writeStart " << _writeStart << std::endl
-       << "_writeEnd " << _writeEnd << std::endl
-       << "_receivedBytes " << _receivedBytes << std::endl
-       << "_sentBytes " << _sentBytes << std::endl
-       << "_async " << _async << std::endl
-       << "_tooLarge " << _tooLarge << std::endl
-       << "_executeError " << _executeError << std::endl
-       << "_ignore " << _ignore << std::endl;
-    return ss.str();
-  }
+  std::string to_string();
+  void trace_log();

   double _readStart;  // CommTask::processRead - read first byte of message
   double _readEnd;    // CommTask::processRead - message complete
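Together with the previous hunk this is a plain header-to-source move: the inline std::stringstream body leaves the struct definition, so the header can drop its <sstream> dependency and the method bodies are compiled once instead of in every includer. The pattern in miniature (a sketch, not the ArangoDB sources):

#include <iostream>
#include <sstream>
#include <string>

// --- header (sketch): declarations only, no <sstream> needed here ---
struct RequestStatistics {
  std::string to_string();  // defined out of line below
  double readStart = 0.0;
  double readEnd = 0.0;
};

// --- source file (sketch): the one translation unit paying for <sstream> ---
std::string RequestStatistics::to_string() {
  std::stringstream ss;
  ss << "statistics\n"
     << "_readStart " << readStart << "\n"
     << "_readEnd " << readEnd << "\n";
  return ss.str();
}

int main() {
  RequestStatistics stats;
  stats.readStart = 1.5;
  std::cout << stats.to_string();
}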


@@ -10,9 +10,9 @@ set(CPACK_RPM_USER_BINARY_SPECFILE "${CMAKE_CURRENT_BINARY_DIR}/arangodb.spec")

 ################################################################################
 # deploy the Init script:

-if (${RPM_DISTRO} STREQUAL "SUSE13")
+if (RPM_DISTRO STREQUAL "SUSE13")
   set(RPM_INIT_SCRIPT "${PROJECT_SOURCE_DIR}/Installation/rpm/rc.arangod.OpenSuSE_13")
-elseif (${RPM_DISTRO} STREQUAL "SUSE")
+elseif (RPM_DISTRO STREQUAL "SUSE")
   set(RPM_INIT_SCRIPT "${PROJECT_SOURCE_DIR}/Installation/rpm/rc.arangod.OpenSuSE")
 else () # fall back to centos:
   set(RPM_INIT_SCRIPT "${PROJECT_SOURCE_DIR}/Installation/rpm/rc.arangod.Centos")


@@ -50,12 +50,19 @@
   }

   if (internal.threadNumber === 0) {
+    var systemCollectionsCreated = global.ArangoAgency.get('SystemCollectionsCreated');
+    if (!(systemCollectionsCreated && systemCollectionsCreated.arango && systemCollectionsCreated.arango.SystemCollectionsCreated)) {
     // Wait for synchronous replication of system colls to settle:
-    console.info('Waiting for synchronous replication of system collections...');
+    console.info('Waiting for initial replication of system collections...');
     var db = internal.db;
     var colls = db._collections();
     colls = colls.filter(c => c.name()[0] === '_');
-    require('@arangodb/cluster').waitForSyncRepl('_system', colls);
+    if (!require('@arangodb/cluster').waitForSyncRepl('_system', colls)) {
+      console.error('System collections not properly set up. Starting anyway now...');
+    } else {
+      global.ArangoAgency.set('SystemCollectionsCreated', true);
+    }
+    }
   }

   if (internal.threadNumber === 0) {


@@ -2018,7 +2018,7 @@ function pathHandler (req, res, options, next) {
   'use strict';

   var filepath, root, filename, encodedFilename;
-  filepath = req.suffix.length ? path.resolve(path.sep, ...req.suffix.map((part) => decodeURIComponent(part))) : '';
+  filepath = req.suffix.length ? path.normalize(['', ...req.suffix.map((part) => decodeURIComponent(part))].join(path.sep)) : '';
   root = options.path;

   if (options.root) {
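This hunk and the two swagger hunks below make the same swap: a user-controlled suffix is no longer fed to path.resolve but anchored at the separator and run through path.normalize. Both functions collapse '..' segments and clamp them at the root; the practical difference is that path.resolve restarts resolution at any argument that is itself absolute (including drive-letter segments on Windows), while path.normalize treats the already-joined string as one path before it is handed to joinPath under a trusted directory. A C++ analogue of the anchor-then-normalize idea, a sketch assuming POSIX paths and a hypothetical base directory (std::filesystem's lexically_normal likewise drops dot-dots that sit directly after the root):

#include <filesystem>
#include <iostream>
#include <string>

namespace fs = std::filesystem;

// Anchor the untrusted suffix at "/", normalize it, then join the remainder
// under the trusted base directory.
fs::path sandboxedPath(fs::path const& base, std::string const& userSuffix) {
  fs::path anchored = (fs::path("/") / userSuffix).lexically_normal();
  return base / anchored.relative_path();
}

int main() {
  fs::path base = "/srv/arangodb/assets/swagger";  // hypothetical base dir
  std::cout << sandboxedPath(base, "index.html").string() << "\n";
  // -> /srv/arangodb/assets/swagger/index.html
  std::cout << sandboxedPath(base, "../../../etc/passwd").string() << "\n";
  // ".." is clamped at the anchor, so the result stays under base:
  // -> /srv/arangodb/assets/swagger/etc/passwd
}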


@@ -25,7 +25,7 @@
 var _ = require('lodash');
 var fs = require('fs');
 var joinPath = require('path').join;
-var resolvePath = require('path').resolve;
+var normalizePath = require('path').normalize;
 var internal = require('internal');
 var ArangoError = require('@arangodb').ArangoError;
 var errors = require('@arangodb').errors;
@@ -97,7 +97,7 @@ function swaggerPath (path, basePath) {
   if (!basePath) {
     basePath = joinPath(internal.startupPath, 'server', 'assets', 'swagger');
   }
-  path = resolvePath('/', path);
+  path = normalizePath('/' + path);
   return joinPath(basePath, path);
 }


@@ -25,7 +25,7 @@
 const NotFound = require('http-errors').NotFound;
 const fs = require('fs');
 const joinPath = require('path').join;
-const resolvePath = require('path').resolve;
+const normalizePath = require('path').normalize;
 const internal = require('internal');
 const errors = require('@arangodb').errors;
 const FoxxManager = require('@arangodb/foxx/manager');
@@ -102,7 +102,7 @@ module.exports = function createSwaggerRouteHandler (foxxMount, opts) {
   } else if (path === 'index.html') {
     path = indexFile;
   }
-  path = resolvePath('/', path);
+  path = normalizePath('/' + path);
   const filePath = joinPath(swaggerRoot, path);
   if (!fs.isFile(filePath)) {
     throw new NotFound(`unknown path "${req._raw.url}"`);