From 6aad24ab490f745c168a14d615d4fc834b8f3ace Mon Sep 17 00:00:00 2001 From: Andreas Streichardt Date: Wed, 17 Aug 2016 16:31:19 +0200 Subject: [PATCH 1/7] Self managing (instances) tests --- js/client/modules/@arangodb/testing.js | 344 +++++------------- .../@arangodb/testing/InstanceManager.js | 283 ++++++++++++++ js/client/modules/@arangodb/testing/utils.js | 269 ++++++++++++++ js/client/tests/resilience/cluster-spec.js | 50 +++ js/client/tests/resilience/foxxmaster-spec.js | 146 ++++++++ js/client/tests/resilience/foxxmaster.js | 162 --------- js/common/modules/@arangodb/common.js | 18 + 7 files changed, 850 insertions(+), 422 deletions(-) create mode 100644 js/client/modules/@arangodb/testing/InstanceManager.js create mode 100644 js/client/modules/@arangodb/testing/utils.js create mode 100644 js/client/tests/resilience/cluster-spec.js create mode 100644 js/client/tests/resilience/foxxmaster-spec.js delete mode 100644 js/client/tests/resilience/foxxmaster.js diff --git a/js/client/modules/@arangodb/testing.js b/js/client/modules/@arangodb/testing.js index 6b104a76aa..cb9ab037fe 100644 --- a/js/client/modules/@arangodb/testing.js +++ b/js/client/modules/@arangodb/testing.js @@ -201,6 +201,13 @@ const time = require('internal').time; const toArgv = require('internal').toArgv; const wait = require('internal').wait; const platform = require('internal').platform; +const endpointToURL = require('@arangodb/common.js').endpointToURL; + +const findFreePort = require('@arangodb/testing/utils.js').findFreePort; +const startArango = require('@arangodb/testing/utils.js').startArango; +const makeArgsArangod = require('@arangodb/testing/utils.js').makeArgsArangod; +const executeArangod = require('@arangodb/testing/utils.js').executeArangod; +const makeAuthorizationHeaders = require('@arangodb/testing/utils.js').makeAuthorizationHeaders; const BLUE = require('internal').COLORS.COLOR_BLUE; const CYAN = require('internal').COLORS.COLOR_CYAN; @@ -281,34 +288,6 @@ function makeResults (testname) { }; } -// ////////////////////////////////////////////////////////////////////////////// -// / @brief arguments for testing (server) -// ////////////////////////////////////////////////////////////////////////////// - -function makeArgsArangod (options, appDir) { - if (appDir === undefined) { - appDir = fs.getTempPath(); - } - - fs.makeDirectoryRecursive(appDir, true); - - return { - 'configuration': 'none', - 'database.force-sync-properties': 'false', - 'database.maximal-journal-size': '1048576', - 'javascript.app-path': appDir, - 'javascript.startup-directory': JS_DIR, - 'javascript.v8-contexts': '5', - 'http.trusted-origin': options.httpTrustedOrigin || 'all', - 'log.level': 'warn', - 'log.level=replication=warn': null, - 'server.allow-use-database': 'true', - 'server.authentication': 'false', - 'server.threads': '20', - 'ssl.keyfile': PEM_FILE - }; -} - // ////////////////////////////////////////////////////////////////////////////// // / @brief arguments for testing (client) // ////////////////////////////////////////////////////////////////////////////// @@ -323,36 +302,6 @@ function makeArgsArangosh (options) { }; } -// ////////////////////////////////////////////////////////////////////////////// -// / @brief adds authorization headers -// ////////////////////////////////////////////////////////////////////////////// - -function makeAuthorizationHeaders (options) { - return { - 'headers': { - 'Authorization': 'Basic ' + base64Encode(options.username + ':' + - options.password) - } - }; -} -// 
////////////////////////////////////////////////////////////////////////////// -// / @brief converts endpoints to URL -// ////////////////////////////////////////////////////////////////////////////// - -function endpointToURL (endpoint) { - if (endpoint.substr(0, 6) === 'ssl://') { - return 'https://' + endpoint.substr(6); - } - - const pos = endpoint.indexOf('://'); - - if (pos === -1) { - return 'http://' + endpoint; - } - - return 'http' + endpoint.substr(pos); -} - // ////////////////////////////////////////////////////////////////////////////// // / @brief scans the log files for important infos // ////////////////////////////////////////////////////////////////////////////// @@ -594,29 +543,6 @@ function cleanupDBDirectories (options) { } } -// ////////////////////////////////////////////////////////////////////////////// -// / @brief finds a free port -// ////////////////////////////////////////////////////////////////////////////// - -function findFreePort (maxPort) { - if (typeof maxPort !== 'number') { - maxPort = 32768; - } - if (maxPort < 2048) { - maxPort = 2048; - } - while (true) { - const port = Math.floor(Math.random() * (maxPort - 1024)) + 1024; - const free = testPort('tcp://0.0.0.0:' + port); - - if (free) { - return port; - } - } - - return 8529; -} - // ////////////////////////////////////////////////////////////////////////////// // / @brief build a unix path // ////////////////////////////////////////////////////////////////////////////// @@ -637,7 +563,7 @@ function makePathGeneric (path) { // / @brief runs a remote unittest file using /_admin/execute // ////////////////////////////////////////////////////////////////////////////// -function runThere (options, instanceInfo, file) { +function runThere (options, endpoint, file) { try { let testCode; @@ -659,7 +585,7 @@ function runThere (options, instanceInfo, file) { httpOptions.returnBodyOnError = true; - const reply = download(instanceInfo.url + '/_admin/execute?returnAsJSON=true', + const reply = download(endpointToURL(endpoint) + '/_admin/execute?returnAsJSON=true', testCode, httpOptions); @@ -692,10 +618,27 @@ runThere.info = 'runThere'; // / @brief runs a list of tests // ////////////////////////////////////////////////////////////////////////////// -function performTests (options, testList, testname, runFn) { - let instanceInfo = startInstance('tcp', options, {}, testname); +function performTests(options, testList, testname, runFn, instanceManager) { + if (!instanceManager) { + let instanceInfo; + instanceManager = { + start: function() { + instanceInfo = startInstance('tcp', options, {}, testname); + return instanceInfo !== false; + }, + check: function() { + return checkInstanceAlive(instanceInfo, options); + }, + getEndpoint: function() { + return instanceInfo.endpoint; + }, + cleanup: function() { + shutdownInstance(instanceInfo, options); + } + } + } - if (instanceInfo === false) { + if (instanceManager && instanceManager.start() === false) { return { setup: { status: false, @@ -717,78 +660,68 @@ function performTests (options, testList, testname, runFn) { let results = {}; let continueTesting = true; + + let logFn; + if (options.extremeVerbosity) { + logFn = (testname, reason) => { + print('Skipped ' + testname + ' because of ' + reason); + } + } else { + logFn = function() {}; + } + let testcases = testList.filter(testcase => { + return filterTestcaseByOptions(testcase, options, logFn); + }); - for (let i = 0; i < testList.length; i++) { - let te = testList[i]; - let filtered = {}; + let endpoint = 
instanceManager.getEndpoint(); - if (filterTestcaseByOptions(te, options, filtered)) { - let first = true; - let loopCount = 0; + for (let i = 0; i < testcases.length; i++) { + let te = testcases[i]; + let loopCount = 0; + do { + print('\n' + Date() + ' ' + runFn.info + ': Trying', te, '...'); + let reply = runFn(options, endpoint, te); - while (first || options.loopEternal) { - if (!continueTesting) { - print('oops!'); - print('Skipping, ' + te + ' server is gone.'); + if (reply.hasOwnProperty('status')) { + results[te] = reply; - results[te] = { - status: false, - message: instanceInfo.exitStatus - }; - - instanceInfo.exitStatus = 'server is gone.'; + if (results[te].status === false) { + options.cleanup = false; + } + if (!reply.status && !options.force) { break; } + } else { + results[te] = { + status: false, + message: reply + }; - print('\n' + Date() + ' ' + runFn.info + ': Trying', te, '...'); - let reply = runFn(options, instanceInfo, te); - - if (reply.hasOwnProperty('status')) { - results[te] = reply; - - if (results[te].status === false) { - options.cleanup = false; - } - - if (!reply.status && !options.force) { - break; - } - } else { - results[te] = { - status: false, - message: reply - }; - - if (!options.force) { - break; - } - } - - continueTesting = checkInstanceAlive(instanceInfo, options); - - first = false; - - if (options.loopEternal) { - if (loopCount % options.loopSleepWhen === 0) { - print('sleeping...'); - sleep(options.loopSleepSec); - print('continuing.'); - } - - ++loopCount; + if (!options.force) { + break; } } - } else { - if (options.extremeVerbosity) { - print('Skipped ' + te + ' because of ' + filtered.filter); + + continueTesting = instanceManager.check(); + + if (options.loopEternal) { + if (loopCount % options.loopSleepWhen === 0) { + print('sleeping...'); + sleep(options.loopSleepSec); + print('continuing.'); + } + + ++loopCount; } - } + } while (options.loopEternal); } - print('Shutting down...'); - shutdownInstance(instanceInfo, options); - print('done.'); + if (instanceManager) { + print('Shutting down...'); + instanceManager.cleanup(); + print('done.'); + } return results; } @@ -879,43 +812,6 @@ function runStressTest (options, command, testname) { return {}; } -// ////////////////////////////////////////////////////////////////////////////// -// / @brief executes a command, possible with valgrind -// ////////////////////////////////////////////////////////////////////////////// - -function executeArangod (cmd, args, options) { - if (options.valgrind) { - let valgrindOpts = {}; - - if (options.valgrindArgs) { - valgrindOpts = options.valgrindArgs; - } - - let testfn = options.valgrindFileBase; - - if (testfn.length > 0) { - testfn += '_'; - } - - if (valgrindOpts.xml === 'yes') { - valgrindOpts['xml-file'] = testfn + '.%p.xml'; - } - - valgrindOpts['log-file'] = testfn + '.%p.valgrind.log'; - - args = toArgv(valgrindOpts, true).concat([cmd]).concat(args); - cmd = options.valgrind; - } else if (options.rr) { - args = [cmd].concat(args); - cmd = 'rr'; - } - - if (options.extremeVerbosity) { - print('starting process ' + cmd + ' with arguments: ' + JSON.stringify(args)); - } - return executeExternal(cmd, args); -} - // ////////////////////////////////////////////////////////////////////////////// // / @brief executes a command and wait for result // ////////////////////////////////////////////////////////////////////////////// @@ -1020,9 +916,11 @@ function executeAndWait (cmd, args, options, valgrindTest) { // / @brief runs file in arangosh // 
////////////////////////////////////////////////////////////////////////////// -function runInArangosh (options, instanceInfo, file, addArgs) { +function runInArangosh (options, endpoint, file, addArgs) { let args = makeArgsArangosh(options); - args['server.endpoint'] = instanceInfo.endpoint; + if (endpoint) { + args['server.endpoint'] = endpoint; + } args['javascript.unit-tests'] = fs.join(TOP_DIR, file); if (!options.verbose) { @@ -1032,7 +930,6 @@ function runInArangosh (options, instanceInfo, file, addArgs) { if (addArgs !== undefined) { args = Object.assign(args, addArgs); } - fs.write('instanceinfo.json', JSON.stringify(instanceInfo)); let rc = executeAndWait(ARANGOSH_BIN, toArgv(args), options); let result; @@ -1051,8 +948,8 @@ function runInArangosh (options, instanceInfo, file, addArgs) { } function createArangoshRunner(args) { - let runner = function(options, instanceInfo, file) { - return runInArangosh(options, instanceInfo, file, args); + let runner = function(options, endpoint, file) { + return runInArangosh(options, endpoint, file, args); }; runner.info = 'arangosh'; return runner; @@ -1349,75 +1246,6 @@ function startInstanceCluster (instanceInfo, protocol, options, return true; } -function startArango (protocol, options, addArgs, rootDir, role) { - const dataDir = fs.join(rootDir, 'data'); - const appDir = fs.join(rootDir, 'apps'); - - fs.makeDirectoryRecursive(dataDir); - fs.makeDirectoryRecursive(appDir); - - let args = makeArgsArangod(options, appDir); - let endpoint; - let port; - - if (!addArgs['server.endpoint']) { - port = findFreePort(options.maxPort); - endpoint = protocol + '://127.0.0.1:' + port; - } else { - endpoint = addArgs['server.endpoint']; - port = endpoint.split(':').pop(); - } - - let instanceInfo = { - role, port, endpoint, rootDir}; - - args['server.endpoint'] = endpoint; - args['database.directory'] = dataDir; - args['log.file'] = fs.join(rootDir, 'log'); - - if (options.verbose) { - args['log.level'] = 'info'; - } else { - args['log.level'] = 'error'; - } - - // flush log messages directly and not asynchronously - // (helps debugging) - args['log.force-direct'] = 'true'; - - if (protocol === 'ssl') { - args['ssl.keyfile'] = fs.join('UnitTests', 'server.pem'); - } - - args = Object.assign(args, options.extraArgs); - - if (addArgs !== undefined) { - args = Object.assign(args, addArgs); - } - - instanceInfo.url = endpointToURL(instanceInfo.endpoint); - instanceInfo.pid = executeArangod(ARANGOD_BIN, toArgv(args), options).pid; - instanceInfo.role = role; - - if (platform.substr(0, 3) === 'win') { - const procdumpArgs = [ - '-accepteula', - '-e', - '-ma', - instanceInfo.pid, - fs.join(rootDir, 'core.dmp') - ]; - - try { - instanceInfo.monitor = executeExternal('procdump', procdumpArgs); - } catch (x) { - print('failed to start procdump - is it installed?'); - throw x; - } - } - return instanceInfo; -} - function startInstanceAgency (instanceInfo, protocol, options, addArgs, rootDir) { const dataDir = fs.join(rootDir, 'data'); @@ -3386,10 +3214,6 @@ testFuncs.resilience = function (options) { testFuncs.client_resilience = function (options) { findTests(); - options.cluster = true; - if (options.coordinators < 2) { - options.coordinators = 2; - } return performTests(options, testsCases.client_resilience, 'client_resilience', createArangoshRunner()); }; diff --git a/js/client/modules/@arangodb/testing/InstanceManager.js b/js/client/modules/@arangodb/testing/InstanceManager.js new file mode 100644 index 0000000000..3246268a42 --- /dev/null +++ 
b/js/client/modules/@arangodb/testing/InstanceManager.js @@ -0,0 +1,283 @@ +const fs = require('fs'); +const path = require('path'); +const _ = require('lodash'); + +const findFreePort = require('@arangodb/testing/utils.js').findFreePort; +const startArango = require('@arangodb/testing/utils.js').startArango; +const makeArgsArangod = require('@arangodb/testing/utils.js').makeArgsArangod; +const executeArangod = require('@arangodb/testing/utils.js').executeArangod; +const makeAuthorizationHeaders = require('@arangodb/testing/utils.js').makeAuthorizationHeaders; +const ARANGOD_BIN = require('@arangodb/testing/utils.js').ARANGOD_BIN; + +const killExternal = require('internal').killExternal; +const statusExternal = require('internal').statusExternal; +const download = require('internal').download; +const wait = require('internal').wait; + +// ////////////////////////////////////////////////////////////////////////////// +// / @brief periodic checks whether spawned arangod processes are still alive +// ////////////////////////////////////////////////////////////////////////////// +function checkArangoAlive (arangod, options = {}) { + if (arangod.hasOwnProperty('exitStatus')) { + return false; + } + + const res = statusExternal(arangod.pid, false); + const ret = res.status === 'RUNNING'; + + if (!ret) { + print('ArangoD with PID ' + arangod.pid + ' gone:'); + print(arangod); + + if (res.hasOwnProperty('signal') && + ((res.signal === 11) || + (res.signal === 6) || + // Windows sometimes has random numbers in signal... + (platform.substr(0, 3) === 'win') + ) + ) { + arangod.exitStatus = res; + //analyzeServerCrash(arangod, options, 'health Check'); + } + } + + return ret; +} + +let makeArgs = function (name, rootDir, options, args) { + args = args || options.extraArgs; + + let subDir = fs.join(rootDir, name); + fs.makeDirectoryRecursive(subDir); + + let subArgs = makeArgsArangod(options, fs.join(subDir, 'apps')); + subArgs = Object.assign(subArgs, args); + + return [subArgs, subDir]; +}; + +class InstanceManager { + constructor(name) { + this.rootDir = fs.join(fs.getTempFile(), name); + this.instances = []; + } + + startDbServer(options = {}) { + let endpoint = 'tcp://127.0.0.1:' + findFreePort(options.maxPort); + let primaryArgs = options.extraArgs ? _.clone(options.extraArgs) : {}; + primaryArgs['server.endpoint'] = endpoint; + primaryArgs['cluster.my-address'] = endpoint; + primaryArgs['cluster.my-local-info'] = endpoint; + primaryArgs['cluster.my-role'] = 'PRIMARY'; + primaryArgs['cluster.agency-endpoint'] = this.getAgencyEndpoint(); + + this.instances.push(startArango('tcp', options, ...makeArgs('dbserver' + Math.floor(Math.random() * 1000000000), this.rootDir, options, primaryArgs), 'dbserver')); + return this.instances[this.instances.length - 1]; + } + + getAgencyEndpoint() { + return this.instances.filter(instance => { + return instance.role == 'agent'; + })[0].endpoint; + } + + startCoordinator(options = {}) { + let endpoint = 'tcp://127.0.0.1:' + findFreePort(options.maxPort); + let coordinatorArgs = options.extraArgs ? 
_.clone(options.extraArgs) : {};
+    coordinatorArgs['server.endpoint'] = endpoint;
+    coordinatorArgs['cluster.my-address'] = endpoint;
+    coordinatorArgs['cluster.my-local-info'] = endpoint;
+    coordinatorArgs['cluster.my-role'] = 'COORDINATOR';
+    coordinatorArgs['cluster.agency-endpoint'] = this.getAgencyEndpoint();
+
+    this.instances.push(startArango('tcp', options, ...makeArgs('coordinator' + Math.floor(Math.random() * 1000000000), this.rootDir, options, coordinatorArgs), 'coordinator'));
+    return this.instances[this.instances.length - 1];
+  }
+
+  startAgency(options = {}) {
+    let size = options.agencySize || 1;
+    if (options.agencyWaitForSync === undefined) {
+      options.agencyWaitForSync = false;
+    }
+    const wfs = options.agencyWaitForSync;
+    for (var i=0;i<size;i++) {
+      let instanceArgs = options.extraArgs ? _.clone(options.extraArgs) : {};
+      instanceArgs['agency.id'] = String(i);
+      instanceArgs['agency.size'] = String(size);
+      instanceArgs['agency.wait-for-sync'] = String(wfs);
+      instanceArgs['agency.supervision'] = 'true';
+      let dir = fs.join(this.rootDir, 'agency-' + i);
+      fs.makeDirectoryRecursive(dir);
+      if (i === size - 1) {
+        const port = findFreePort(options.maxPort);
+        instanceArgs['server.endpoint'] = 'tcp://127.0.0.1:' + port;
+        let l = [];
+        this.instances.filter(instance => {
+          return instance.role == 'agent';
+        })
+        .forEach(arangod => {
+          l.push('--agency.endpoint');
+          l.push(arangod.endpoint);
+        });
+        l.push('--agency.endpoint');
+        l.push('tcp://127.0.0.1:' + port);
+        l.push('--agency.notify');
+        l.push('true');
+
+        instanceArgs['flatCommands'] = l;
+      }
+      this.instances.push(startArango('tcp', options, instanceArgs, dir, 'agent'));
+    }
+    return this.instances.filter(instance => {
+      return instance.role == 'agent';
+    });
+  }
+
+
+
+  startCluster(numAgents, numCoordinators, numDbServers, options = {}) {
+    print("Starting Cluster with Agents: " + numAgents + " Coordinators: " + numCoordinators + " DBServers: " + numDbServers);
+
+    let agencyOptions = options.agents || {};
+    _.extend(agencyOptions, {agencySize: numAgents});
+    this.startAgency(agencyOptions);
+
+    let coordinatorOptions = options.coordinators || {};
+    let i;
+    for (i=0;i<numCoordinators;i++) {
+      this.startCoordinator(coordinatorOptions);
+    }
+
+    let dbServerOptions = options.dbServers || {};
+    for (i=0;i<numDbServers;i++) {
+      this.startDbServer(dbServerOptions);
+    }
+
+    let debugInfo = instance => {
+      print("pid: " + instance.pid +", role: " + instance.role + ", endpoint: " + instance.endpoint);
+    };
+    this.agents().forEach(debugInfo);
+    this.coordinators().forEach(debugInfo);
+    this.dbServers().forEach(debugInfo);
+
+    return this.coordinators()[0].endpoint;
+  }
+
+  waitForAllInstances() {
+    let count = 0;
+    this.instances.forEach(arangod => {
+      while (true) {
+        const reply = download(arangod.url + '/_api/version', '', makeAuthorizationHeaders(arangod.options));
+
+        if (!reply.error && reply.code === 200) {
+          break;
+        }
+
+        ++count;
+
+        if (count % 60 === 0) {
+          if (!checkArangoAlive(arangod)) {
+            throw new Error('Arangod with pid ' + arangod.pid + ' was not running. 
Full info: ' + JSON.stringify(arangod)); + } + } + wait(0.5, false); + } + }); + return this.getEndpoint(); + } + + getEndpoint() { + return this.coordinators()[0].endpoint; + } + + check() { + let failedInstances = this.instances.filter(instance => { + instance.exitStatus = statusExternal(instance.pid, false); + return instance.exitStatus.status != 'RUNNING'; + }); + + if (failedInstances.length > 0) { + throw new Error('Some instances died'); + } + } + + cleanup() { + console.warn('Shutting down cluster'); + const requestOptions = makeAuthorizationHeaders({}); + requestOptions.method = 'DELETE'; + download(this.coordinators()[0].url + '/_admin/shutdown?shutdown_cluster=1', '', requestOptions); + let timeout = 60; + let waitTime = 0.5; + let start = Date.now(); + + let kap0tt = false; + let toShutdown = this.instances.slice(); + while (toShutdown.length > 0) { + toShutdown.forEach(instance => { + instance.exitStatus = statusExternal(instance.pid, false); + }); + + toShutdown = toShutdown.filter(instance => { + return instance.exitStatus.status == 'RUNNING'; + }); + + if (toShutdown.length > 0) { + let totalTime = Date.now() - start; + if (totalTime / 1000 > timeout) { + kap0tt = true; + toShutdown.forEach(instance => { + //killExternal(instance.pid); + }); + break; + } + wait(waitTime); + } + } + if (!kap0tt) { + fs.removeDirectoryRecursive(this.rootDir, true); + } + } + + dbServers() { + return this.instances.filter(instance => { + return instance.role == 'dbserver'; + }); + } + + coordinators() { + return this.instances.filter(instance => { + return instance.role == 'coordinator'; + }); + } + + agents() { + return this.instances.filter(instance => { + return instance.role == 'agent'; + }); + } + + kill(instance) { + let index = this.instances.indexOf(instance); + if (index === -1) { + throw new Error('Couldn\'t find instance', instance); + } + + killExternal(instance.pid); + instance.status = 'KILLED'; + } + + restart(instance) { + let index = this.instances.indexOf(instance); + if (index === -1) { + throw new Error('Couldn\'t find instance', instance); + } + + instance.pid = executeArangod(ARANGOD_BIN, instance.args, instance.options).pid; + } +} + +module.exports = InstanceManager; diff --git a/js/client/modules/@arangodb/testing/utils.js b/js/client/modules/@arangodb/testing/utils.js new file mode 100644 index 0000000000..c4d379808b --- /dev/null +++ b/js/client/modules/@arangodb/testing/utils.js @@ -0,0 +1,269 @@ +'use strict'; + +const fs = require('fs'); +const testPort = require('internal').testPort; +const endpointToURL = require('@arangodb/common.js').endpointToURL; +const toArgv = require('internal').toArgv; +const executeExternal = require('internal').executeExternal; +const platform = require('internal').platform; +const _ = require('lodash'); +const base64Encode = require('internal').base64Encode; + +let TOP_DIR = findTopDir(); +let BIN_DIR = fs.join(TOP_DIR, 'build', 'bin'); +let ARANGOD_BIN = fs.join(BIN_DIR, 'arangod'); + + +// ////////////////////////////////////////////////////////////////////////////// +// / @brief executes a command, possible with valgrind +// ////////////////////////////////////////////////////////////////////////////// + +function executeArangod (cmd, args, options) { + if (options.valgrind) { + let valgrindOpts = {}; + + if (options.valgrindArgs) { + valgrindOpts = options.valgrindArgs; + } + + let testfn = options.valgrindFileBase; + + if (testfn.length > 0) { + testfn += '_'; + } + + if (valgrindOpts.xml === 'yes') { + valgrindOpts['xml-file'] = 
testfn + '.%p.xml'; + } + + valgrindOpts['log-file'] = testfn + '.%p.valgrind.log'; + + args = toArgv(valgrindOpts, true).concat([cmd]).concat(args); + cmd = options.valgrind; + } else if (options.rr) { + args = [cmd].concat(args); + cmd = 'rr'; + } + + if (options.extremeVerbosity) { + print('starting process ' + cmd + ' with arguments: ' + JSON.stringify(args)); + } + return executeExternal(cmd, args); +} + +function startArango (protocol, options, addArgs, rootDir, role) { + const dataDir = fs.join(rootDir, 'data'); + const appDir = fs.join(rootDir, 'apps'); + + fs.makeDirectoryRecursive(dataDir); + fs.makeDirectoryRecursive(appDir); + + let args = makeArgsArangod(options, appDir); + let endpoint; + let port; + + if (!addArgs['server.endpoint']) { + port = findFreePort(options.maxPort); + endpoint = protocol + '://127.0.0.1:' + port; + } else { + endpoint = addArgs['server.endpoint']; + port = endpoint.split(':').pop(); + } + + let instanceInfo = { + role, port, endpoint, rootDir}; + + args['server.endpoint'] = endpoint; + args['database.directory'] = dataDir; + args['log.file'] = fs.join(rootDir, 'log'); + + if (options.verbose) { + args['log.level'] = 'info'; + } else { + args['log.level'] = 'error'; + } + + // flush log messages directly and not asynchronously + // (helps debugging) + args['log.force-direct'] = 'true'; + + if (protocol === 'ssl') { + args['ssl.keyfile'] = fs.join('UnitTests', 'server.pem'); + } + + args = Object.assign(args, options.extraArgs); + + if (addArgs !== undefined) { + args = Object.assign(args, addArgs); + } + + instanceInfo.url = endpointToURL(instanceInfo.endpoint); + instanceInfo.args = toArgv(args); + instanceInfo.options = options; + instanceInfo.pid = executeArangod(ARANGOD_BIN, instanceInfo.args, options).pid; + instanceInfo.role = role; + + if (platform.substr(0, 3) === 'win') { + const procdumpArgs = [ + '-accepteula', + '-e', + '-ma', + instanceInfo.pid, + fs.join(rootDir, 'core.dmp') + ]; + + try { + instanceInfo.monitor = executeExternal('procdump', procdumpArgs); + } catch (x) { + print('failed to start procdump - is it installed?'); + throw x; + } + } + return instanceInfo; +} + +function findTopDir () { + const topDir = fs.normalize(fs.makeAbsolute('.')); + + if (!fs.exists('3rdParty') && !fs.exists('arangod') && + !fs.exists('arangosh') && !fs.exists('UnitTests')) { + throw 'Must be in ArangoDB topdir to execute unit tests.'; + } + + return topDir; +} + +function startInstanceAgency(instanceInfo, protocol, options, + addArgs, rootDir) { + console.error("HASS", instanceInfo); + const dataDir = fs.join(rootDir, 'data'); + + const N = options.agencySize; + if (options.agencyWaitForSync === undefined) { + options.agencyWaitForSync = false; + } + const wfs = options.agencyWaitForSync; + + for (let i = 0; i < N; i++) { + let instanceArgs = _.clone(addArgs); + instanceArgs['agency.id'] = String(i); + instanceArgs['agency.size'] = String(N); + instanceArgs['agency.wait-for-sync'] = String(wfs); + instanceArgs['agency.supervision'] = 'true'; + instanceArgs['database.directory'] = dataDir + String(i); + + if (i === N - 1) { + const port = findFreePort(options.maxPort); + instanceArgs['server.endpoint'] = 'tcp://127.0.0.1:' + port; + let l = []; + instanceInfo.arangods.forEach(arangod => { + l.push('--agency.endpoint'); + l.push(arangod.endpoint); + }); + l.push('--agency.endpoint'); + l.push('tcp://127.0.0.1:' + port); + l.push('--agency.notify'); + l.push('true'); + + instanceArgs['flatCommands'] = l; + } + let dir = fs.join(rootDir, 'agency-' + i); 
+ fs.makeDirectoryRecursive(dir); + + instanceInfo.arangods.push(startArango(protocol, options, instanceArgs, rootDir, 'agent')); + } + + instanceInfo.endpoint = instanceInfo.arangods[instanceInfo.arangods.length - 1].endpoint; + instanceInfo.url = instanceInfo.arangods[instanceInfo.arangods.length - 1].url; + instanceInfo.role = 'agent'; + print('Agency Endpoint: ' + instanceInfo.endpoint); + + return instanceInfo; +} + + +// ////////////////////////////////////////////////////////////////////////////// +// / @brief arguments for testing (server) +// ////////////////////////////////////////////////////////////////////////////// + +function makeArgsArangod (options, appDir) { + if (appDir === undefined) { + appDir = fs.getTempPath(); + } + + fs.makeDirectoryRecursive(appDir, true); + + return { + 'configuration': 'none', + 'database.force-sync-properties': 'false', + 'database.maximal-journal-size': '1048576', + 'javascript.app-path': appDir, + 'javascript.startup-directory': 'js', + 'javascript.v8-contexts': '5', + 'http.trusted-origin': options.httpTrustedOrigin || 'all', + 'log.level': 'warn', + 'log.level=replication=warn': null, + 'server.allow-use-database': 'true', + 'server.authentication': 'false', + 'server.threads': '20', + 'ssl.keyfile': 'UnitTests/server.pem', + }; +} + +// ////////////////////////////////////////////////////////////////////////////// +// / @brief finds a free port +// ////////////////////////////////////////////////////////////////////////////// + +function findFreePort (maxPort) { + if (typeof maxPort !== 'number') { + maxPort = 32768; + } + if (maxPort < 2048) { + maxPort = 2048; + } + while (true) { + const port = Math.floor(Math.random() * (maxPort - 1024)) + 1024; + const free = testPort('tcp://0.0.0.0:' + port); + + if (free) { + return port; + } + } + + return 8529; +} + +exports.findTopDir = findTopDir; +exports.findFreePort = findFreePort; +exports.startInstanceAgency = startInstanceAgency; +exports.startArango = startArango; +exports.executeArangod = executeArangod; +exports.makeArgsArangod = makeArgsArangod; + +exports.startAgency = function(size, options = {}) { + let instances = []; + options.agencySize = size; + let legacyInstanceInfo = {arangods: []}; + for (var i=0;i { + let url = arangod.endpoint.replace(/tcp/, 'http') + '/_admin/server/id'; + let res = request({method: 'GET', url: url}); + let parsed = JSON.parse(res.body); + return parsed.id === server; + })[0]; + + expect(instance).to.not.be.undefined; + instanceManager.kill(instance); + let newEndpoint = instanceManager.coordinators().filter(arangod => { + return arangod.role === 'coordinator' && arangod.pid !== instance.pid; + })[0]; + arango.reconnect(newEndpoint.endpoint, db._name(), 'root', ''); + let waitInterval = 0.1; + let waited = 0; + let ok = false; + while (waited <= 20) { + document = db._collection('foxxqueuetest').document('test'); + let newServer = document.server; + if (server !== newServer) { + ok = true; + break; + } + wait(waitInterval); + waited += waitInterval; + } + // mop: currently supervision would run every 5s + if (!ok) { + throw new Error('Supervision should have moved the foxxqueues and foxxqueues should have been started to run on a new coordinator'); + } + }) +}); diff --git a/js/client/tests/resilience/foxxmaster.js b/js/client/tests/resilience/foxxmaster.js deleted file mode 100644 index 0b41640361..0000000000 --- a/js/client/tests/resilience/foxxmaster.js +++ /dev/null @@ -1,162 +0,0 @@ -/*jshint strict: false, sub: true */ -/*global print, arango, 
assertTrue, assertNotNull, assertNotUndefined */ -'use strict'; - -//////////////////////////////////////////////////////////////////////////////// -/// DISCLAIMER -/// -/// Copyright 2016 ArangoDB GmbH, Cologne, Germany -/// Copyright 2014 triagens GmbH, Cologne, Germany -/// -/// Licensed under the Apache License, Version 2.0 (the "License"); -/// you may not use this file except in compliance with the License. -/// You may obtain a copy of the License at -/// -/// http://www.apache.org/licenses/LICENSE-2.0 -/// -/// Unless required by applicable law or agreed to in writing, software -/// distributed under the License is distributed on an "AS IS" BASIS, -/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -/// See the License for the specific language governing permissions and -/// limitations under the License. -/// -/// Copyright holder is ArangoDB GmbH, Cologne, Germany -/// -/// @author Andreas Streichardt -//////////////////////////////////////////////////////////////////////////////// - -const jsunity = require('jsunity'); -const arangodb = require('@arangodb'); -const wait = require('internal').wait; -const db = arangodb.db; -const fs = require('fs'); -const console = require('console'); -const request = require("@arangodb/request"); -const foxxManager = require('@arangodb/foxx/manager'); - -const suspendExternal = require('internal').suspendExternal; -const continueExternal = require("internal").continueExternal; -const download = require('internal').download; - -const instanceInfo = JSON.parse(fs.read('instanceinfo.json')); - -try { - let globals = JSON.parse(process.env.ARANGOSH_GLOBALS); - Object.keys(globals).forEach(g => { - global[g] = globals[g]; - }); -} catch (e) { -} - - -let executeOnServer = function(code) { - let httpOptions = {}; - httpOptions.method = 'POST'; - httpOptions.timeout = 3600; - - httpOptions.returnBodyOnError = true; - const reply = download(instanceInfo.url + '/_admin/execute?returnAsJSON=true', - code, - httpOptions); - - if (!reply.error && reply.code === 200) { - return JSON.parse(reply.body); - } else { - throw new Error('Could not send to server ' + JSON.stringify(reply)); - } -}; - -function serverSetup() { - let directory = require('./js/client/assets/queuetest/dirname.js'); - foxxManager.install(directory, '/queuetest'); - db._create('foxxqueuetest', {numberOfShards: 1, replicationFactor: 1}); - db.foxxqueuetest.insert({'_key': 'test', 'date': null, 'server': null}); - - const serverCode = ` -const queues = require('@arangodb/foxx/queues'); - -let queue = queues.create('q'); -queue.push({mount: '/queuetest', name: 'queuetest', 'repeatTimes': -1, 'repeatDelay': 1000}, {}); -`; - executeOnServer(serverCode); -} - -function serverTeardown() { - const serverCode = ` -const queues = require('@arangodb/foxx/queues'); -`; - executeOnServer(serverCode); - foxxManager.uninstall('/queuetest'); - db._drop('foxxqueuetest'); -} - -function FoxxmasterSuite() { - return { - setUp: function() { - serverSetup(); - wait(2.1); - }, - - tearDown : function () { - serverTeardown(); - }, - - testQueueWorks: function() { - let document = db._collection('foxxqueuetest').document('test'); - assertNotNull(document.server); - }, - - testQueueFailover: function() { - let document = db._collection('foxxqueuetest').document('test'); - let server = document.server; - assertNotNull(server); - - let instance = instanceInfo.arangods.filter(arangod => { - if (arangod.role === 'agent') { - return false; - } - let url = arangod.endpoint.replace(/tcp/, 
'http') + '/_admin/server/id'; - let res = request({method: 'GET', url: url}); - let parsed = JSON.parse(res.body); - if (parsed.id === server) { - assertTrue(suspendExternal(arangod.pid)); - } - return parsed.id === server; - })[0]; - - assertNotUndefined(instance); - assertTrue(suspendExternal(instance.pid)); - - let newEndpoint = instanceInfo.arangods.filter(arangod => { - return arangod.role === 'coordinator' && arangod.pid !== instance.pid; - })[0]; - arango.reconnect(newEndpoint.endpoint, db._name(), 'root', ''); - let waitInterval = 0.1; - let waited = 0; - let ok = false; - while (waited <= 20) { - document = db._collection('foxxqueuetest').document('test'); - let newServer = document.server; - if (server !== newServer) { - ok = true; - break; - } - wait(waitInterval); - waited += waitInterval; - } - assertTrue(continueExternal(instance.pid)); - // mop: currently supervision would run every 5s - if (!ok) { - throw new Error('Supervision should have moved the foxxqueues and foxxqueues should have been started to run on a new coordinator'); - } - } - }; -} - -//////////////////////////////////////////////////////////////////////////////// -/// @brief executes the test suite -//////////////////////////////////////////////////////////////////////////////// - -jsunity.run(FoxxmasterSuite); - -return jsunity.done(); diff --git a/js/common/modules/@arangodb/common.js b/js/common/modules/@arangodb/common.js index be51b53887..7849a70fb1 100644 --- a/js/common/modules/@arangodb/common.js +++ b/js/common/modules/@arangodb/common.js @@ -506,3 +506,21 @@ exports.checkAvailableVersions = function (version) { } } }; + +// ////////////////////////////////////////////////////////////////////////////// +// / @brief converts endpoints to URL +// ////////////////////////////////////////////////////////////////////////////// + +exports.endpointToURL = function(endpoint) { + if (endpoint.substr(0, 6) === 'ssl://') { + return 'https://' + endpoint.substr(6); + } + + const pos = endpoint.indexOf('://'); + + if (pos === -1) { + return 'http://' + endpoint; + } + + return 'http' + endpoint.substr(pos); +} From 70af1e36470b6b3f92e8677a6bb8379c9fa9280d Mon Sep 17 00:00:00 2001 From: Andreas Streichardt Date: Tue, 16 Aug 2016 18:18:12 +0200 Subject: [PATCH 2/7] Implement proper cluster shutdown --- arangod/Agency/Node.cpp | 8 ++ arangod/Agency/Node.h | 2 + arangod/Agency/Supervision.cpp | 87 ++++++++++++++------- arangod/Agency/Supervision.h | 5 ++ arangod/Cluster/HeartbeatThread.cpp | 52 ++++++++---- arangod/RestHandler/RestShutdownHandler.cpp | 22 +++++- 6 files changed, 127 insertions(+), 49 deletions(-) diff --git a/arangod/Agency/Node.cpp b/arangod/Agency/Node.cpp index 83a38b4a92..d46306c430 100644 --- a/arangod/Agency/Node.cpp +++ b/arangod/Agency/Node.cpp @@ -730,6 +730,14 @@ uint64_t Node::getUInt() const { } +bool Node::getBool() const { + if (type() == NODE) { + throw StoreException("Must not convert NODE type to bool"); + } + return slice().getBool(); + +} + double Node::getDouble() const { if (type() == NODE) { diff --git a/arangod/Agency/Node.h b/arangod/Agency/Node.h index cb1bcfb1f3..992834df85 100644 --- a/arangod/Agency/Node.h +++ b/arangod/Agency/Node.h @@ -258,6 +258,8 @@ public: /// @brief Get insigned value (throws if type NODE or if conversion fails) uint64_t getUInt() const; + /// @brief Get bool value (throws if type NODE or if conversion fails) + bool getBool() const; /// @brief Get double value (throws if type NODE or if conversion fails) double getDouble() const; diff --git 
a/arangod/Agency/Supervision.cpp b/arangod/Agency/Supervision.cpp index b3732b2c2b..d49785d958 100644 --- a/arangod/Agency/Supervision.cpp +++ b/arangod/Agency/Supervision.cpp @@ -31,6 +31,7 @@ #include "Job.h" #include "Store.h" +#include "ApplicationFeatures/ApplicationServer.h" #include "Basics/ConditionLocker.h" #include "VocBase/server.h" @@ -38,6 +39,7 @@ using namespace arangodb; using namespace arangodb::consensus; +using namespace arangodb::application_features; std::string Supervision::_agencyPrefix = "/arango"; @@ -326,45 +328,72 @@ bool Supervision::doChecks() { } void Supervision::run() { - CONDITION_LOCKER(guard, _cv); TRI_ASSERT(_agent != nullptr); + // Get agency prefix after cluster init + if (_jobId == 0) { + // We need the agency prefix to work, but it is only initialized by + // some other server in the cluster. Since the supervision does not + // make sense at all without other ArangoDB servers, we wait pretty + // long here before giving up: + if (!updateAgencyPrefix(1000, 1)) { + LOG_TOPIC(ERR, Logger::AGENCY) + << "Cannot get prefix from Agency. Stopping supervision for good."; + return; + } + } + while (!this->isStopping()) { - - // Get agency prefix after cluster init - if (_jobId == 0) { - // We need the agency prefix to work, but it is only initialized by - // some other server in the cluster. Since the supervision does not - // make sense at all without other ArangoDB servers, we wait pretty - // long here before giving up: - if (!updateAgencyPrefix(1000, 1)) { - LOG_TOPIC(DEBUG, Logger::AGENCY) - << "Cannot get prefix from Agency. Stopping supervision for good."; + updateSnapshot(); + if (isShuttingDown()) { + handleShutdown(); + } else if (_agent->leading()) { + if (!handleJobs()) { break; } } - - // Get bunch of job IDs from agency for future jobs - if (_jobId == 0 || _jobId == _jobIdMax) { - getUniqueIds(); // cannot fail but only hang + _cv.wait(_frequency * 1000000); + } +} + +bool Supervision::isShuttingDown() { + try { + return _snapshot("/Shutdown").getBool(); + } catch (...) 
{ + return false; + } +} + +void Supervision::handleShutdown() { + LOG_TOPIC(DEBUG, Logger::AGENCY) << "Initiating shutdown"; + Node::Children const& serversRegistered = _snapshot(currentServersRegisteredPrefix).children(); + bool serversCleared = true; + for (auto const& server : serversRegistered) { + if (server.first == "Version") { + continue; } - - // Do nothing unless leader - if (_agent->leading()) { - _cv.wait(_frequency * 1000000); - } else { - _cv.wait(); - } - - // Do supervision - updateSnapshot(); - doChecks(); - shrinkCluster(); - workJobs(); - + LOG_TOPIC(DEBUG, Logger::AGENCY) + << "Waiting for " << server.first << " to shutdown"; } + if (serversCleared) { + ApplicationServer::server->beginShutdown(); + } +} + +bool Supervision::handleJobs() { + // Get bunch of job IDs from agency for future jobs + if (_jobId == 0 || _jobId == _jobIdMax) { + getUniqueIds(); // cannot fail but only hang + } + + // Do supervision + doChecks(); + shrinkCluster(); + workJobs(); + + return true; } void Supervision::workJobs() { diff --git a/arangod/Agency/Supervision.h b/arangod/Agency/Supervision.h index aedbc29631..a7f35fb823 100644 --- a/arangod/Agency/Supervision.h +++ b/arangod/Agency/Supervision.h @@ -145,6 +145,11 @@ class Supervision : public arangodb::Thread { void shrinkCluster(); + bool isShuttingDown(); + + bool handleJobs(); + void handleShutdown(); + Agent* _agent; /**< @brief My agent */ Node _snapshot; diff --git a/arangod/Cluster/HeartbeatThread.cpp b/arangod/Cluster/HeartbeatThread.cpp index 86feaab77f..2c6de32620 100644 --- a/arangod/Cluster/HeartbeatThread.cpp +++ b/arangod/Cluster/HeartbeatThread.cpp @@ -48,6 +48,7 @@ #include "VocBase/vocbase.h" using namespace arangodb; +using namespace arangodb::application_features; std::atomic HeartbeatThread::HasRunOnce(false); @@ -189,24 +190,31 @@ void HeartbeatThread::runDBServer() { // send an initial GET request to Sync/Commands/my-id LOG_TOPIC(TRACE, Logger::HEARTBEAT) << "Looking at Sync/Commands/" + _myId; + + AgencyReadTransaction trx(std::vector( + {_agency.prefixPath() + "Shutdown", + _agency.prefixPath() + "Current/Version", + _agency.prefixPath() + "Sync/Commands/" + _myId + })); - AgencyCommResult result = _agency.getValues("Sync/Commands/" + _myId); - - if (result.successful()) { - handleStateChange(result); - } - - if (isStopping()) { - break; - } - - LOG_TOPIC(TRACE, Logger::HEARTBEAT) << "Refetching Current/Version..."; - AgencyCommResult res = _agency.getValues("Current/Version"); - if (!res.successful()) { - LOG_TOPIC(ERR, Logger::HEARTBEAT) - << "Could not read Current/Version from agency."; + AgencyCommResult result = _agency.sendTransactionWithFailover(trx); + if (!result.successful()) { + LOG_TOPIC(WARN, Logger::HEARTBEAT) + << "Heartbeat: Could not read from agency!"; } else { - VPackSlice s = res.slice()[0].get( + VPackSlice shutdownSlice = result.slice()[0].get( + std::vector({_agency.prefix(), "Shutdown"}) + ); + + if (shutdownSlice.isBool() && shutdownSlice.getBool()) { + ApplicationServer::server->beginShutdown(); + break; + } + LOG_TOPIC(TRACE, Logger::HEARTBEAT) + << "Looking at Sync/Commands/" + _myId; + handleStateChange(result); + + VPackSlice s = result.slice()[0].get( std::vector({_agency.prefix(), std::string("Current"), std::string("Version")})); if (!s.isInteger()) { @@ -322,7 +330,8 @@ void HeartbeatThread::runCoordinator() { } AgencyReadTransaction trx(std::vector( - {_agency.prefixPath() + "Plan/Version", + {_agency.prefixPath() + "Shutdown", + _agency.prefixPath() + "Plan/Version", 
_agency.prefixPath() + "Current/Version", _agency.prefixPath() + "Current/Foxxmaster", _agency.prefixPath() + "Current/FoxxmasterQueueupdate", @@ -334,6 +343,15 @@ void HeartbeatThread::runCoordinator() { LOG_TOPIC(WARN, Logger::HEARTBEAT) << "Heartbeat: Could not read from agency!"; } else { + VPackSlice shutdownSlice = result.slice()[0].get( + std::vector({_agency.prefix(), "Shutdown"}) + ); + + if (shutdownSlice.isBool() && shutdownSlice.getBool()) { + ApplicationServer::server->beginShutdown(); + break; + } + LOG_TOPIC(TRACE, Logger::HEARTBEAT) << "Looking at Sync/Commands/" + _myId; diff --git a/arangod/RestHandler/RestShutdownHandler.cpp b/arangod/RestHandler/RestShutdownHandler.cpp index 42b2787968..86ae25ef02 100644 --- a/arangod/RestHandler/RestShutdownHandler.cpp +++ b/arangod/RestHandler/RestShutdownHandler.cpp @@ -24,6 +24,7 @@ #include "RestShutdownHandler.h" #include "Rest/HttpRequest.h" +#include "Cluster/AgencyComm.h" #include "Cluster/ClusterFeature.h" #include @@ -47,10 +48,25 @@ RestHandler::status RestShutdownHandler::execute() { generateError(GeneralResponse::ResponseCode::METHOD_NOT_ALLOWED, 405); return status::DONE; } + bool removeFromCluster; + std::string const& remove = _request->value("remove_from_cluster", removeFromCluster); + removeFromCluster = removeFromCluster && remove == "1"; - bool found; - std::string const& remove = _request->value("remove_from_cluster", found); - if (found && remove == "1") { + bool shutdownClusterFound; + std::string const& shutdownCluster = _request->value("shutdown_cluster", shutdownClusterFound); + if (shutdownClusterFound && shutdownCluster == "1") { + AgencyComm agency; + + VPackBuilder builder; + builder.add(VPackValue(true)); + AgencyCommResult result = agency.setValue("Shutdown", builder.slice(), 0.0); + if (!result.successful()) { + generateError(GeneralResponse::ResponseCode::SERVER_ERROR, 500); + return status::DONE; + } + removeFromCluster = true; + } + if (removeFromCluster) { ClusterFeature* clusterFeature = ApplicationServer::getFeature("Cluster"); clusterFeature->setUnregisterOnShutdown(true); } From a90ca4e770e5f646831d5e554b2773456cb908a0 Mon Sep 17 00:00:00 2001 From: Andreas Streichardt Date: Wed, 17 Aug 2016 17:22:37 +0200 Subject: [PATCH 3/7] It finally seems to run --- js/client/modules/@arangodb/testing.js | 23 +++++++++++++------ .../@arangodb/testing/InstanceManager.js | 12 ++++++---- js/client/modules/@arangodb/testing/utils.js | 2 +- 3 files changed, 25 insertions(+), 12 deletions(-) diff --git a/js/client/modules/@arangodb/testing.js b/js/client/modules/@arangodb/testing.js index cb9ab037fe..6d843ea8c8 100644 --- a/js/client/modules/@arangodb/testing.js +++ b/js/client/modules/@arangodb/testing.js @@ -717,11 +717,9 @@ function performTests(options, testList, testname, runFn, instanceManager) { } while (options.loopEternal); } - if (instanceManager) { - print('Shutting down...'); - instanceManager.cleanup(); - print('done.'); - } + print('Shutting down...'); + instanceManager.cleanup(); + print('done.'); return results; } @@ -3214,8 +3212,19 @@ testFuncs.resilience = function (options) { testFuncs.client_resilience = function (options) { findTests(); - - return performTests(options, testsCases.client_resilience, 'client_resilience', createArangoshRunner()); + + let instanceManager = { + start: function() { + }, + check: function() { + }, + cleanup: function() { + }, + getEndpoint: function() { + return 'tcp://127.0.0.1:8529'; + } + } + return performTests(options, testsCases.client_resilience, 
'client_resilience', createArangoshRunner(), instanceManager); }; // ////////////////////////////////////////////////////////////////////////////// diff --git a/js/client/modules/@arangodb/testing/InstanceManager.js b/js/client/modules/@arangodb/testing/InstanceManager.js index 3246268a42..3ad9a629d7 100644 --- a/js/client/modules/@arangodb/testing/InstanceManager.js +++ b/js/client/modules/@arangodb/testing/InstanceManager.js @@ -8,6 +8,7 @@ const makeArgsArangod = require('@arangodb/testing/utils.js').makeArgsArangod; const executeArangod = require('@arangodb/testing/utils.js').executeArangod; const makeAuthorizationHeaders = require('@arangodb/testing/utils.js').makeAuthorizationHeaders; const ARANGOD_BIN = require('@arangodb/testing/utils.js').ARANGOD_BIN; +const endpointToURL = require('@arangodb/common.js').endpointToURL; const killExternal = require('internal').killExternal; const statusExternal = require('internal').statusExternal; @@ -191,7 +192,9 @@ class InstanceManager { } getEndpoint() { - return this.coordinators()[0].endpoint; + return this.coordinators().filter(coordinator => { + return coordinator.exitStatus && coordinator.exitStatus.status == 'RUNNING'; + })[0].endpoint; } check() { @@ -209,7 +212,8 @@ class InstanceManager { console.warn('Shutting down cluster'); const requestOptions = makeAuthorizationHeaders({}); requestOptions.method = 'DELETE'; - download(this.coordinators()[0].url + '/_admin/shutdown?shutdown_cluster=1', '', requestOptions); + + download(endpointToURL(this.getEndpoint()) + '/_admin/shutdown?shutdown_cluster=1', '', requestOptions); let timeout = 60; let waitTime = 0.5; let start = Date.now(); @@ -230,7 +234,7 @@ class InstanceManager { if (totalTime / 1000 > timeout) { kap0tt = true; toShutdown.forEach(instance => { - //killExternal(instance.pid); + this.kill(instance); }); break; } @@ -267,7 +271,7 @@ class InstanceManager { } killExternal(instance.pid); - instance.status = 'KILLED'; + instance.exitStatus = {'status': 'KILLED'}; } restart(instance) { diff --git a/js/client/modules/@arangodb/testing/utils.js b/js/client/modules/@arangodb/testing/utils.js index c4d379808b..d3e291ab94 100644 --- a/js/client/modules/@arangodb/testing/utils.js +++ b/js/client/modules/@arangodb/testing/utils.js @@ -102,6 +102,7 @@ function startArango (protocol, options, addArgs, rootDir, role) { instanceInfo.options = options; instanceInfo.pid = executeArangod(ARANGOD_BIN, instanceInfo.args, options).pid; instanceInfo.role = role; + instanceInfo.exitStatus = {'status': 'RUNNING'} if (platform.substr(0, 3) === 'win') { const procdumpArgs = [ @@ -135,7 +136,6 @@ function findTopDir () { function startInstanceAgency(instanceInfo, protocol, options, addArgs, rootDir) { - console.error("HASS", instanceInfo); const dataDir = fs.join(rootDir, 'data'); const N = options.agencySize; From 066095b74f8e4953503cbc19b23547febe092dda Mon Sep 17 00:00:00 2001 From: Andreas Streichardt Date: Wed, 17 Aug 2016 17:25:22 +0200 Subject: [PATCH 4/7] exitStatus is now always there --- js/client/modules/@arangodb/testing.js | 4 ---- 1 file changed, 4 deletions(-) diff --git a/js/client/modules/@arangodb/testing.js b/js/client/modules/@arangodb/testing.js index 6d843ea8c8..4bd5ae8d72 100644 --- a/js/client/modules/@arangodb/testing.js +++ b/js/client/modules/@arangodb/testing.js @@ -456,10 +456,6 @@ function analyzeServerCrash (arangod, options, checkStr) { // / @brief periodic checks whether spawned arangod processes are still alive // 
////////////////////////////////////////////////////////////////////////////// function checkArangoAlive (arangod, options) { - if (arangod.hasOwnProperty('exitStatus')) { - return false; - } - const res = statusExternal(arangod.pid, false); const ret = res.status === 'RUNNING'; From 3f412debf012cd4a6aa20560016beccad227546f Mon Sep 17 00:00:00 2001 From: Andreas Streichardt Date: Wed, 17 Aug 2016 18:11:04 +0200 Subject: [PATCH 5/7] Revert futile attempts to implement client resilience tests --- arangod/Agency/Node.cpp | 8 - arangod/Agency/Node.h | 2 - arangod/Agency/Supervision.cpp | 87 ++-- arangod/Agency/Supervision.h | 5 - arangod/Cluster/HeartbeatThread.cpp | 50 +-- arangod/RestHandler/RestShutdownHandler.cpp | 22 +- js/client/modules/@arangodb/testing.js | 371 +++++++++++++----- .../@arangodb/testing/InstanceManager.js | 287 -------------- js/client/modules/@arangodb/testing/utils.js | 269 ------------- js/client/tests/resilience/cluster-spec.js | 50 --- js/client/tests/resilience/foxxmaster-spec.js | 146 ------- js/client/tests/resilience/foxxmaster.js | 162 ++++++++ js/common/modules/@arangodb/common.js | 18 - 13 files changed, 479 insertions(+), 998 deletions(-) delete mode 100644 js/client/modules/@arangodb/testing/InstanceManager.js delete mode 100644 js/client/modules/@arangodb/testing/utils.js delete mode 100644 js/client/tests/resilience/cluster-spec.js delete mode 100644 js/client/tests/resilience/foxxmaster-spec.js create mode 100644 js/client/tests/resilience/foxxmaster.js diff --git a/arangod/Agency/Node.cpp b/arangod/Agency/Node.cpp index d46306c430..83a38b4a92 100644 --- a/arangod/Agency/Node.cpp +++ b/arangod/Agency/Node.cpp @@ -730,14 +730,6 @@ uint64_t Node::getUInt() const { } -bool Node::getBool() const { - if (type() == NODE) { - throw StoreException("Must not convert NODE type to bool"); - } - return slice().getBool(); - -} - double Node::getDouble() const { if (type() == NODE) { diff --git a/arangod/Agency/Node.h b/arangod/Agency/Node.h index 992834df85..cb1bcfb1f3 100644 --- a/arangod/Agency/Node.h +++ b/arangod/Agency/Node.h @@ -258,8 +258,6 @@ public: /// @brief Get insigned value (throws if type NODE or if conversion fails) uint64_t getUInt() const; - /// @brief Get bool value (throws if type NODE or if conversion fails) - bool getBool() const; /// @brief Get double value (throws if type NODE or if conversion fails) double getDouble() const; diff --git a/arangod/Agency/Supervision.cpp b/arangod/Agency/Supervision.cpp index d49785d958..b3732b2c2b 100644 --- a/arangod/Agency/Supervision.cpp +++ b/arangod/Agency/Supervision.cpp @@ -31,7 +31,6 @@ #include "Job.h" #include "Store.h" -#include "ApplicationFeatures/ApplicationServer.h" #include "Basics/ConditionLocker.h" #include "VocBase/server.h" @@ -39,7 +38,6 @@ using namespace arangodb; using namespace arangodb::consensus; -using namespace arangodb::application_features; std::string Supervision::_agencyPrefix = "/arango"; @@ -328,72 +326,45 @@ bool Supervision::doChecks() { } void Supervision::run() { + CONDITION_LOCKER(guard, _cv); TRI_ASSERT(_agent != nullptr); - // Get agency prefix after cluster init - if (_jobId == 0) { - // We need the agency prefix to work, but it is only initialized by - // some other server in the cluster. Since the supervision does not - // make sense at all without other ArangoDB servers, we wait pretty - // long here before giving up: - if (!updateAgencyPrefix(1000, 1)) { - LOG_TOPIC(ERR, Logger::AGENCY) - << "Cannot get prefix from Agency. 
Stopping supervision for good."; - return; - } - } - while (!this->isStopping()) { - updateSnapshot(); - if (isShuttingDown()) { - handleShutdown(); - } else if (_agent->leading()) { - if (!handleJobs()) { + + // Get agency prefix after cluster init + if (_jobId == 0) { + // We need the agency prefix to work, but it is only initialized by + // some other server in the cluster. Since the supervision does not + // make sense at all without other ArangoDB servers, we wait pretty + // long here before giving up: + if (!updateAgencyPrefix(1000, 1)) { + LOG_TOPIC(DEBUG, Logger::AGENCY) + << "Cannot get prefix from Agency. Stopping supervision for good."; break; } } - _cv.wait(_frequency * 1000000); - } -} - -bool Supervision::isShuttingDown() { - try { - return _snapshot("/Shutdown").getBool(); - } catch (...) { - return false; - } -} - -void Supervision::handleShutdown() { - LOG_TOPIC(DEBUG, Logger::AGENCY) << "Initiating shutdown"; - Node::Children const& serversRegistered = _snapshot(currentServersRegisteredPrefix).children(); - bool serversCleared = true; - for (auto const& server : serversRegistered) { - if (server.first == "Version") { - continue; + + // Get bunch of job IDs from agency for future jobs + if (_jobId == 0 || _jobId == _jobIdMax) { + getUniqueIds(); // cannot fail but only hang } - LOG_TOPIC(DEBUG, Logger::AGENCY) - << "Waiting for " << server.first << " to shutdown"; + + // Do nothing unless leader + if (_agent->leading()) { + _cv.wait(_frequency * 1000000); + } else { + _cv.wait(); + } + + // Do supervision + updateSnapshot(); + doChecks(); + shrinkCluster(); + workJobs(); + } - if (serversCleared) { - ApplicationServer::server->beginShutdown(); - } -} - -bool Supervision::handleJobs() { - // Get bunch of job IDs from agency for future jobs - if (_jobId == 0 || _jobId == _jobIdMax) { - getUniqueIds(); // cannot fail but only hang - } - - // Do supervision - doChecks(); - shrinkCluster(); - workJobs(); - - return true; } void Supervision::workJobs() { diff --git a/arangod/Agency/Supervision.h b/arangod/Agency/Supervision.h index a7f35fb823..aedbc29631 100644 --- a/arangod/Agency/Supervision.h +++ b/arangod/Agency/Supervision.h @@ -145,11 +145,6 @@ class Supervision : public arangodb::Thread { void shrinkCluster(); - bool isShuttingDown(); - - bool handleJobs(); - void handleShutdown(); - Agent* _agent; /**< @brief My agent */ Node _snapshot; diff --git a/arangod/Cluster/HeartbeatThread.cpp b/arangod/Cluster/HeartbeatThread.cpp index 2c6de32620..86feaab77f 100644 --- a/arangod/Cluster/HeartbeatThread.cpp +++ b/arangod/Cluster/HeartbeatThread.cpp @@ -48,7 +48,6 @@ #include "VocBase/vocbase.h" using namespace arangodb; -using namespace arangodb::application_features; std::atomic HeartbeatThread::HasRunOnce(false); @@ -190,31 +189,24 @@ void HeartbeatThread::runDBServer() { // send an initial GET request to Sync/Commands/my-id LOG_TOPIC(TRACE, Logger::HEARTBEAT) << "Looking at Sync/Commands/" + _myId; - - AgencyReadTransaction trx(std::vector( - {_agency.prefixPath() + "Shutdown", - _agency.prefixPath() + "Current/Version", - _agency.prefixPath() + "Sync/Commands/" + _myId - })); - AgencyCommResult result = _agency.sendTransactionWithFailover(trx); - if (!result.successful()) { - LOG_TOPIC(WARN, Logger::HEARTBEAT) - << "Heartbeat: Could not read from agency!"; - } else { - VPackSlice shutdownSlice = result.slice()[0].get( - std::vector({_agency.prefix(), "Shutdown"}) - ); + AgencyCommResult result = _agency.getValues("Sync/Commands/" + _myId); - if (shutdownSlice.isBool() && 
shutdownSlice.getBool()) { - ApplicationServer::server->beginShutdown(); - break; - } - LOG_TOPIC(TRACE, Logger::HEARTBEAT) - << "Looking at Sync/Commands/" + _myId; + if (result.successful()) { handleStateChange(result); - - VPackSlice s = result.slice()[0].get( + } + + if (isStopping()) { + break; + } + + LOG_TOPIC(TRACE, Logger::HEARTBEAT) << "Refetching Current/Version..."; + AgencyCommResult res = _agency.getValues("Current/Version"); + if (!res.successful()) { + LOG_TOPIC(ERR, Logger::HEARTBEAT) + << "Could not read Current/Version from agency."; + } else { + VPackSlice s = res.slice()[0].get( std::vector<std::string>({_agency.prefix(), std::string("Current"), std::string("Version")})); if (!s.isInteger()) { @@ -330,8 +322,7 @@ void HeartbeatThread::runCoordinator() { } AgencyReadTransaction trx(std::vector<std::string>( - {_agency.prefixPath() + "Shutdown", - _agency.prefixPath() + "Plan/Version", + {_agency.prefixPath() + "Plan/Version", _agency.prefixPath() + "Current/Version", _agency.prefixPath() + "Current/Foxxmaster", _agency.prefixPath() + "Current/FoxxmasterQueueupdate", @@ -343,15 +334,6 @@ LOG_TOPIC(WARN, Logger::HEARTBEAT) << "Heartbeat: Could not read from agency!"; } else { - VPackSlice shutdownSlice = result.slice()[0].get( - std::vector<std::string>({_agency.prefix(), "Shutdown"}) - ); - - if (shutdownSlice.isBool() && shutdownSlice.getBool()) { - ApplicationServer::server->beginShutdown(); - break; - } - LOG_TOPIC(TRACE, Logger::HEARTBEAT) << "Looking at Sync/Commands/" + _myId; diff --git a/arangod/RestHandler/RestShutdownHandler.cpp b/arangod/RestHandler/RestShutdownHandler.cpp index 86ae25ef02..42b2787968 100644 --- a/arangod/RestHandler/RestShutdownHandler.cpp +++ b/arangod/RestHandler/RestShutdownHandler.cpp @@ -24,7 +24,6 @@ #include "RestShutdownHandler.h" #include "Rest/HttpRequest.h" -#include "Cluster/AgencyComm.h" #include "Cluster/ClusterFeature.h" #include @@ -48,25 +47,10 @@ RestHandler::status RestShutdownHandler::execute() { generateError(GeneralResponse::ResponseCode::METHOD_NOT_ALLOWED, 405); return status::DONE; } - bool removeFromCluster; - std::string const& remove = _request->value("remove_from_cluster", removeFromCluster); - removeFromCluster = removeFromCluster && remove == "1"; - bool shutdownClusterFound; - std::string const& shutdownCluster = _request->value("shutdown_cluster", shutdownClusterFound); - if (shutdownClusterFound && shutdownCluster == "1") { - AgencyComm agency; - - VPackBuilder builder; - builder.add(VPackValue(true)); - AgencyCommResult result = agency.setValue("Shutdown", builder.slice(), 0.0); - if (!result.successful()) { - generateError(GeneralResponse::ResponseCode::SERVER_ERROR, 500); - return status::DONE; - } - removeFromCluster = true; - } - if (removeFromCluster) { + bool found; + std::string const& remove = _request->value("remove_from_cluster", found); + if (found && remove == "1") { ClusterFeature* clusterFeature = ApplicationServer::getFeature<ClusterFeature>("Cluster"); clusterFeature->setUnregisterOnShutdown(true); } diff --git a/js/client/modules/@arangodb/testing.js b/js/client/modules/@arangodb/testing.js index 4bd5ae8d72..49926fb3a7 100644 --- a/js/client/modules/@arangodb/testing.js +++ b/js/client/modules/@arangodb/testing.js @@ -201,13 +201,6 @@ const time = require('internal').time; const toArgv = require('internal').toArgv; const wait = require('internal').wait; const platform = require('internal').platform; -const endpointToURL = require('@arangodb/common.js').endpointToURL; - -const findFreePort = 
require('@arangodb/testing/utils.js').findFreePort; -const startArango = require('@arangodb/testing/utils.js').startArango; -const makeArgsArangod = require('@arangodb/testing/utils.js').makeArgsArangod; -const executeArangod = require('@arangodb/testing/utils.js').executeArangod; -const makeAuthorizationHeaders = require('@arangodb/testing/utils.js').makeAuthorizationHeaders; const BLUE = require('internal').COLORS.COLOR_BLUE; const CYAN = require('internal').COLORS.COLOR_CYAN; @@ -288,6 +281,34 @@ function makeResults (testname) { }; } +// ////////////////////////////////////////////////////////////////////////////// +// / @brief arguments for testing (server) +// ////////////////////////////////////////////////////////////////////////////// + +function makeArgsArangod (options, appDir) { + if (appDir === undefined) { + appDir = fs.getTempPath(); + } + + fs.makeDirectoryRecursive(appDir, true); + + return { + 'configuration': 'none', + 'database.force-sync-properties': 'false', + 'database.maximal-journal-size': '1048576', + 'javascript.app-path': appDir, + 'javascript.startup-directory': JS_DIR, + 'javascript.v8-contexts': '5', + 'http.trusted-origin': options.httpTrustedOrigin || 'all', + 'log.level': 'warn', + 'log.level=replication=warn': null, + 'server.allow-use-database': 'true', + 'server.authentication': 'false', + 'server.threads': '20', + 'ssl.keyfile': PEM_FILE + }; +} + // ////////////////////////////////////////////////////////////////////////////// // / @brief arguments for testing (client) // ////////////////////////////////////////////////////////////////////////////// @@ -302,6 +323,36 @@ function makeArgsArangosh (options) { }; } +// ////////////////////////////////////////////////////////////////////////////// +// / @brief adds authorization headers +// ////////////////////////////////////////////////////////////////////////////// + +function makeAuthorizationHeaders (options) { + return { + 'headers': { + 'Authorization': 'Basic ' + base64Encode(options.username + ':' + + options.password) + } + }; +} +// ////////////////////////////////////////////////////////////////////////////// +// / @brief converts endpoints to URL +// ////////////////////////////////////////////////////////////////////////////// + +function endpointToURL (endpoint) { + if (endpoint.substr(0, 6) === 'ssl://') { + return 'https://' + endpoint.substr(6); + } + + const pos = endpoint.indexOf('://'); + + if (pos === -1) { + return 'http://' + endpoint; + } + + return 'http' + endpoint.substr(pos); +} + // ////////////////////////////////////////////////////////////////////////////// // / @brief scans the log files for important infos // ////////////////////////////////////////////////////////////////////////////// @@ -539,6 +590,29 @@ function cleanupDBDirectories (options) { } } +// ////////////////////////////////////////////////////////////////////////////// +// / @brief finds a free port +// ////////////////////////////////////////////////////////////////////////////// + +function findFreePort (maxPort) { + if (typeof maxPort !== 'number') { + maxPort = 32768; + } + if (maxPort < 2048) { + maxPort = 2048; + } + while (true) { + const port = Math.floor(Math.random() * (maxPort - 1024)) + 1024; + const free = testPort('tcp://0.0.0.0:' + port); + + if (free) { + return port; + } + } + + return 8529; +} + // ////////////////////////////////////////////////////////////////////////////// // / @brief build a unix path // ////////////////////////////////////////////////////////////////////////////// @@ 
-559,7 +633,7 @@ function makePathGeneric (path) { // / @brief runs a remote unittest file using /_admin/execute // ////////////////////////////////////////////////////////////////////////////// -function runThere (options, endpoint, file) { +function runThere (options, instanceInfo, file) { try { let testCode; @@ -581,7 +655,7 @@ function runThere (options, endpoint, file) { httpOptions.returnBodyOnError = true; - const reply = download(endpointToURL(endpoint) + '/_admin/execute?returnAsJSON=true', + const reply = download(instanceInfo.url + '/_admin/execute?returnAsJSON=true', testCode, httpOptions); @@ -614,27 +688,10 @@ runThere.info = 'runThere'; // / @brief runs a list of tests // ////////////////////////////////////////////////////////////////////////////// -function performTests(options, testList, testname, runFn, instanceManager) { - if (!instanceManager) { - let instanceInfo; - instanceManager = { - start: function() { - instanceInfo = startInstance('tcp', options, {}, testname); - return instanceInfo !== false; - }, - check: function() { - return checkInstanceAlive(instanceInfo, options); - }, - getEndpoint: function() { - return instanceInfo.endpoint; - }, - cleanup: function() { - shutdownInstance(instanceInfo, options); - } - } - } +function performTests (options, testList, testname, runFn) { + let instanceInfo = startInstance('tcp', options, {}, testname); - if (instanceManager && instanceManager.start() === false) { + if (instanceInfo === false) { return { setup: { status: false, @@ -656,65 +713,77 @@ function performTests(options, testList, testname, runFn, instanceManager) { let results = {}; let continueTesting = true; - - let logFn; - if (options.extremeVerbosity) { - logFn = (testname, reason) => { - print('Skipped ' + testname + ' because of ' + reason); + + for (let i = 0; i < testList.length; i++) { + let te = testList[i]; + let filtered = {}; + + if (filterTestcaseByOptions(te, options, filtered)) { + let first = true; + let loopCount = 0; + + while (first || options.loopEternal) { + if (!continueTesting) { + print('oops!'); + print('Skipping, ' + te + ' server is gone.'); + + results[te] = { + status: false, + message: instanceInfo.exitStatus + }; + + instanceInfo.exitStatus = 'server is gone.'; + + break; + } + + print('\n' + Date() + ' ' + runFn.info + ': Trying', te, '...'); + let reply = runFn(options, instanceInfo, te); + + if (reply.hasOwnProperty('status')) { + results[te] = reply; + + if (results[te].status === false) { + options.cleanup = false; + } + + if (!reply.status && !options.force) { + break; + } + } else { + results[te] = { + status: false, + message: reply + }; + + if (!options.force) { + break; + } + } + + continueTesting = checkInstanceAlive(instanceInfo, options); + + first = false; + + if (options.loopEternal) { + if (loopCount % options.loopSleepWhen === 0) { + print('sleeping...'); + sleep(options.loopSleepSec); + print('continuing.'); + } + + ++loopCount; + } + } + } else { + if (options.extremeVerbosity) { + print('Skipped ' + te + ' because of ' + filtered.filter); + } } - } else { - logFn = function() {}; - } - let testcases = testList.filter(testcase => { - return filterTestcaseByOptions(testcase, options, logFn); - }); - - let endpoint = instanceManager.getEndpoint(); - - for (let i = 0; i < testcases.length; i++) { - let te = testcases[i]; - let loopCount = 0; - do { - print('\n' + Date() + ' ' + runFn.info + ': Trying', te, '...'); - let reply = runFn(options, endpoint, te); - - if (reply.hasOwnProperty('status')) { - results[te] = 
reply; - - if (results[te].status === false) { - options.cleanup = false; - } - - if (!reply.status && !options.force) { - break; - } - } else { - results[te] = { - status: false, - message: reply - }; - - if (!options.force) { - break; - } - } - - continueTesting = instanceManager.check(); - - if (options.loopEternal) { - if (loopCount % options.loopSleepWhen === 0) { - print('sleeping...'); - sleep(options.loopSleepSec); - print('continuing.'); - } - - ++loopCount; - } - } while (options.loopEternal); } print('Shutting down...'); - instanceManager.cleanup(); + shutdownInstance(instanceInfo, options); print('done.'); return results; @@ -806,6 +875,43 @@ function runStressTest (options, command, testname) { return {}; } +// ////////////////////////////////////////////////////////////////////////////// +// / @brief executes a command, possible with valgrind +// ////////////////////////////////////////////////////////////////////////////// + +function executeArangod (cmd, args, options) { + if (options.valgrind) { + let valgrindOpts = {}; + + if (options.valgrindArgs) { + valgrindOpts = options.valgrindArgs; + } + + let testfn = options.valgrindFileBase; + + if (testfn.length > 0) { + testfn += '_'; + } + + if (valgrindOpts.xml === 'yes') { + valgrindOpts['xml-file'] = testfn + '.%p.xml'; + } + + valgrindOpts['log-file'] = testfn + '.%p.valgrind.log'; + + args = toArgv(valgrindOpts, true).concat([cmd]).concat(args); + cmd = options.valgrind; + } else if (options.rr) { + args = [cmd].concat(args); + cmd = 'rr'; + } + + if (options.extremeVerbosity) { + print('starting process ' + cmd + ' with arguments: ' + JSON.stringify(args)); + } + return executeExternal(cmd, args); +} + // ////////////////////////////////////////////////////////////////////////////// // / @brief executes a command and wait for result // ////////////////////////////////////////////////////////////////////////////// @@ -910,11 +1016,9 @@ function executeAndWait (cmd, args, options, valgrindTest) { // / @brief runs file in arangosh // ////////////////////////////////////////////////////////////////////////////// -function runInArangosh (options, endpoint, file, addArgs) { +function runInArangosh (options, instanceInfo, file, addArgs) { let args = makeArgsArangosh(options); - if (endpoint) { - args['server.endpoint'] = endpoint; - } + args['server.endpoint'] = instanceInfo.endpoint; args['javascript.unit-tests'] = fs.join(TOP_DIR, file); if (!options.verbose) { @@ -924,6 +1028,7 @@ function runInArangosh (options, endpoint, file, addArgs) { if (addArgs !== undefined) { args = Object.assign(args, addArgs); } + fs.write('instanceinfo.json', JSON.stringify(instanceInfo)); let rc = executeAndWait(ARANGOSH_BIN, toArgv(args), options); let result; @@ -942,8 +1047,8 @@ function runInArangosh (options, endpoint, file, addArgs) { } function createArangoshRunner(args) { - let runner = function(options, endpoint, file) { - return runInArangosh(options, endpoint, file, args); + let runner = function(options, instanceInfo, file) { + return runInArangosh(options, instanceInfo, file, args); }; runner.info = 'arangosh'; return runner; @@ -1240,6 +1345,75 @@ function startInstanceCluster (instanceInfo, protocol, options, return true; } +function startArango (protocol, options, addArgs, rootDir, role) { + const dataDir = fs.join(rootDir, 'data'); + const appDir = fs.join(rootDir, 'apps'); + + fs.makeDirectoryRecursive(dataDir); + fs.makeDirectoryRecursive(appDir); + + let args = makeArgsArangod(options, appDir); + let endpoint; + let port; + 
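+  // pick a random free local port unless the caller already passed an explicit 'server.endpoint'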
+ if (!addArgs['server.endpoint']) { + port = findFreePort(options.maxPort); + endpoint = protocol + '://127.0.0.1:' + port; + } else { + endpoint = addArgs['server.endpoint']; + port = endpoint.split(':').pop(); + } + + let instanceInfo = { + role, port, endpoint, rootDir}; + + args['server.endpoint'] = endpoint; + args['database.directory'] = dataDir; + args['log.file'] = fs.join(rootDir, 'log'); + + if (options.verbose) { + args['log.level'] = 'info'; + } else { + args['log.level'] = 'error'; + } + + // flush log messages directly and not asynchronously + // (helps debugging) + args['log.force-direct'] = 'true'; + + if (protocol === 'ssl') { + args['ssl.keyfile'] = fs.join('UnitTests', 'server.pem'); + } + + args = Object.assign(args, options.extraArgs); + + if (addArgs !== undefined) { + args = Object.assign(args, addArgs); + } + + instanceInfo.url = endpointToURL(instanceInfo.endpoint); + instanceInfo.pid = executeArangod(ARANGOD_BIN, toArgv(args), options).pid; + instanceInfo.role = role; + + if (platform.substr(0, 3) === 'win') { + const procdumpArgs = [ + '-accepteula', + '-e', + '-ma', + instanceInfo.pid, + fs.join(rootDir, 'core.dmp') + ]; + + try { + instanceInfo.monitor = executeExternal('procdump', procdumpArgs); + } catch (x) { + print('failed to start procdump - is it installed?'); + throw x; + } + } + return instanceInfo; +} + function startInstanceAgency (instanceInfo, protocol, options, addArgs, rootDir) { const dataDir = fs.join(rootDir, 'data'); @@ -3208,19 +3382,12 @@ testFuncs.resilience = function (options) { testFuncs.client_resilience = function (options) { findTests(); - - let instanceManager = { - start: function() { - }, - check: function() { - }, - cleanup: function() { - }, - getEndpoint: function() { - return 'tcp://127.0.0.1:8529'; - } + options.cluster = true; + if (options.coordinators < 2) { + options.coordinators = 2; } - return performTests(options, testsCases.client_resilience, 'client_resilience', createArangoshRunner(), instanceManager); + + return performTests(options, testsCases.client_resilience, 'client_resilience', createArangoshRunner()); }; // ////////////////////////////////////////////////////////////////////////////// diff --git a/js/client/modules/@arangodb/testing/InstanceManager.js b/js/client/modules/@arangodb/testing/InstanceManager.js deleted file mode 100644 index 3ad9a629d7..0000000000 --- a/js/client/modules/@arangodb/testing/InstanceManager.js +++ /dev/null @@ -1,287 +0,0 @@ -const fs = require('fs'); -const path = require('path'); -const _ = require('lodash'); - -const findFreePort = require('@arangodb/testing/utils.js').findFreePort; -const startArango = require('@arangodb/testing/utils.js').startArango; -const makeArgsArangod = require('@arangodb/testing/utils.js').makeArgsArangod; -const executeArangod = require('@arangodb/testing/utils.js').executeArangod; -const makeAuthorizationHeaders = require('@arangodb/testing/utils.js').makeAuthorizationHeaders; -const ARANGOD_BIN = require('@arangodb/testing/utils.js').ARANGOD_BIN; -const endpointToURL = require('@arangodb/common.js').endpointToURL; - -const killExternal = require('internal').killExternal; -const statusExternal = require('internal').statusExternal; -const download = require('internal').download; -const wait = require('internal').wait; - -// ////////////////////////////////////////////////////////////////////////////// -// / @brief periodic checks whether spawned arangod processes are still alive -// 
////////////////////////////////////////////////////////////////////////////// -function checkArangoAlive (arangod, options = {}) { - if (arangod.hasOwnProperty('exitStatus')) { - return false; - } - - const res = statusExternal(arangod.pid, false); - const ret = res.status === 'RUNNING'; - - if (!ret) { - print('ArangoD with PID ' + arangod.pid + ' gone:'); - print(arangod); - - if (res.hasOwnProperty('signal') && - ((res.signal === 11) || - (res.signal === 6) || - // Windows sometimes has random numbers in signal... - (platform.substr(0, 3) === 'win') - ) - ) { - arangod.exitStatus = res; - //analyzeServerCrash(arangod, options, 'health Check'); - } - } - - return ret; -} - -let makeArgs = function (name, rootDir, options, args) { - args = args || options.extraArgs; - - let subDir = fs.join(rootDir, name); - fs.makeDirectoryRecursive(subDir); - - let subArgs = makeArgsArangod(options, fs.join(subDir, 'apps')); - subArgs = Object.assign(subArgs, args); - - return [subArgs, subDir]; -}; - -class InstanceManager { - constructor(name) { - this.rootDir = fs.join(fs.getTempFile(), name); - this.instances = []; - } - - startDbServer(options = {}) { - let endpoint = 'tcp://127.0.0.1:' + findFreePort(options.maxPort); - let primaryArgs = options.extraArgs ? _.clone(options.extraArgs) : {}; - primaryArgs['server.endpoint'] = endpoint; - primaryArgs['cluster.my-address'] = endpoint; - primaryArgs['cluster.my-local-info'] = endpoint; - primaryArgs['cluster.my-role'] = 'PRIMARY'; - primaryArgs['cluster.agency-endpoint'] = this.getAgencyEndpoint(); - - this.instances.push(startArango('tcp', options, ...makeArgs('dbserver' + Math.floor(Math.random() * 1000000000), this.rootDir, options, primaryArgs), 'dbserver')); - return this.instances[this.instances.length - 1]; - } - - getAgencyEndpoint() { - return this.instances.filter(instance => { - return instance.role == 'agent'; - })[0].endpoint; - } - - startCoordinator(options = {}) { - let endpoint = 'tcp://127.0.0.1:' + findFreePort(options.maxPort); - let coordinatorArgs = options.extraArgs ? 
_.clone(options.extraArgs) : {}; - coordinatorArgs['server.endpoint'] = endpoint; - coordinatorArgs['cluster.my-address'] = endpoint; - coordinatorArgs['cluster.my-local-info'] = endpoint; - coordinatorArgs['cluster.my-role'] = 'COORDINATOR'; - coordinatorArgs['cluster.agency-endpoint'] = this.getAgencyEndpoint(); - - this.instances.push(startArango('tcp', options, ...makeArgs('coordinator' + Math.floor(Math.random() * 1000000000), this.rootDir, options, coordinatorArgs), 'coordinator')); - return this.instances[this.instances.length - 1]; - } - - startAgency(options = {}) { - let size = options.agencySize || 1; - if (options.agencyWaitForSync === undefined) { - options.agencyWaitForSync = false; - } - const wfs = options.agencyWaitForSync; - for (var i=0;i { - return instance.role == 'agent'; - }) - .forEach(arangod => { - l.push('--agency.endpoint'); - l.push(arangod.endpoint); - }); - l.push('--agency.endpoint'); - l.push('tcp://127.0.0.1:' + port); - l.push('--agency.notify'); - l.push('true'); - - instanceArgs['flatCommands'] = l; - } - this.instances.push(startArango('tcp', options, instanceArgs, dir, 'agent')); - } - return this.instances.filter(instance => { - return instance.role == 'agent'; - }); - } - - - - startCluster(numAgents, numCoordinators, numDbServers, options = {}) { - print("Starting Cluster with Agents: " + numAgents + " Coordinators: " + numCoordinators + " DBServers: " + numDbServers); - - let agencyOptions = options.agents || {}; - _.extend(agencyOptions, {agencySize: numAgents}); - this.startAgency(agencyOptions); - - let coordinatorOptions = options.coordinators || {}; - let i; - for (i=0;i { - print("pid: " + instance.pid +", role: " + instance.role + ", endpoint: " + instance.endpoint); - }; - this.agents().forEach(debugInfo); - this.coordinators().forEach(debugInfo); - this.dbServers().forEach(debugInfo); - - return this.coordinators()[0].endpoint; - } - - waitForAllInstances() { - let count = 0; - this.instances.forEach(arangod => { - while (true) { - const reply = download(arangod.url + '/_api/version', '', makeAuthorizationHeaders(arangod.options)); - - if (!reply.error && reply.code === 200) { - break; - } - - ++count; - - if (count % 60 === 0) { - if (!checkArangoAlive(arangod)) { - throw new Error('Arangod with pid ' + arangod.pid + ' was not running. 
Full info: ' + JSON.stringify(arangod)); - } - } - wait(0.5, false); - } - }); - return this.getEndpoint(); - } - - getEndpoint() { - return this.coordinators().filter(coordinator => { - return coordinator.exitStatus && coordinator.exitStatus.status == 'RUNNING'; - })[0].endpoint; - } - - check() { - let failedInstances = this.instances.filter(instance => { - instance.exitStatus = statusExternal(instance.pid, false); - return instance.exitStatus.status != 'RUNNING'; - }); - - if (failedInstances.length > 0) { - throw new Error('Some instances died'); - } - } - - cleanup() { - console.warn('Shutting down cluster'); - const requestOptions = makeAuthorizationHeaders({}); - requestOptions.method = 'DELETE'; - - download(endpointToURL(this.getEndpoint()) + '/_admin/shutdown?shutdown_cluster=1', '', requestOptions); - let timeout = 60; - let waitTime = 0.5; - let start = Date.now(); - - let kap0tt = false; - let toShutdown = this.instances.slice(); - while (toShutdown.length > 0) { - toShutdown.forEach(instance => { - instance.exitStatus = statusExternal(instance.pid, false); - }); - - toShutdown = toShutdown.filter(instance => { - return instance.exitStatus.status == 'RUNNING'; - }); - - if (toShutdown.length > 0) { - let totalTime = Date.now() - start; - if (totalTime / 1000 > timeout) { - kap0tt = true; - toShutdown.forEach(instance => { - this.kill(instance); - }); - break; - } - wait(waitTime); - } - } - if (!kap0tt) { - fs.removeDirectoryRecursive(this.rootDir, true); - } - } - - dbServers() { - return this.instances.filter(instance => { - return instance.role == 'dbserver'; - }); - } - - coordinators() { - return this.instances.filter(instance => { - return instance.role == 'coordinator'; - }); - } - - agents() { - return this.instances.filter(instance => { - return instance.role == 'agent'; - }); - } - - kill(instance) { - let index = this.instances.indexOf(instance); - if (index === -1) { - throw new Error('Couldn\'t find instance', instance); - } - - killExternal(instance.pid); - instance.exitStatus = {'status': 'KILLED'}; - } - - restart(instance) { - let index = this.instances.indexOf(instance); - if (index === -1) { - throw new Error('Couldn\'t find instance', instance); - } - - instance.pid = executeArangod(ARANGOD_BIN, instance.args, instance.options).pid; - } -} - -module.exports = InstanceManager; diff --git a/js/client/modules/@arangodb/testing/utils.js b/js/client/modules/@arangodb/testing/utils.js deleted file mode 100644 index d3e291ab94..0000000000 --- a/js/client/modules/@arangodb/testing/utils.js +++ /dev/null @@ -1,269 +0,0 @@ -'use strict'; - -const fs = require('fs'); -const testPort = require('internal').testPort; -const endpointToURL = require('@arangodb/common.js').endpointToURL; -const toArgv = require('internal').toArgv; -const executeExternal = require('internal').executeExternal; -const platform = require('internal').platform; -const _ = require('lodash'); -const base64Encode = require('internal').base64Encode; - -let TOP_DIR = findTopDir(); -let BIN_DIR = fs.join(TOP_DIR, 'build', 'bin'); -let ARANGOD_BIN = fs.join(BIN_DIR, 'arangod'); - - -// ////////////////////////////////////////////////////////////////////////////// -// / @brief executes a command, possible with valgrind -// ////////////////////////////////////////////////////////////////////////////// - -function executeArangod (cmd, args, options) { - if (options.valgrind) { - let valgrindOpts = {}; - - if (options.valgrindArgs) { - valgrindOpts = options.valgrindArgs; - } - - let testfn = 
options.valgrindFileBase; - - if (testfn.length > 0) { - testfn += '_'; - } - - if (valgrindOpts.xml === 'yes') { - valgrindOpts['xml-file'] = testfn + '.%p.xml'; - } - - valgrindOpts['log-file'] = testfn + '.%p.valgrind.log'; - - args = toArgv(valgrindOpts, true).concat([cmd]).concat(args); - cmd = options.valgrind; - } else if (options.rr) { - args = [cmd].concat(args); - cmd = 'rr'; - } - - if (options.extremeVerbosity) { - print('starting process ' + cmd + ' with arguments: ' + JSON.stringify(args)); - } - return executeExternal(cmd, args); -} - -function startArango (protocol, options, addArgs, rootDir, role) { - const dataDir = fs.join(rootDir, 'data'); - const appDir = fs.join(rootDir, 'apps'); - - fs.makeDirectoryRecursive(dataDir); - fs.makeDirectoryRecursive(appDir); - - let args = makeArgsArangod(options, appDir); - let endpoint; - let port; - - if (!addArgs['server.endpoint']) { - port = findFreePort(options.maxPort); - endpoint = protocol + '://127.0.0.1:' + port; - } else { - endpoint = addArgs['server.endpoint']; - port = endpoint.split(':').pop(); - } - - let instanceInfo = { - role, port, endpoint, rootDir}; - - args['server.endpoint'] = endpoint; - args['database.directory'] = dataDir; - args['log.file'] = fs.join(rootDir, 'log'); - - if (options.verbose) { - args['log.level'] = 'info'; - } else { - args['log.level'] = 'error'; - } - - // flush log messages directly and not asynchronously - // (helps debugging) - args['log.force-direct'] = 'true'; - - if (protocol === 'ssl') { - args['ssl.keyfile'] = fs.join('UnitTests', 'server.pem'); - } - - args = Object.assign(args, options.extraArgs); - - if (addArgs !== undefined) { - args = Object.assign(args, addArgs); - } - - instanceInfo.url = endpointToURL(instanceInfo.endpoint); - instanceInfo.args = toArgv(args); - instanceInfo.options = options; - instanceInfo.pid = executeArangod(ARANGOD_BIN, instanceInfo.args, options).pid; - instanceInfo.role = role; - instanceInfo.exitStatus = {'status': 'RUNNING'} - - if (platform.substr(0, 3) === 'win') { - const procdumpArgs = [ - '-accepteula', - '-e', - '-ma', - instanceInfo.pid, - fs.join(rootDir, 'core.dmp') - ]; - - try { - instanceInfo.monitor = executeExternal('procdump', procdumpArgs); - } catch (x) { - print('failed to start procdump - is it installed?'); - throw x; - } - } - return instanceInfo; -} - -function findTopDir () { - const topDir = fs.normalize(fs.makeAbsolute('.')); - - if (!fs.exists('3rdParty') && !fs.exists('arangod') && - !fs.exists('arangosh') && !fs.exists('UnitTests')) { - throw 'Must be in ArangoDB topdir to execute unit tests.'; - } - - return topDir; -} - -function startInstanceAgency(instanceInfo, protocol, options, - addArgs, rootDir) { - const dataDir = fs.join(rootDir, 'data'); - - const N = options.agencySize; - if (options.agencyWaitForSync === undefined) { - options.agencyWaitForSync = false; - } - const wfs = options.agencyWaitForSync; - - for (let i = 0; i < N; i++) { - let instanceArgs = _.clone(addArgs); - instanceArgs['agency.id'] = String(i); - instanceArgs['agency.size'] = String(N); - instanceArgs['agency.wait-for-sync'] = String(wfs); - instanceArgs['agency.supervision'] = 'true'; - instanceArgs['database.directory'] = dataDir + String(i); - - if (i === N - 1) { - const port = findFreePort(options.maxPort); - instanceArgs['server.endpoint'] = 'tcp://127.0.0.1:' + port; - let l = []; - instanceInfo.arangods.forEach(arangod => { - l.push('--agency.endpoint'); - l.push(arangod.endpoint); - }); - l.push('--agency.endpoint'); - 
l.push('tcp://127.0.0.1:' + port); - l.push('--agency.notify'); - l.push('true'); - - instanceArgs['flatCommands'] = l; - } - let dir = fs.join(rootDir, 'agency-' + i); - fs.makeDirectoryRecursive(dir); - - instanceInfo.arangods.push(startArango(protocol, options, instanceArgs, rootDir, 'agent')); - } - - instanceInfo.endpoint = instanceInfo.arangods[instanceInfo.arangods.length - 1].endpoint; - instanceInfo.url = instanceInfo.arangods[instanceInfo.arangods.length - 1].url; - instanceInfo.role = 'agent'; - print('Agency Endpoint: ' + instanceInfo.endpoint); - - return instanceInfo; -} - - -// ////////////////////////////////////////////////////////////////////////////// -// / @brief arguments for testing (server) -// ////////////////////////////////////////////////////////////////////////////// - -function makeArgsArangod (options, appDir) { - if (appDir === undefined) { - appDir = fs.getTempPath(); - } - - fs.makeDirectoryRecursive(appDir, true); - - return { - 'configuration': 'none', - 'database.force-sync-properties': 'false', - 'database.maximal-journal-size': '1048576', - 'javascript.app-path': appDir, - 'javascript.startup-directory': 'js', - 'javascript.v8-contexts': '5', - 'http.trusted-origin': options.httpTrustedOrigin || 'all', - 'log.level': 'warn', - 'log.level=replication=warn': null, - 'server.allow-use-database': 'true', - 'server.authentication': 'false', - 'server.threads': '20', - 'ssl.keyfile': 'UnitTests/server.pem', - }; -} - -// ////////////////////////////////////////////////////////////////////////////// -// / @brief finds a free port -// ////////////////////////////////////////////////////////////////////////////// - -function findFreePort (maxPort) { - if (typeof maxPort !== 'number') { - maxPort = 32768; - } - if (maxPort < 2048) { - maxPort = 2048; - } - while (true) { - const port = Math.floor(Math.random() * (maxPort - 1024)) + 1024; - const free = testPort('tcp://0.0.0.0:' + port); - - if (free) { - return port; - } - } - - return 8529; -} - -exports.findTopDir = findTopDir; -exports.findFreePort = findFreePort; -exports.startInstanceAgency = startInstanceAgency; -exports.startArango = startArango; -exports.executeArangod = executeArangod; -exports.makeArgsArangod = makeArgsArangod; - -exports.startAgency = function(size, options = {}) { - let instances = []; - options.agencySize = size; - let legacyInstanceInfo = {arangods: []}; - for (var i=0;i { - let url = arangod.endpoint.replace(/tcp/, 'http') + '/_admin/server/id'; - let res = request({method: 'GET', url: url}); - let parsed = JSON.parse(res.body); - return parsed.id === server; - })[0]; - - expect(instance).to.not.be.undefined; - instanceManager.kill(instance); - let newEndpoint = instanceManager.coordinators().filter(arangod => { - return arangod.role === 'coordinator' && arangod.pid !== instance.pid; - })[0]; - arango.reconnect(newEndpoint.endpoint, db._name(), 'root', ''); - let waitInterval = 0.1; - let waited = 0; - let ok = false; - while (waited <= 20) { - document = db._collection('foxxqueuetest').document('test'); - let newServer = document.server; - if (server !== newServer) { - ok = true; - break; - } - wait(waitInterval); - waited += waitInterval; - } - // mop: currently supervision would run every 5s - if (!ok) { - throw new Error('Supervision should have moved the foxxqueues and foxxqueues should have been started to run on a new coordinator'); - } - }) -}); diff --git a/js/client/tests/resilience/foxxmaster.js b/js/client/tests/resilience/foxxmaster.js new file mode 100644 index 
0000000000..0b41640361 --- /dev/null +++ b/js/client/tests/resilience/foxxmaster.js @@ -0,0 +1,162 @@ +/*jshint strict: false, sub: true */ +/*global print, arango, assertTrue, assertNotNull, assertNotUndefined */ +'use strict'; + +//////////////////////////////////////////////////////////////////////////////// +/// DISCLAIMER +/// +/// Copyright 2016 ArangoDB GmbH, Cologne, Germany +/// Copyright 2014 triagens GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is ArangoDB GmbH, Cologne, Germany +/// +/// @author Andreas Streichardt +//////////////////////////////////////////////////////////////////////////////// + +const jsunity = require('jsunity'); +const arangodb = require('@arangodb'); +const wait = require('internal').wait; +const db = arangodb.db; +const fs = require('fs'); +const console = require('console'); +const request = require("@arangodb/request"); +const foxxManager = require('@arangodb/foxx/manager'); + +const suspendExternal = require('internal').suspendExternal; +const continueExternal = require("internal").continueExternal; +const download = require('internal').download; + +const instanceInfo = JSON.parse(fs.read('instanceinfo.json')); + +try { + let globals = JSON.parse(process.env.ARANGOSH_GLOBALS); + Object.keys(globals).forEach(g => { + global[g] = globals[g]; + }); +} catch (e) { +} + + +let executeOnServer = function(code) { + let httpOptions = {}; + httpOptions.method = 'POST'; + httpOptions.timeout = 3600; + + httpOptions.returnBodyOnError = true; + const reply = download(instanceInfo.url + '/_admin/execute?returnAsJSON=true', + code, + httpOptions); + + if (!reply.error && reply.code === 200) { + return JSON.parse(reply.body); + } else { + throw new Error('Could not send to server ' + JSON.stringify(reply)); + } +}; + +function serverSetup() { + let directory = require('./js/client/assets/queuetest/dirname.js'); + foxxManager.install(directory, '/queuetest'); + db._create('foxxqueuetest', {numberOfShards: 1, replicationFactor: 1}); + db.foxxqueuetest.insert({'_key': 'test', 'date': null, 'server': null}); + + const serverCode = ` +const queues = require('@arangodb/foxx/queues'); + +let queue = queues.create('q'); +queue.push({mount: '/queuetest', name: 'queuetest', 'repeatTimes': -1, 'repeatDelay': 1000}, {}); +`; + executeOnServer(serverCode); +} + +function serverTeardown() { + const serverCode = ` +const queues = require('@arangodb/foxx/queues'); +`; + executeOnServer(serverCode); + foxxManager.uninstall('/queuetest'); + db._drop('foxxqueuetest'); +} + +function FoxxmasterSuite() { + return { + setUp: function() { + serverSetup(); + wait(2.1); + }, + + tearDown : function () { + serverTeardown(); + }, + + testQueueWorks: function() { + let document = db._collection('foxxqueuetest').document('test'); + assertNotNull(document.server); + }, + + testQueueFailover: function() { + let document = db._collection('foxxqueuetest').document('test'); + let server = document.server; + 
assertNotNull(server); + + let instance = instanceInfo.arangods.filter(arangod => { + if (arangod.role === 'agent') { + return false; + } + let url = arangod.endpoint.replace(/tcp/, 'http') + '/_admin/server/id'; + let res = request({method: 'GET', url: url}); + let parsed = JSON.parse(res.body); + if (parsed.id === server) { + assertTrue(suspendExternal(arangod.pid)); + } + return parsed.id === server; + })[0]; + + assertNotUndefined(instance); + assertTrue(suspendExternal(instance.pid)); + + let newEndpoint = instanceInfo.arangods.filter(arangod => { + return arangod.role === 'coordinator' && arangod.pid !== instance.pid; + })[0]; + arango.reconnect(newEndpoint.endpoint, db._name(), 'root', ''); + let waitInterval = 0.1; + let waited = 0; + let ok = false; + while (waited <= 20) { + document = db._collection('foxxqueuetest').document('test'); + let newServer = document.server; + if (server !== newServer) { + ok = true; + break; + } + wait(waitInterval); + waited += waitInterval; + } + assertTrue(continueExternal(instance.pid)); + // mop: currently supervision would run every 5s + if (!ok) { + throw new Error('Supervision should have moved the foxxqueues and foxxqueues should have been started to run on a new coordinator'); + } + } + }; +} + +//////////////////////////////////////////////////////////////////////////////// +/// @brief executes the test suite +//////////////////////////////////////////////////////////////////////////////// + +jsunity.run(FoxxmasterSuite); + +return jsunity.done(); diff --git a/js/common/modules/@arangodb/common.js b/js/common/modules/@arangodb/common.js index 7849a70fb1..be51b53887 100644 --- a/js/common/modules/@arangodb/common.js +++ b/js/common/modules/@arangodb/common.js @@ -506,21 +506,3 @@ exports.checkAvailableVersions = function (version) { } } }; - -// ////////////////////////////////////////////////////////////////////////////// -// / @brief converts endpoints to URL -// ////////////////////////////////////////////////////////////////////////////// - -exports.endpointToURL = function(endpoint) { - if (endpoint.substr(0, 6) === 'ssl://') { - return 'https://' + endpoint.substr(6); - } - - const pos = endpoint.indexOf('://'); - - if (pos === -1) { - return 'http://' + endpoint; - } - - return 'http' + endpoint.substr(pos); -} From 8e15e928b0c6137e271a0a6fb8ab83fb269660da Mon Sep 17 00:00:00 2001 From: Simran Brucherseifer Date: Wed, 17 Aug 2016 18:46:59 +0200 Subject: [PATCH 6/7] Documentation improvments --- .../AQL/Fundamentals/TypeValueOrder.mdpp | 4 ++- Documentation/Books/AQL/README.mdpp | 2 ++ Documentation/Books/Manual/Graphs/README.mdpp | 4 +-- Documentation/Books/Manual/Indexing/Geo.mdpp | 9 +++++++ .../Books/Manual/Indexing/IndexBasics.mdpp | 23 +++++++++-------- .../Books/Manual/Indexing/Persistent.mdpp | 12 +++++++++ .../Books/Manual/ReleaseNotes/README.mdpp | 25 +++++++++++++++++++ .../Manual/Scalability/Architecture.mdpp | 3 +++ 8 files changed, 68 insertions(+), 14 deletions(-) diff --git a/Documentation/Books/AQL/Fundamentals/TypeValueOrder.mdpp b/Documentation/Books/AQL/Fundamentals/TypeValueOrder.mdpp index 2d39ef692e..649128de5d 100644 --- a/Documentation/Books/AQL/Fundamentals/TypeValueOrder.mdpp +++ b/Documentation/Books/AQL/Fundamentals/TypeValueOrder.mdpp @@ -69,7 +69,9 @@ result is defined as follows: - null: *null* is equal to *null* - boolean: *false* is less than *true* - number: numeric values are ordered by their cardinal value -- string: string values are ordered using a localized comparison, +- string: string values 
are ordered using a localized comparison, using the configured + [server language](../../Manual/Administration/Configuration/index.html#default-language) + for sorting according to the alphabetical order rules of that language Note: unlike in SQL, *null* can be compared to any value, including *null* itself, without the result being converted into *null* automatically. diff --git a/Documentation/Books/AQL/README.mdpp b/Documentation/Books/AQL/README.mdpp index e3de7fc69e..c649f3edef 100644 --- a/Documentation/Books/AQL/README.mdpp +++ b/Documentation/Books/AQL/README.mdpp @@ -21,6 +21,8 @@ and the different data models ArangoDB offers. In its purpose, AQL is similar to the Structured Query Language (SQL). AQL supports reading and modifying collection data, but it doesn't support data-definition operations such as creating and dropping databases, collections and indexes. +It is a pure data manipulation language (DML), not a data definition language +(DDL) or a data control language (DCL). The syntax of AQL queries is different to SQL, even if some keywords overlap. Nevertheless, AQL should be easy to understand for anyone with an SQL background. diff --git a/Documentation/Books/Manual/Graphs/README.mdpp b/Documentation/Books/Manual/Graphs/README.mdpp index 79c00f02ab..ce98b5b80a 100644 --- a/Documentation/Books/Manual/Graphs/README.mdpp +++ b/Documentation/Books/Manual/Graphs/README.mdpp @@ -63,11 +63,11 @@ So this question boils down to 'Can I afford the additional effort or do I need If you want to only traverse edges of a specific type, there are two ways to achieve this. The first would be an attribute in the edge document - i.e. `type`, where you specify a differentiator for the edge - -i.e. `"friends"`, `"family"`, `"maried"` or `"workmates"`, so you can later `FILTER e.type = "friends"` +i.e. `"friends"`, `"family"`, `"married"` or `"workmates"`, so you can later `FILTER e.type = "friends"` if you only want to follow the friend edges. Another way, which may be more efficient in some cases, is to use different edge collections for different -types of edges, so you have `friend_eges`, `family_edges`, `maried_edges` and `workmate_edges` as collection names. +types of edges, so you have `friend_eges`, `family_edges`, `married_edges` and `workmate_edges` as collection names. You can then configure several named graphs including a subset of the available edge and vertex collections - or you use anonymous graph queries, where you specify a list of edge collections to take into account in that query. To only follow friend edges, you would specify `friend_edges` as sole edge collection. diff --git a/Documentation/Books/Manual/Indexing/Geo.mdpp b/Documentation/Books/Manual/Indexing/Geo.mdpp index df1a28b80e..81407e1376 100644 --- a/Documentation/Books/Manual/Indexing/Geo.mdpp +++ b/Documentation/Books/Manual/Indexing/Geo.mdpp @@ -110,3 +110,12 @@ the index attributes or have non-numeric values in the index attributes will not be indexed. *ensureGeoConstraint* is deprecated and *ensureGeoIndex* should be used instead. +The index does not provide a `unique` option because of its limited usability. +It would prevent identical coordinates from being inserted only, but even a +slightly different location (like 1 inch or 1 cm off) would be unique again and +not considered a duplicate, although it probably should. The desired threshold +for detecting duplicates may vary for every project (including how to calculate +the distance even) and needs to be implemented on the application layer as needed. 
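For illustration, a minimal arangosh sketch of such an application-level check — assuming a hypothetical `places` collection with a geo index on `latitude`/`longitude` and a purely illustrative 10 meter duplicate threshold — could look like this:

```js
// assumed setup: db._create("places"); db.places.ensureGeoIndex("latitude", "longitude");
// anything closer than ~10 meters is treated as a duplicate; pick a threshold
// that matches your application's definition of "same place"
function isDuplicateLocation (lat, lon) {
  var hits = db._query(
    "FOR doc IN WITHIN(places, @lat, @lon, @radius) LIMIT 1 RETURN doc",
    { lat: lat, lon: lon, radius: 10 }
  ).toArray();
  return hits.length > 0;
}
```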
+You can write a [Foxx service](../Foxx/index.html) for this purpose and make use +of the AQL [geo functions](../../AQL/Functions/Geo.html) to find nearby +coordinates supported by a geo index. diff --git a/Documentation/Books/Manual/Indexing/IndexBasics.mdpp b/Documentation/Books/Manual/Indexing/IndexBasics.mdpp index b0d579e20a..830f3450e9 100644 --- a/Documentation/Books/Manual/Indexing/IndexBasics.mdpp +++ b/Documentation/Books/Manual/Indexing/IndexBasics.mdpp @@ -44,18 +44,18 @@ The primary index of a collection cannot be dropped or changed, and there is no mechanism to create user-defined primary indexes. -!SUBSECTION Edges Index +!SUBSECTION Edge Index Every [edge collection](../Appendix/Glossary.md#edge-collection) also has an -automatically created *edges index*. The edges index provides quick access to +automatically created *edge index*. The edge index provides quick access to documents by either their `_from` or `_to` attributes. It can therefore be used to quickly find connections between vertex documents and is invoked when -the connecting edges of a vertex are queried. +the connecting edges of a vertex are queried. -Edges indexes are used from within AQL when performing equality lookups on `_from` +Edge indexes are used from within AQL when performing equality lookups on `_from` or `_to` values in an edge collections. There are also dedicated functions to find edges given their `_from` or `_to` values that will always make use of the -edges index: +edge index: ```js db.collection.edges(""); @@ -66,13 +66,14 @@ db.collection.inEdges(""); db.collection.inEdges(""); ``` -Internally, the edges index is implemented as a hash index. It can be used for equality -lookups, but not for range queries or for sorting. As edges indexes are automatically -created for edge collections, it is not possible to create user-defined edges indexes. +Internally, the edge index is implemented as a hash index, which stores the union +of all `_from` and `_to` attributes. It can be used for equality +lookups, but not for range queries or for sorting. Edge indexes are automatically +created for edge collections. It is not possible to create user-defined edge indexes. However, it is possible to freely use the `_from` and `_to` attributes in user-defined indexes. -An edges index cannot be dropped or changed. +An edge index cannot be dropped or changed. !SUBSECTION Hash Index @@ -354,7 +355,7 @@ FOR doc IN posts RETURN doc ``` -The following FILTER conditions will not use the array index: +The following FILTER conditions will **not use** the array index: ```js FILTER doc.tags ANY == 'foobar' @@ -365,7 +366,7 @@ FILTER 'foobar' == doc.tags ``` It is also possible to create an index on subattributes of array values. This makes sense -when the index attribute is an array of objects, e.g. +if the index attribute is an array of objects, e.g. ```js db.posts.ensureIndex({ type: "hash", fields: [ "tags[*].name" ] }); diff --git a/Documentation/Books/Manual/Indexing/Persistent.mdpp b/Documentation/Books/Manual/Indexing/Persistent.mdpp index bed885c108..84eb00927b 100644 --- a/Documentation/Books/Manual/Indexing/Persistent.mdpp +++ b/Documentation/Books/Manual/Indexing/Persistent.mdpp @@ -154,3 +154,15 @@ and ``` will match. +!SECTION Persistent Indexes and Server Language + +The order of index entries in persistent indexes adheres to the configured +[server language](../Administration/Configuration/README.md#default-language). 
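For illustration, a minimal arangosh sketch — assuming a hypothetical `users` collection — of creating such an index, and of dropping and re-creating it after the server language has changed (the reason for this step is explained below):

```js
// assumed setup: a collection named "users" already exists; the index entries
// are ordered according to the server's configured default language
db.users.ensureIndex({ type: "persistent", fields: [ "name" ] });

// after restarting arangod with a different default language, drop and
// re-create the index so that its sort order matches the new language
var idx = db.users.getIndexes().filter(function (i) { return i.type === "persistent"; })[0];
db.users.dropIndex(idx);
db.users.ensureIndex({ type: "persistent", fields: [ "name" ] });
```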
+If, however, the server is restarted with a different language setting as when +the persistent index was created, not all documents may be returned anymore and +the sort order of those which are returned can be wrong (whenever the persistent +index is consulted). + +To fix persistent indexes after a language change, delete and re-create them. +Skiplist indexes are not affected, because they are not persisted and +automatically rebuilt on every server start. diff --git a/Documentation/Books/Manual/ReleaseNotes/README.mdpp b/Documentation/Books/Manual/ReleaseNotes/README.mdpp index d62bfdd167..d53a8d4423 100644 --- a/Documentation/Books/Manual/ReleaseNotes/README.mdpp +++ b/Documentation/Books/Manual/ReleaseNotes/README.mdpp @@ -1 +1,26 @@ !CHAPTER Release Notes + +!SECTION Whats New + +- [Whats New in 3.0](NewFeatures30.md) +- [Whats New in 2.8](NewFeatures28.md) +- [Whats New in 2.7](NewFeatures27.md) +- [Whats New in 2.6](NewFeatures26.md) +- [Whats New in 2.5](NewFeatures25.md) +- [Whats New in 2.4](NewFeatures24.md) +- [Whats New in 2.3](NewFeatures23.md) +- [Whats New in 2.2](NewFeatures22.md) +- [Whats New in 2.1](NewFeatures21.md) + +!SECTION Incompatible changes + +Also see [Upgrading](../Administration/Upgrading/README.md) in the +Administration chapter. + +- [Incompatible changes in 3.0](UpgradingChanges30.md) +- [Incompatible changes in 2.8](UpgradingChanges28.md) +- [Incompatible changes in 2.7](UpgradingChanges27.md) +- [Incompatible changes in 2.6](UpgradingChanges26.md) +- [Incompatible changes in 2.5](UpgradingChanges25.md) +- [Incompatible changes in 2.4](UpgradingChanges24.md) +- [Incompatible changes in 2.3](UpgradingChanges23.md) diff --git a/Documentation/Books/Manual/Scalability/Architecture.mdpp b/Documentation/Books/Manual/Scalability/Architecture.mdpp index cb56d677f1..c9dfc9cff2 100644 --- a/Documentation/Books/Manual/Scalability/Architecture.mdpp +++ b/Documentation/Books/Manual/Scalability/Architecture.mdpp @@ -92,6 +92,9 @@ it will automatically figure out where the data is stored (read) or to be stored (write). The information about the shards is shared across the coordinators using the Agency. +Also see [Sharding](../Administration/Sharding/README.md) in the +Administration chapter. + !SUBSECTION Many sensible configurations This architecture is very flexible and thus allows many configurations, From 41a162bce0ec27a566a70afd090a1dbff8eae9c2 Mon Sep 17 00:00:00 2001 From: Alan Plum Date: Wed, 17 Aug 2016 21:52:57 +0200 Subject: [PATCH 7/7] Fix default of legacy flag --- js/apps/system/_admin/aardvark/APP/foxxes.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/js/apps/system/_admin/aardvark/APP/foxxes.js b/js/apps/system/_admin/aardvark/APP/foxxes.js index cab4c2ac69..10480c02ad 100644 --- a/js/apps/system/_admin/aardvark/APP/foxxes.js +++ b/js/apps/system/_admin/aardvark/APP/foxxes.js @@ -64,7 +64,7 @@ router.use(foxxRouter) const installer = createRouter(); foxxRouter.use(installer) -.queryParam('legacy', joi.boolean().default(true), dd` +.queryParam('legacy', joi.boolean().default(false), dd` Flag to install the service in legacy mode. `) .queryParam('upgrade', joi.boolean().default(false), dd`