mirror of https://gitee.com/bigwinds/arangodb

Merge branch 'aql-jmmh-conditions' of github.com:arangodb/arangodb into aql-jmmh-conditions

commit 00c99f135a

@@ -25,6 +25,10 @@ v2.8.0 (XXXX-XX-XX)
 v2.7.0 (XXXX-XX-XX)
 -------------------

 * fixed replication with a 2.6 replication configuration and issues with a 2.6 master

+* raised default value of `--server.descriptors-minimum` to 1024
+
+* allow Foxx apps to be installed underneath URL path `/_open/`, so they can be
+  (intentionally) accessed without authentication.

@@ -159,6 +159,18 @@ namespace triagens {
           return _root;
         }

+////////////////////////////////////////////////////////////////////////////////
+/// @brief whether or not the condition is empty
+////////////////////////////////////////////////////////////////////////////////
+
+        inline bool isEmpty () const {
+          if (_root == nullptr) {
+            return true;
+          }
+
+          return (_root->numMembers() == 0);
+        }
+
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief return the condition as a Json object
 ////////////////////////////////////////////////////////////////////////////////

@@ -40,8 +40,6 @@ bool ConditionFinder::before (ExecutionNode* en) {
       // something that can throw is not safe to optimize
       _filters.clear();
      _sorts.clear();
-      delete _condition;
-      _condition = nullptr;
       return true;
     }

@@ -100,41 +98,51 @@ bool ConditionFinder::before (ExecutionNode* en) {
       TRI_ASSERT(outvars.size() == 1);

+      _variableDefinitions.emplace(outvars[0]->id, static_cast<CalculationNode const*>(en)->expression()->node());
+
       if (_filters.find(outvars[0]->id) != _filters.end()) {
         // a variable used in a FILTER
         auto expressionNode = static_cast<CalculationNode const*>(en)->expression()->node();

         if (_condition == nullptr) {
           // did not have any expression before. now save what we found
           _condition = new Condition(_plan->getAst());
         }

         TRI_ASSERT(_condition != nullptr);
         _condition->andCombine(expressionNode);
       }
       break;
     }

     case EN::ENUMERATE_COLLECTION: {
       if (_condition == nullptr) {
         // No one used a filter up to now. Leave this node
         break;
       }

       auto node = static_cast<EnumerateCollectionNode const*>(en);
       if (_changes->find(node->id()) != _changes->end()) {
         std::cout << "Already optimized " << node->id() << std::endl;
         break;
       }

-      TRI_ASSERT(_condition != nullptr);
-      _condition->normalize(_plan);
+      auto const& varsValid = node->getVarsValid();
+      std::unordered_set<Variable const*> varsUsed;
+
+      std::unique_ptr<Condition> condition(new Condition(_plan->getAst()));
+
+      for (auto& it : _variableDefinitions) {
+        if (_filters.find(it.first) != _filters.end()) {
+          // a variable used in a FILTER
+
+          // now check if all variables from the FILTER condition are still valid here
+          Ast::getReferencedVariables(it.second, varsUsed);
+          bool valid = true;
+          for (auto& it2 : varsUsed) {
+            if (varsValid.find(it2) == varsValid.end()) {
+              valid = false;
+            }
+          }
+
+          if (valid) {
+            condition->andCombine(it.second);
+          }
+        }
+      }
+
+      condition->normalize(_plan);
+
+      if (condition->isEmpty()) {
+        // no filter conditions left
+        break;
+      }
+
       std::vector<Index const*> usedIndexes;
       SortCondition sortCondition(_sorts, _variableDefinitions);

-      if (_condition->findIndexes(node, usedIndexes, sortCondition)) {
+      if (condition->findIndexes(node, usedIndexes, sortCondition)) {
         TRI_ASSERT(! usedIndexes.empty());
         std::cout << node->id() << " Number of indexes used: " << usedIndexes.size() << std::endl;
         // We either can find indexes for everything or findIndexes will clear out usedIndexes

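A note on the validity check above: a FILTER expression may only be merged into the new condition if every variable it references has already been set by the time the EnumerateCollectionNode runs. A minimal standalone sketch of that check, with plain ints standing in for triagens::aql::Variable pointers (all names here are illustrative, not the real API):

#include <iostream>
#include <unordered_set>
#include <vector>

// plain ints stand in for triagens::aql::Variable pointers
using VariableId = int;

// true only if every variable referenced by a FILTER expression is
// contained in the set of variables valid at the current plan node
static bool allVariablesValid (std::vector<VariableId> const& varsUsed,
                               std::unordered_set<VariableId> const& varsValid) {
  for (auto const& v : varsUsed) {
    if (varsValid.find(v) == varsValid.end()) {
      return false;  // references a variable that is set later in the plan
    }
  }
  return true;
}

int main () {
  std::unordered_set<VariableId> varsValid = { 1, 2, 3 };

  std::cout << allVariablesValid({ 1, 2 }, varsValid) << "\n";  // 1: can be merged
  std::cout << allVariablesValid({ 1, 4 }, varsValid) << "\n";  // 0: must be skipped
  return 0;
}
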
@@ -145,11 +153,9 @@ bool ConditionFinder::before (ExecutionNode* en) {
         node->collection(),
         node->outVariable(),
         usedIndexes,
-        _condition->clone()
+        condition.get()
       ));

       // We handed over the condition to the created IndexNode
-      _shouldFreeCondition = false; // TODO: check if we can get rid of this variable
+      condition.release();

       // We keep this nodes change
       _changes->emplace(node->id(), newNode.get());

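The condition.get()/condition.release() pair above is the usual std::unique_ptr ownership hand-off: the raw pointer is passed to the new IndexNode while the unique_ptr still owns the object, and release() is only called once the node has been registered, so any exception in between still frees the condition. A minimal sketch of the pattern (Node is a hypothetical owner, not the real IndexNode):

#include <iostream>
#include <memory>

struct Condition {
  ~Condition () { std::cout << "condition freed exactly once\n"; }
};

// hypothetical consumer that takes over ownership of the raw pointer
struct Node {
  explicit Node (Condition* c) : _condition(c) {}
  ~Node () { delete _condition; }
  Condition* _condition;
};

int main () {
  std::unique_ptr<Condition> condition(new Condition());

  // while the node is being set up, the unique_ptr still owns the object,
  // so an exception thrown here would not leak the condition
  Node node(condition.get());

  // the node accepted ownership: give it up without deleting
  condition.release();

  return 0;  // only node's destructor frees the condition
}
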
@@ -46,19 +46,13 @@ namespace triagens {
         ConditionFinder (ExecutionPlan* plan,
                          std::unordered_map<size_t, ExecutionNode*>* changes)
           : _plan(plan),
             _condition(nullptr),
             _variableDefinitions(),
             _filters(),
             _sorts(),
-            _changes(changes),
-            _shouldFreeCondition(true) {
+            _changes(changes) {
         };

         ~ConditionFinder () {
-          // TODO: decide whether conditions should belong to individual IndexNodes or are shared
-          if (_shouldFreeCondition) {
           delete _condition;
-          }
         }

         std::vector<std::pair<AstNode const*, bool>> translateSorts () const;

@@ -70,13 +64,11 @@ namespace triagens {
       private:

         ExecutionPlan* _plan;
         Condition* _condition;
         std::unordered_map<VariableId, AstNode const*> _variableDefinitions;
         std::unordered_set<VariableId> _filters;
         std::vector<std::pair<VariableId, bool>> _sorts;
         // note: this class will never free the contents of this map
         std::unordered_map<size_t, ExecutionNode*>* _changes;
-        bool _shouldFreeCondition;
-
     };
   }

@@ -919,7 +919,7 @@ ExecutionNode::RegisterPlan* ExecutionNode::RegisterPlan::clone (ExecutionPlan*
   return other.release();
 }

-void ExecutionNode::RegisterPlan::after (ExecutionNode *en) {
+void ExecutionNode::RegisterPlan::after (ExecutionNode* en) {
   switch (en->getType()) {
     case ExecutionNode::ENUMERATE_COLLECTION: {
       depth++;

@@ -931,11 +931,11 @@ void ExecutionNode::RegisterPlan::after (ExecutionNode* en) {

       auto ep = static_cast<EnumerateCollectionNode const*>(en);
       TRI_ASSERT(ep != nullptr);
-      varInfo.emplace(ep->_outVariable->id, VarInfo(depth, totalNrRegs));
+      varInfo.emplace(ep->outVariable()->id, VarInfo(depth, totalNrRegs));
       totalNrRegs++;
       break;
     }

     case ExecutionNode::INDEX_RANGE: {
       depth++;
       nrRegsHere.emplace_back(1);

@@ -66,12 +66,7 @@ void IndexNode::toJsonHelper (triagens::basics::Json& nodes,

   json("indexes", indexes);

-  if (_condition != nullptr) {
-    json("condition", _condition->toJson(TRI_UNKNOWN_MEM_ZONE));
-  }
-  else {
-    json("condition", triagens::basics::Json(triagens::basics::Json::Object));
-  }
+  json("condition", _condition->toJson(TRI_UNKNOWN_MEM_ZONE));

   // And add it:
   nodes(json);

@@ -124,6 +119,16 @@ IndexNode::IndexNode (ExecutionPlan* plan,

     _indexes.emplace_back(index);
   }
+
+  // TODO: rebuild _condition here!!
 }

+////////////////////////////////////////////////////////////////////////////////
+/// @brief destroy the IndexNode
+////////////////////////////////////////////////////////////////////////////////
+
+IndexNode::~IndexNode () {
+  delete _condition;
+}
+
 ////////////////////////////////////////////////////////////////////////////////

@@ -160,11 +165,9 @@ std::vector<Variable const*> IndexNode::getVariablesUsedHere () const {
////////////////////////////////////////////////////////////////////////////////

void IndexNode::getVariablesUsedHere (std::unordered_set<Variable const*>& vars) const {
-  if (_condition != nullptr) {
-    Ast::getReferencedVariables(_condition->root(), vars);
+  Ast::getReferencedVariables(_condition->root(), vars);

-    vars.erase(_outVariable);
-  }
+  vars.erase(_outVariable);
}

// Local Variables:

@@ -78,8 +78,7 @@ namespace triagens {

         IndexNode (ExecutionPlan*, triagens::basics::Json const& base);

-        ~IndexNode () {
-        }
+        ~IndexNode ();

////////////////////////////////////////////////////////////////////////////////
/// @brief return the type of the node

@@ -844,7 +844,7 @@ int triagens::aql::removeSortRandRule (Optimizer* opt,
       case EN::FILTER:
       case EN::SUBQUERY:
       case EN::ENUMERATE_LIST:
-      case EN::INDEX: // TODO FIXME
+      case EN::INDEX:
       case EN::INDEX_RANGE: {
         // if we found another SortNode, an AggregateNode, FilterNode, a SubqueryNode,
         // an EnumerateListNode or an IndexRangeNode

@@ -2065,11 +2065,11 @@ int triagens::aql::useIndexesRule (Optimizer* opt,
       newPlan->registerNode(newNode);
       newPlan->replaceNode(newPlan->getNodeById(it.first), newNode);

-      // prevent double deletion by cleanupChanges below
+      // prevent double deletion by cleanupChanges()
       it.second = nullptr;
     }
-    opt->addPlan(newPlan.release(), rule, true);
     changes.clear();
+    opt->addPlan(newPlan.release(), rule, true);
   }
   else {
     opt->addPlan(plan, rule, false);

@@ -3453,11 +3453,6 @@ static bool NextPermutationTuple (std::vector<size_t>& data,
 int triagens::aql::interchangeAdjacentEnumerationsRule (Optimizer* opt,
                                                         ExecutionPlan* plan,
                                                         Optimizer::Rule const* rule) {
+  // TODO FIXME: rule currently disabled because it breaks the IndexNode stuff
+  opt->addPlan(plan, rule, false);
+  return TRI_ERROR_NO_ERROR;
+

   std::vector<ExecutionNode*>&& nodes = plan->findNodesOfType(EN::ENUMERATE_COLLECTION, true);

   std::unordered_set<ExecutionNode*> nodesSet;

@@ -98,10 +98,8 @@ namespace triagens {
////////////////////////////////////////////////////////////////////////////////

        inline bool needsRegister () const {
-         char const cf = name[0];
-         char const cb = name.back();
          // variables starting with a number are not user-defined
-         return (cf < '0' || cf > '9') || cb != '_';
+         return isUserDefined() || name.back() != '_';
        }

////////////////////////////////////////////////////////////////////////////////

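The simplification assumes isUserDefined() is the inverse of the old leading-digit test: internal variables get purely numeric name prefixes, so a variable needs a register when it is user-defined or its generated name does not end in '_'. A self-contained sketch of the two equivalent predicates (free functions standing in for the member functions):

#include <cassert>
#include <string>

// assumption: internal (not user-defined) variables get numeric name prefixes
static bool isUserDefined (std::string const& name) {
  char const c = name[0];
  return (c < '0' || c > '9');
}

static bool needsRegister (std::string const& name) {
  // equivalent to the old inline digit test plus the trailing-'_' check
  return isUserDefined(name) || name.back() != '_';
}

int main () {
  assert(needsRegister("myVar"));    // user-defined
  assert(needsRegister("3temp"));    // internal, but not ending in '_'
  assert(! needsRegister("4_"));     // internal helper variable, no register
  return 0;
}
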
@@ -342,7 +342,7 @@ ApplicationScheduler::ApplicationScheduler (ApplicationServer* applicationServer
     _multiSchedulerAllowed(true),
     _nrSchedulerThreads(4),
     _backend(0),
-    _descriptorMinimum(256),
+    _descriptorMinimum(1024),
     _disableControlCHandler(false) {
 }

@@ -225,13 +225,14 @@ static int LoadConfiguration (TRI_vocbase_t* vocbase,
   }

   std::unique_ptr<TRI_json_t> json(TRI_JsonFile(TRI_UNKNOWN_MEM_ZONE, filename, nullptr));
+  TRI_FreeString(TRI_CORE_MEM_ZONE, filename);

   if (! TRI_IsObjectJson(json.get())) {
     LOG_ERROR("unable to read replication applier configuration from file '%s'", filename);
-    TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
     return TRI_ERROR_REPLICATION_INVALID_APPLIER_CONFIGURATION;
   }

   int res = TRI_ERROR_NO_ERROR;

-  TRI_FreeString(TRI_CORE_MEM_ZONE, filename);

   if (config->_endpoint != nullptr) {
     TRI_FreeString(TRI_CORE_MEM_ZONE, config->_endpoint);

@@ -250,20 +251,8 @@ static int LoadConfiguration (TRI_vocbase_t* vocbase,
     config->_password = nullptr;
   }

-  // read the endpoint
-  TRI_json_t const* value = TRI_LookupObjectJson(json.get(), "endpoint");
-
-  if (! TRI_IsStringJson(value)) {
-    res = TRI_ERROR_REPLICATION_INVALID_APPLIER_CONFIGURATION;
-  }
-  else {
-    config->_endpoint = TRI_DuplicateString2Z(TRI_CORE_MEM_ZONE,
-                                              value->_value._string.data,
-                                              value->_value._string.length - 1);
-  }
-
   // read the database name
-  value = TRI_LookupObjectJson(json.get(), "database");
+  TRI_json_t const* value = TRI_LookupObjectJson(json.get(), "database");

   if (! TRI_IsStringJson(value)) {
     config->_database = TRI_DuplicateStringZ(TRI_CORE_MEM_ZONE,

@@ -385,8 +374,21 @@ static int LoadConfiguration (TRI_vocbase_t* vocbase,
       }
     }
   }

+  // read the endpoint
+  value = TRI_LookupObjectJson(json.get(), "endpoint");
+
-  return res;
+  if (! TRI_IsStringJson(value)) {
+    // we haven't found an endpoint. now don't let the start fail but continue
+    config->_autoStart = false;
+  }
+  else {
+    config->_endpoint = TRI_DuplicateString2Z(TRI_CORE_MEM_ZONE,
+                                              value->_value._string.data,
+                                              value->_value._string.length - 1);
+  }
+
+  return TRI_ERROR_NO_ERROR;
 }

////////////////////////////////////////////////////////////////////////////////

@@ -559,10 +561,6 @@ TRI_replication_applier_t* TRI_CreateReplicationApplier (TRI_server_t* server,
                                                          TRI_vocbase_t* vocbase) {
   TRI_replication_applier_t* applier = new TRI_replication_applier_t(server, vocbase);

-  if (applier == nullptr) {
-    return nullptr;
-  }
-
   TRI_InitConfigurationReplicationApplier(&applier->_configuration);
   TRI_InitStateReplicationApplier(&applier->_state);

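The deleted branch was dead code: a plain new in C++ reports allocation failure by throwing std::bad_alloc rather than returning nullptr, so the applier pointer could never be null at that point. The contrast, as a small runnable example:

#include <iostream>
#include <new>

struct Applier {};

int main () {
  // plain new: either returns a valid pointer or throws std::bad_alloc
  try {
    Applier* a = new Applier();
    delete a;                        // a can never be nullptr here
  }
  catch (std::bad_alloc const&) {
    std::cerr << "allocation failed\n";
  }

  // only the explicit nothrow form may yield nullptr
  Applier* b = new (std::nothrow) Applier();
  if (b == nullptr) {
    std::cerr << "allocation failed\n";
  }
  delete b;
  return 0;
}
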
@@ -571,10 +569,10 @@ TRI_replication_applier_t* TRI_CreateReplicationApplier (TRI_server_t* server,

   if (res != TRI_ERROR_NO_ERROR &&
       res != TRI_ERROR_FILE_NOT_FOUND) {
-    TRI_set_errno(res);
     TRI_DestroyStateReplicationApplier(&applier->_state);
     TRI_DestroyConfigurationReplicationApplier(&applier->_configuration);
     delete applier;
+    TRI_set_errno(res);

     return nullptr;
   }

@@ -583,10 +581,10 @@ TRI_replication_applier_t* TRI_CreateReplicationApplier (TRI_server_t* server,

   if (res != TRI_ERROR_NO_ERROR &&
       res != TRI_ERROR_FILE_NOT_FOUND) {
-    TRI_set_errno(res);
     TRI_DestroyStateReplicationApplier(&applier->_state);
     TRI_DestroyConfigurationReplicationApplier(&applier->_configuration);
     delete applier;
+    TRI_set_errno(res);

     return nullptr;
   }

@@ -897,8 +895,9 @@ int TRI_LoadStateReplicationApplier (TRI_vocbase_t* vocbase,
   }

   if (res == TRI_ERROR_NO_ERROR) {
-    // read the safeResumeTick
-    res |= ReadTick(json.get(), "safeResumeTick", &state->_safeResumeTick, true);
+    // read the safeResumeTick. note: this is an optional attribute
+    state->_safeResumeTick = 0;
+    ReadTick(json.get(), "safeResumeTick", &state->_safeResumeTick, true);
   }

   LOG_TRACE("replication state file read successfully");

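Two fixes in one here: the optional safeResumeTick attribute now gets an explicit default of 0 before the read, and the ReadTick() result is no longer folded into res with |=. OR-ing enumerated error codes produces garbage values, and a missing optional attribute should not fail the load anyway. A sketch of the optional-read-with-default pattern (the map lookup is a stand-in for ReadTick/TRI_LookupObjectJson):

#include <cstdint>
#include <iostream>
#include <map>
#include <string>

// stand-in for ReadTick()/TRI_LookupObjectJson(): returns false when the
// attribute is absent and leaves the caller's default untouched
static bool readOptionalTick (std::map<std::string, uint64_t> const& doc,
                              std::string const& name,
                              uint64_t* dst) {
  auto it = doc.find(name);
  if (it == doc.end()) {
    return false;
  }
  *dst = it->second;
  return true;
}

int main () {
  std::map<std::string, uint64_t> state = { { "lastProcessedContinuousTick", 4711 } };

  uint64_t safeResumeTick = 0;  // explicit default, set before the optional read
  readOptionalTick(state, "safeResumeTick", &safeResumeTick);

  std::cout << "safeResumeTick: " << safeResumeTick << "\n";  // prints 0
  return 0;
}
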
|
@ -1486,8 +1486,9 @@ TRI_vocbase_t* TRI_OpenVocBase (TRI_server_t* server,
|
|||
vocbase->_replicationApplier = TRI_CreateReplicationApplier(server, vocbase);
|
||||
|
||||
if (vocbase->_replicationApplier == nullptr) {
|
||||
// TODO
|
||||
LOG_FATAL_AND_EXIT("initializing replication applier for database '%s' failed", vocbase->_name);
|
||||
LOG_FATAL_AND_EXIT("initializing replication applier for database '%s' failed: %s",
|
||||
vocbase->_name,
|
||||
TRI_last_error());
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@@ -33,7 +33,7 @@
 var FoxxManager = require("org/arangodb/foxx/manager");
 var fs = require("fs");
 var db = require("internal").db;
-var basePath = fs.makeAbsolute(fs.join(module.startupPath(), "common", "test-data", "apps"));
+var basePath = fs.makeAbsolute(fs.join(require("internal").startupPath, "common", "test-data", "apps"));
 var arango = require("org/arangodb").arango;
 var originalEndpoint = arango.getEndpoint().replace(/localhost/, '127.0.0.1');

@@ -609,6 +609,21 @@ Module._extensions['.coffee'] = function(module, filename) {
   module._compile(cs.compile(stripBOM(content), {bare: true}), filename);
 };

+// backwards compatibility
+Module._oldAppPath = function () {
+  if (internal.appPath === undefined) {
+    return undefined;
+  }
+  return fs.join(internal.appPath, 'databases', internal.db._name());
+};
+
+// backwards compatibility
+Module._devAppPath = function () {
+  if (internal.devAppPath === undefined) {
+    return undefined;
+  }
+  return fs.join(internal.devAppPath, 'databases', internal.db._name());
+};
+
 // backwards compatibility
 Module.Module = Module;

@@ -180,7 +180,7 @@ function Done (suiteName) {
/// @brief runs a JSUnity test file
////////////////////////////////////////////////////////////////////////////////

-function RunTest (path) {
+function RunTest (path, outputReply) {
  var content;
  var f;

@@ -193,7 +193,13 @@ function RunTest (path, outputReply) {
     throw "cannot create context function";
   }

-  return f(path);
+  var rc = f(path);
+  if (outputReply === true) {
+    return rc;
+  }
+  else {
+    return rc.status;
+  }
 }

// -----------------------------------------------------------------------------

|
@ -14,13 +14,13 @@ function runJSUnityTests(tests) {
|
|||
var allResults = [];
|
||||
var res;
|
||||
|
||||
// find out whether we're on server or client...
|
||||
var runenvironment = "arangod";
|
||||
if (typeof(require('internal').arango) === 'object') {
|
||||
runenvironment = "arangosh";
|
||||
}
|
||||
|
||||
_.each(tests, function (file) {
|
||||
// find out whether we're on server or client...
|
||||
var runenvironment = "arangod";
|
||||
if (typeof(require('internal').arango) === 'object') {
|
||||
runenvironment = "arangosh";
|
||||
}
|
||||
|
||||
if (result) {
|
||||
print("\n" + Date() + " " + runenvironment + ": Running JSUnity test from file '" + file + "'");
|
||||
} else {
|
||||
|
@@ -29,7 +29,7 @@ function runJSUnityTests(tests) {
     }

     try {
-      res = runTest(file);
+      res = runTest(file, true);
       allResults.push(res);
       result = result && res.status;
     } catch (err) {

@@ -69,7 +69,7 @@ function runCommandLineTests(opts) {
       options = opts || {},
       jasmineReportFormat = options.jasmineReportFormat || 'progress',
       unitTests = internal.unitTests(),
-      isSpecRegEx = /.+spec\.js/,
+      isSpecRegEx = /.+-spec.*\.js/,
       isSpec = function (unitTest) {
         return isSpecRegEx.test(unitTest);
       },

@@ -141,7 +141,8 @@ var optionsDefaults = { "cluster": false,
                         "valgrindargs": [],
                         "valgrindXmlFileBase" : "",
                         "extraargs": [],
-                        "coreDirectory": "/var/tmp"
+                        "coreDirectory": "/var/tmp",
+                        "writeXmlReport": true
 };
 var allTests =

@@ -866,7 +867,7 @@ function runThere (options, instanceInfo, file) {
   var t;
   if (file.indexOf("-spec") === -1) {
     t = 'var runTest = require("jsunity").runTest; '+
-        'return runTest(' + JSON.stringify(file) + ');';
+        'return runTest(' + JSON.stringify(file) + ', true);';
   }
   else {
     var jasmineReportFormat = options.jasmineReportFormat || 'progress';

@@ -911,7 +912,7 @@ function runHere (options, instanceInfo, file) {
   try {
     if (file.indexOf("-spec") === -1) {
       var runTest = require("jsunity").runTest;
-      result = runTest(file);
+      result = runTest(file, true);
     }
     else {
       var jasmineReportFormat = options.jasmineReportFormat || 'progress';

@@ -1109,6 +1110,7 @@ function single_usage (testsuite, list) {

 testFuncs.single_server = function (options) {
   var result = { };
+  options.writeXmlReport = false;
   if (options.test !== undefined) {
     var instanceInfo = startInstance("tcp", options, [], "single_server");
     if (instanceInfo === false) {

@@ -1146,6 +1148,7 @@ testFuncs.single_server = function (options) {

 testFuncs.single_localserver = function (options) {
   var result = { };
+  options.writeXmlReport = false;
   if (options.test !== undefined) {
     var instanceInfo;
     var te = options.test;

@@ -1165,6 +1168,7 @@ testFuncs.single_localserver = function (options) {

 testFuncs.single_client = function (options) {
   var result = { };
+  options.writeXmlReport = false;
   if (options.test !== undefined) {
     var instanceInfo = startInstance("tcp", options, [], "single_client");
     if (instanceInfo === false) {

@@ -2080,7 +2084,19 @@ testFuncs.authentication_parameters = function (options) {
   return results;
 };

-var internalMembers = ["code", "error", "status", "duration", "failed", "total", "crashed", "all_ok", "ok", "message"];
+var internalMembers = [
+  "code",
+  "error",
+  "status",
+  "duration",
+  "failed",
+  "total",
+  "crashed",
+  "all_ok",
+  "ok",
+  "message",
+  "suiteName"
+];

 function unitTestPrettyPrintResults(r) {
   var testrun;

@@ -1324,7 +1324,7 @@
       var appsToZip = aal.byExample({type: "app", isSystem: false});
       while (appsToZip.hasNext()) {
         tmp = appsToZip.next();
-        path = fs.join(Module._oldAppPath, tmp.path);
+        path = fs.join(Module._oldAppPath(), tmp.path);
         try {
           mapAppZip[tmp.app] = fmUtils.zipDirectory(path);
         } catch (e) {

@@ -1334,7 +1334,7 @@

       // 2. If development mode, Zip all development APPs and create a map name => zipFile

-      var devPath = Module._devAppPath;
+      var devPath = Module._devAppPath();
       var mapDevAppZip = {};
       var i;
       if (devPath !== undefined) {

@@ -1354,7 +1354,7 @@
       // 3. Remove old appPath

       try {
-        fs.removeDirectoryRecursive(Module._oldAppPath, true);
+        fs.removeDirectoryRecursive(Module._oldAppPath(), true);
       } catch(e) {
       }

@@ -219,6 +219,7 @@ std::string JsonHelper::toString (TRI_json_t const* json) {
   int res = TRI_StringifyJson(&buffer, json);

   if (res != TRI_ERROR_NO_ERROR) {
+    TRI_DestroyStringBuffer(&buffer);
     return "";
   }

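The added TRI_DestroyStringBuffer call plugs a leak on the early-return path, where the stringify buffer was previously abandoned. In modern C++ the same guarantee usually comes from an RAII guard, so that every return path releases the buffer. A sketch with a hypothetical Buffer type standing in for TRI_string_buffer_t:

#include <cstdio>
#include <cstdlib>
#include <string>

// hypothetical C-style buffer resembling TRI_string_buffer_t
struct Buffer {
  char* data = nullptr;
};

// RAII guard: the buffer is released on every exit path, so an early
// error return (as in JsonHelper::toString) cannot leak it
struct BufferGuard {
  explicit BufferGuard (Buffer* b) : _b(b) {}
  ~BufferGuard () { std::free(_b->data); _b->data = nullptr; }
  Buffer* _b;
};

static std::string toString (bool stringifyOk) {
  Buffer buffer;
  BufferGuard guard(&buffer);

  buffer.data = static_cast<char*>(std::malloc(16));
  if (buffer.data == nullptr || ! stringifyOk) {
    return "";                       // guard still frees the buffer here
  }

  std::snprintf(buffer.data, 16, "{}");
  return std::string(buffer.data);   // copy made before the guard runs
}

int main () {
  return toString(true) == "{}" ? 0 : 1;
}
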
@@ -378,6 +379,26 @@ namespace triagens {
       return stream;
     }

+////////////////////////////////////////////////////////////////////////////////
+/// @brief append the JSON contents to an output stream
+////////////////////////////////////////////////////////////////////////////////
+
+    std::ostream& operator<< (std::ostream& stream,
+                              TRI_json_t const* json) {
+      stream << JsonHelper::toString(json);
+      return stream;
+    }
+
+////////////////////////////////////////////////////////////////////////////////
+/// @brief append the JSON contents to an output stream
+////////////////////////////////////////////////////////////////////////////////
+
+    std::ostream& operator<< (std::ostream& stream,
+                              TRI_json_t const& json) {
+      stream << JsonHelper::toString(&json);
+      return stream;
+    }
+
   }
 }

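The two new overloads just delegate to JsonHelper::toString, which is the standard way to make a foreign C struct streamable; note the pointer overload deliberately prints the contents rather than the address. A generic, self-contained version of the pattern (Doc stands in for TRI_json_t):

#include <iostream>
#include <string>

// stand-in for the C-level TRI_json_t struct
struct Doc {
  std::string payload;
};

static std::string toString (Doc const* doc) {
  return doc == nullptr ? "null" : doc->payload;
}

// pointer overload: stream the contents, not the address
std::ostream& operator<< (std::ostream& stream, Doc const* doc) {
  stream << toString(doc);
  return stream;
}

// reference overload, delegating to the pointer form
std::ostream& operator<< (std::ostream& stream, Doc const& doc) {
  stream << &doc;  // exact match picks the Doc const* overload above
  return stream;
}

int main () {
  Doc d{ "{\"hello\":\"world\"}" };
  std::cout << d << "\n" << &d << "\n";
  return 0;
}
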
@@ -1137,6 +1137,8 @@ namespace triagens {

         friend std::ostream& operator<< (std::ostream&, Json const*);
         friend std::ostream& operator<< (std::ostream&, Json const&);
+        friend std::ostream& operator<< (std::ostream&, TRI_json_t const*);
+        friend std::ostream& operator<< (std::ostream&, TRI_json_t const&);

// -----------------------------------------------------------------------------
// --SECTION--                                                 private variables

@@ -61,14 +61,14 @@ JSLoader::JSLoader () {

 v8::Handle<v8::Value> JSLoader::executeGlobalScript (v8::Isolate* isolate,
                                                      v8::Handle<v8::Context> context,
-                                                     string const& name) {
+                                                     std::string const& name) {
   v8::TryCatch tryCatch;
   v8::EscapableHandleScope scope(isolate);
   v8::Handle<v8::Value> result;

   findScript(name);

-  map<string, string>::iterator i = _scripts.find(name);
+  std::map<std::string, std::string>::iterator i = _scripts.find(name);

   if (i == _scripts.end()) {
     // correct the path/name

@@ -84,7 +84,7 @@ v8::Handle<v8::Value> JSLoader::executeGlobalScript (v8::Isolate* isolate,

   if (tryCatch.HasCaught()) {
     if (tryCatch.CanContinue()) {
-      TRI_LogV8Exception(isolate, &tryCatch);/// TODO: could this be the place where we loose the information about parse errors of scripts?
+      TRI_LogV8Exception(isolate, &tryCatch); // TODO: could this be the place where we lose the information about parse errors of scripts?
       return v8::Undefined(isolate);
     }
     else {

scripts/run

@@ -1,6 +1,5 @@
 #!/bin/bash
 export PID=$$
-mkdir data-$PID

 self=$0
 if test -f "${self}.js"; then

@@ -19,6 +18,10 @@ else
   PS='/'
 fi;

+LOGFILE="out${PS}log-$PID"
+DBDIR="out${PS}data-$PID"
+mkdir -p ${DBDIR}
+
 export PORT=`expr 1024 + $RANDOM`
 declare -a ARGS
 export VG=''

@@ -36,9 +39,9 @@ for i in "$@"; do
     ARGS+=("$i")
   fi
 done
-mkdir out
-echo Database has its data in out${PS}data-$PID
-echo Logfile is in out${PS}log-$PID

+echo Database has its data in ${DBDIR}
+echo Logfile is in ${LOGFILE}
 $VG bin/arangod \
   --configuration none \
   --cluster.agent-path bin${PS}etcd-arango${EXT} \

@@ -49,8 +52,8 @@ $VG bin/arangod \
   --cluster.disable-dispatcher-kickstarter false \
   --cluster.data-path cluster \
   --cluster.log-path cluster \
-  --database.directory out${PS}data-$PID \
-  --log.file out${PS}log-$PID \
+  --database.directory ${DBDIR} \
+  --log.file ${LOGFILE} \
   --server.endpoint tcp://127.0.0.1:$PORT \
   --javascript.startup-directory js \
   --javascript.app-path js${PS}apps \

@@ -61,11 +64,11 @@ $VG bin/arangod \
   $VXML

 if test $? -eq 0; then
-  echo removing out${PS}log-$PID out${PS}data-$PID
-  rm -rf out${PS}log-$PID out${PS}data-$PID
+  echo "removing ${LOGFILE} ${DBDIR}"
+  rm -rf ${LOGFILE} ${DBDIR}
 else
-  echo "failed - don't remove out${PS}log-$PID out${PS}data-$PID - heres the logfile:"
-  cat out${PS}log-$PID
+  echo "failed - don't remove ${LOGFILE} ${DBDIR} - heres the logfile:"
+  cat ${LOGFILE}
 fi

 echo Server has terminated.

@@ -156,18 +156,21 @@ function main (argv) {
     print(JSON.stringify(r));
   }

-  fs.write("out/UNITTEST_RESULT.json", JSON.stringify(r));
-  fs.write("out/UNITTEST_RESULT_SUMMARY.txt", JSON.stringify(! r.crashed));
-
-  try {
-    resultsToXml(r, "UNITTEST_RESULT_", (options.hasOwnProperty('cluster') && options.cluster));
-  }
-  catch (x) {
-    print("exception while serializing status xml!");
-    print(x.message);
-    print(JSON.stringify(r));
-  }
+  if (options.writeXmlReport) {
+    fs.write("out/UNITTEST_RESULT.json", JSON.stringify(r));
+    fs.write("out/UNITTEST_RESULT_SUMMARY.txt", JSON.stringify(! r.crashed));
+
+    try {
+      resultsToXml(r, "UNITTEST_RESULT_", (options.hasOwnProperty('cluster') && options.cluster));
+    }
+    catch (x) {
+      print("exception while serializing status xml!");
+      print(x.message);
+      print(x.stack);
+      print(JSON.stringify(r));
+    }
+  }
   UnitTest.unitTestPrettyPrintResults(r);

   if (r.hasOwnProperty("crashed") && r.crashed) {