
Merge branch 'devel' of ssh://github.com/triAGENS/ArangoDB into devel

Max Neunhoeffer 2014-10-20 17:33:04 +02:00
commit ba41701bf8
19 changed files with 2413 additions and 1222 deletions

View File

@ -494,13 +494,13 @@ Json AqlValue::toJson (triagens::arango::AqlTransaction* trx,
if (TRI_IS_EDGE_MARKER(_marker)) {
// _from
std::string from(trx->resolver()->getCollectionName(TRI_EXTRACT_MARKER_FROM_CID(_marker)));
std::string from(trx->resolver()->getCollectionNameCluster(TRI_EXTRACT_MARKER_FROM_CID(_marker)));
from.push_back('/');
from.append(TRI_EXTRACT_MARKER_FROM_KEY(_marker));
json(TRI_VOC_ATTRIBUTE_FROM, Json(from));
// _to
std::string to(trx->resolver()->getCollectionName(TRI_EXTRACT_MARKER_TO_CID(_marker)));
std::string to(trx->resolver()->getCollectionNameCluster(TRI_EXTRACT_MARKER_TO_CID(_marker)));
to.push_back('/');
to.append(TRI_EXTRACT_MARKER_TO_KEY(_marker));
json(TRI_VOC_ATTRIBUTE_TO, Json(to));
@ -605,13 +605,13 @@ Json AqlValue::extractArrayMember (triagens::arango::AqlTransaction* trx,
return Json(TRI_UNKNOWN_MEM_ZONE, JsonHelper::uint64String(TRI_UNKNOWN_MEM_ZONE, rid));
}
else if (strcmp(name, TRI_VOC_ATTRIBUTE_FROM) == 0) {
std::string from(trx->resolver()->getCollectionName(TRI_EXTRACT_MARKER_FROM_CID(_marker)));
std::string from(trx->resolver()->getCollectionNameCluster(TRI_EXTRACT_MARKER_FROM_CID(_marker)));
from.push_back('/');
from.append(TRI_EXTRACT_MARKER_FROM_KEY(_marker));
return Json(TRI_UNKNOWN_MEM_ZONE, from);
}
else if (strcmp(name, TRI_VOC_ATTRIBUTE_TO) == 0) {
std::string to(trx->resolver()->getCollectionName(TRI_EXTRACT_MARKER_TO_CID(_marker)));
std::string to(trx->resolver()->getCollectionNameCluster(TRI_EXTRACT_MARKER_TO_CID(_marker)));
to.push_back('/');
to.append(TRI_EXTRACT_MARKER_TO_KEY(_marker));
return Json(TRI_UNKNOWN_MEM_ZONE, to);

View File

@ -31,6 +31,7 @@
#include "Aql/ExecutionEngine.h"
#include "Basics/StringUtils.h"
#include "Cluster/ClusterInfo.h"
#include "Cluster/ClusterMethods.h"
#include "Utils/Exception.h"
#include "VocBase/document-collection.h"
#include "VocBase/transaction.h"
@ -80,8 +81,12 @@ Collection::~Collection () {
size_t Collection::count () const {
if (numDocuments == UNINITIALIZED) {
if (ExecutionEngine::isCoordinator()) {
/// TODO: determine the proper number of documents in the coordinator case
numDocuments = 1000;
uint64_t result;
int res = triagens::arango::countOnCoordinator(vocbase->_name, name, result);
if (res != TRI_ERROR_NO_ERROR) {
THROW_ARANGO_EXCEPTION_MESSAGE(res, "could not determine number of documents in collection");
}
numDocuments = static_cast<int64_t>(result);
}
else {
auto document = documentCollection();
@ -197,6 +202,29 @@ void Collection::fillIndexes () const {
}
}
}
else if (ExecutionEngine::isDBServer()) {
TRI_ASSERT(collection != nullptr);
auto document = documentCollection();
// lookup collection in agency by plan id
auto clusterInfo = triagens::arango::ClusterInfo::instance();
auto collectionInfo = clusterInfo->getCollection(std::string(vocbase->_name), triagens::basics::StringUtils::itoa(document->_info._planId));
if (collectionInfo.get() == nullptr || (*collectionInfo).empty()) {
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "collection not found");
}
TRI_json_t const* json = (*collectionInfo).getIndexes();
size_t const n = document->_allIndexes._length;
indexes.reserve(n);
// register indexes
for (size_t i = 0; i < n; ++i) {
TRI_json_t const* v = TRI_LookupListJson(json, i);
if (v != nullptr) {
indexes.emplace_back(new Index(v));
}
}
}
else {
// local collection
TRI_ASSERT(collection != nullptr);

View File

@ -235,19 +235,28 @@ int ExecutionBlock::initialize () {
////////////////////////////////////////////////////////////////////////////////
int ExecutionBlock::shutdown () {
for (auto it = _dependencies.begin(); it != _dependencies.end(); ++it) {
int res = (*it)->shutdown();
if (res != TRI_ERROR_NO_ERROR) {
return res;
}
}
int ret = TRI_ERROR_NO_ERROR;
int res = TRI_ERROR_NO_ERROR;  // initialized in case a dependency's shutdown() throws
for (auto it = _buffer.begin(); it != _buffer.end(); ++it) {
delete *it;
}
_buffer.clear();
return TRI_ERROR_NO_ERROR;
for (auto it = _dependencies.begin(); it != _dependencies.end(); ++it) {
try {
res = (*it)->shutdown();
}
catch (...) {
ret = TRI_ERROR_INTERNAL;
}
if (res != TRI_ERROR_NO_ERROR) {
ret = res;
}
}
return ret;
}
////////////////////////////////////////////////////////////////////////////////
@ -803,6 +812,7 @@ IndexRangeBlock::IndexRangeBlock (ExecutionEngine* engine,
_allBoundsConstant(true) {
std::vector<std::vector<RangeInfo>> const& orRanges = en->_ranges;
TRI_ASSERT(en->_index != nullptr);
TRI_ASSERT(orRanges.size() == 1); // OR expressions not yet implemented
@ -888,7 +898,6 @@ int IndexRangeBlock::initialize () {
}
bool IndexRangeBlock::readIndex () {
// This is either called from initialize if all bounds are constant,
// in this case it is never called again. If there is at least one
// variable bound, then readIndex is called once for every item coming
@ -908,6 +917,8 @@ bool IndexRangeBlock::readIndex () {
auto en = static_cast<IndexRangeNode const*>(getPlanNode());
IndexOrCondition const* condition = &en->_ranges;
TRI_ASSERT(en->_index != nullptr);
std::unique_ptr<IndexOrCondition> newCondition;
// Find out about the actual values for the bounds in the variable bound case:
@ -4227,20 +4238,33 @@ size_t DistributeBlock::sendToClient (AqlValue val) {
/// @brief local helper to throw an exception if a HTTP request went wrong
////////////////////////////////////////////////////////////////////////////////
static void throwExceptionAfterBadSyncRequest (ClusterCommResult* res,
static bool throwExceptionAfterBadSyncRequest (ClusterCommResult* res,
bool isShutdown) {
if (res->status == CL_COMM_TIMEOUT) {
std::string errorMessage;
errorMessage += std::string("Timeout in communication with shard '") +
std::string(res->shardID) +
std::string("' on cluster node '") +
std::string(res->serverID) +
std::string("' failed.");
// No reply, we give up:
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_CLUSTER_TIMEOUT,
"timeout in cluster AQL operation");
errorMessage);
}
if (res->status == CL_COMM_ERROR) {
std::string errorMessage;
// This could be a broken connection or an Http error:
if (res->result == nullptr || ! res->result->isComplete()) {
// there is no result
errorMessage += std::string("Empty result in communication with shard '") +
std::string(res->shardID) +
std::string("' on cluster node '") +
std::string(res->serverID) +
std::string("' failed.");
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_CLUSTER_CONNECTION_LOST,
"lost connection within cluster");
errorMessage);
}
StringBuffer const& responseBodyBuf(res->result->getBody());
@ -4248,20 +4272,39 @@ static void throwExceptionAfterBadSyncRequest (ClusterCommResult* res,
// extract error number and message from response
int errorNum = TRI_ERROR_NO_ERROR;
std::string errorMessage;
TRI_json_t* json = TRI_JsonString(TRI_UNKNOWN_MEM_ZONE, responseBodyBuf.c_str());
if (TRI_IsArrayJson(json)) {
TRI_json_t const* v;
if (JsonHelper::getBooleanValue(json, "error", true)) {
errorNum = TRI_ERROR_INTERNAL;
errorMessage += std::string("Error message received from shard '") +
std::string(res->shardID) +
std::string("' on cluster node '") +
std::string(res->serverID) +
std::string("': ");
}
if (TRI_IsArrayJson(json)) {
TRI_json_t const* v = TRI_LookupArrayJson(json, "errorNum");
v = TRI_LookupArrayJson(json, "errorNum");
if (TRI_IsNumberJson(v)) {
if (static_cast<int>(v->_value._number) != TRI_ERROR_NO_ERROR) {
/* if we've got an error num, error has to be true. */
TRI_ASSERT(errorNum == TRI_ERROR_INTERNAL);
errorNum = static_cast<int>(v->_value._number);
}
}
v = TRI_LookupArrayJson(json, "errorMessage");
if (TRI_IsStringJson(v)) {
errorMessage = std::string(v->_value._string.data, v->_value._string.length - 1);
errorMessage += std::string(v->_value._string.data, v->_value._string.length - 1);
}
else {
errorMessage += std::string("(No valid error in response)");
}
}
else {
errorMessage += std::string("(No valid response)");
}
if (json != nullptr) {
@ -4271,10 +4314,10 @@ static void throwExceptionAfterBadSyncRequest (ClusterCommResult* res,
if (isShutdown &&
errorNum == TRI_ERROR_QUERY_NOT_FOUND) {
// this error may happen on shutdown and is thus tolerated
return;
// pass the info to the caller who can opt to ignore this error
return true;
}
// In this case a proper HTTP error was reported by the DBserver,
if (errorNum > 0 && ! errorMessage.empty()) {
THROW_ARANGO_EXCEPTION_MESSAGE(errorNum, errorMessage);
@ -4283,6 +4326,8 @@ static void throwExceptionAfterBadSyncRequest (ClusterCommResult* res,
// default error
THROW_ARANGO_EXCEPTION(TRI_ERROR_CLUSTER_AQL_COMMUNICATION);
}
return false;
}
////////////////////////////////////////////////////////////////////////////////
@ -4406,7 +4451,10 @@ int RemoteBlock::shutdown () {
res.reset(sendRequest(rest::HttpRequest::HTTP_REQUEST_PUT,
"/_api/aql/shutdown/",
string()));
throwExceptionAfterBadSyncRequest(res.get(), true);
if (throwExceptionAfterBadSyncRequest(res.get(), true)) {
// artificially ignore error in case query was not found during shutdown
return TRI_ERROR_NO_ERROR;
}
// If we get here, then res->result is the response which will be
// a serialized AqlItemBlock:
@ -4414,6 +4462,7 @@ int RemoteBlock::shutdown () {
Json responseBodyJson(TRI_UNKNOWN_MEM_ZONE,
TRI_JsonString(TRI_UNKNOWN_MEM_ZONE,
responseBodyBuf.begin()));
return JsonHelper::getNumericValue<int>
(responseBodyJson.json(), "code", TRI_ERROR_INTERNAL);
}

View File

@ -188,6 +188,14 @@ bool ExecutionEngine::isCoordinator () {
return triagens::arango::ServerState::instance()->isCoordinator();
}
////////////////////////////////////////////////////////////////////////////////
// @brief whether or not we are a db server
////////////////////////////////////////////////////////////////////////////////
bool ExecutionEngine::isDBServer () {
return triagens::arango::ServerState::instance()->isDBserver();
}
// -----------------------------------------------------------------------------
// --SECTION-- walker class for ExecutionNode to instantiate
// -----------------------------------------------------------------------------
@ -331,6 +339,11 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
Query* otherQuery = query->clone(PART_DEPENDENT);
otherQuery->engine(engine);
int res = otherQuery->trx()->begin();
if (res != TRI_ERROR_NO_ERROR) {
THROW_ARANGO_EXCEPTION_MESSAGE(res, "could not begin transaction");
}
auto* newPlan = new ExecutionPlan(otherQuery->ast());
otherQuery->setPlan(newPlan);
@ -514,6 +527,7 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
// pick up the remote query ids
std::unordered_map<std::string, std::string> queryIds;
std::string error;
int count = 0;
int nrok = 0;
for (count = (int) shardIds.size(); count > 0; count--) {
@ -538,6 +552,13 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
std::cout << "DB SERVER ANSWERED WITH ERROR: " << res->answer->body() << "\n";
}
}
else {
error += std::string("Communication with shard '") +
std::string(res->shardID) +
std::string("' on cluster node '") +
std::string(res->serverID) +
std::string("' failed.");
}
delete res;
}
@ -545,7 +566,7 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
if (nrok != (int) shardIds.size()) {
// TODO: provide sensible error message with more details
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "did not receive response from all shards");
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, error);
}
return queryIds;
@ -743,10 +764,10 @@ ExecutionEngine* ExecutionEngine::instanciateFromPlan (QueryRegistry* queryRegis
}
TRI_ASSERT(root != nullptr);
engine->_root = root;
root->initialize();
root->initializeCursor(nullptr, 0);
engine->_root = root;
return engine;
}

View File

@ -78,6 +78,12 @@ namespace triagens {
static bool isCoordinator ();
////////////////////////////////////////////////////////////////////////////////
// @brief whether or not we are a DB server
////////////////////////////////////////////////////////////////////////////////
static bool isDBServer ();
////////////////////////////////////////////////////////////////////////////////
// @brief create an execution engine from a plan
////////////////////////////////////////////////////////////////////////////////
@ -126,8 +132,11 @@ namespace triagens {
////////////////////////////////////////////////////////////////////////////////
int shutdown () {
if (_root != nullptr) {
return _root->shutdown();
}
return TRI_ERROR_NO_ERROR;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief getSome

View File

@ -1304,6 +1304,7 @@ IndexRangeNode::IndexRangeNode (ExecutionPlan* plan,
_collection(plan->getAst()->query()->collections()->get(JsonHelper::checkAndGetStringValue(json.json(),
"collection"))),
_outVariable(varFromJson(plan->getAst(), json, "outVariable")),
_index(nullptr),
_ranges(),
_reverse(false) {
@ -1324,6 +1325,10 @@ IndexRangeNode::IndexRangeNode (ExecutionPlan* plan,
_index = _collection->getIndex(iid);
_reverse = JsonHelper::checkAndGetBooleanValue(json.json(), "reverse");
if (_index == nullptr) {
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "index not found");
}
}
ExecutionNode::IndexMatch IndexRangeNode::MatchesIndex (IndexMatchVec const& pattern) const {

View File

@ -299,6 +299,8 @@ Query* Query::clone (QueryPart part) {
}
}
TRI_ASSERT(clone->_trx == nullptr);
clone->_trx = _trx->clone(); // A daughter transaction which does not
// actually lock the collections
return clone.release();
@ -550,7 +552,7 @@ QueryResult Query::execute (QueryRegistry* registry) {
AqlValue val = value->getValue(i, 0);
if (! val.isEmpty()) {
json.add(val.toJson(trx(), doc));
json.add(val.toJson(_trx, doc));
}
}
delete value;
@ -974,16 +976,16 @@ std::string Query::getStateString () const {
void Query::cleanupPlanAndEngine () {
if (_engine != nullptr) {
_engine->shutdown();
delete _engine;
_engine = nullptr;
}
if (_trx != nullptr) {
// TODO: this doesn't unblock the collection on the coordinator. Why?
_trx->abort();
}
delete _trx;
_trx = nullptr;
}
if (_parser != nullptr) {
delete _parser;

View File

@ -344,8 +344,8 @@ namespace triagens {
/// @brief return the transaction, if prepared
////////////////////////////////////////////////////////////////////////////////
triagens::arango::AqlTransaction* trx () {
return &*_trx;
inline triagens::arango::AqlTransaction* trx () {
return _trx;
}
////////////////////////////////////////////////////////////////////////////////

View File

@ -40,7 +40,6 @@
#include "GeneralServer/GeneralServer.h"
#include "VocBase/server.h"
//#include "V8Server/v8-vocbaseprivate.h"
#include "Aql/ExecutionEngine.h"
#include "Aql/ExecutionBlock.h"
@ -111,16 +110,18 @@ std::string const& RestAqlHandler::queue () const {
void RestAqlHandler::createQueryFromJson () {
Json queryJson(TRI_UNKNOWN_MEM_ZONE, parseJsonBody());
if (queryJson.isEmpty()) {
LOG_ERROR("Invalid JSON Plan in Query");
return;
}
std::cout << "createQueryFromJson" << queryJson.toString() << std::endl;
std::cout << "createQueryFromJson: " << queryJson.toString() << std::endl;
Json plan;
Json options;
plan = queryJson.get("plan").copy(); // cannot throw
if (plan.isEmpty()) {
LOG_ERROR("Invalid JSON: \"plan\"-Attribute missing.");
generateError(HttpResponse::BAD, TRI_ERROR_INTERNAL,
"body must be an object with attribute \"plan\"");
return;
@ -132,6 +133,8 @@ void RestAqlHandler::createQueryFromJson () {
auto query = new Query(_applicationV8, false, _vocbase, plan, options.steal(), (part == "main" ? PART_MAIN : PART_DEPENDENT));
QueryResult res = query->prepare(_queryRegistry);
if (res.code != TRI_ERROR_NO_ERROR) {
LOG_ERROR("Failed to instanciate the Query: %s", res.details.c_str());
generateError(HttpResponse::BAD, TRI_ERROR_QUERY_BAD_JSON_PLAN,
res.details);
delete query;
@ -153,6 +156,8 @@ void RestAqlHandler::createQueryFromJson () {
_queryRegistry->insert(_vocbase, _qId, query, ttl);
}
catch (...) {
LOG_ERROR("could not keep query in registry");
generateError(HttpResponse::BAD, TRI_ERROR_INTERNAL,
"could not keep query in registry");
delete query;
@ -180,11 +185,13 @@ void RestAqlHandler::createQueryFromJson () {
void RestAqlHandler::parseQuery () {
Json queryJson(TRI_UNKNOWN_MEM_ZONE, parseJsonBody());
if (queryJson.isEmpty()) {
LOG_ERROR("Invalid JSON Plan in Query");
return;
}
std::string const queryString = JsonHelper::getStringValue(queryJson.json(), "query", "");
if (queryString.empty()) {
LOG_ERROR("body must be an object with attribute \"query\"");
generateError(HttpResponse::BAD, TRI_ERROR_INTERNAL,
"body must be an object with attribute \"query\"");
return;
@ -194,6 +201,7 @@ void RestAqlHandler::parseQuery () {
nullptr, nullptr, PART_MAIN);
QueryResult res = query->parse();
if (res.code != TRI_ERROR_NO_ERROR) {
LOG_ERROR("Failed to instanciate the Query: %s", res.details.c_str());
generateError(HttpResponse::BAD, res.code, res.details);
delete query;
return;
@ -236,6 +244,7 @@ void RestAqlHandler::explainQuery () {
std::string queryString = JsonHelper::getStringValue(queryJson.json(), "query", "");
if (queryString.empty()) {
LOG_ERROR("body must be an object with attribute \"query\"");
generateError(HttpResponse::BAD, TRI_ERROR_INTERNAL,
"body must be an object with attribute \"query\"");
return;
@ -250,6 +259,7 @@ void RestAqlHandler::explainQuery () {
parameters.steal(), options.steal(), PART_MAIN);
QueryResult res = query->explain();
if (res.code != TRI_ERROR_NO_ERROR) {
LOG_ERROR("Failed to instanciate the Query: %s", res.details.c_str());
generateError(HttpResponse::BAD, res.code, res.details);
delete query;
return;
@ -289,6 +299,7 @@ void RestAqlHandler::createQueryFromString () {
std::string const queryString = JsonHelper::getStringValue(queryJson.json(), "query", "");
if (queryString.empty()) {
LOG_ERROR("body must be an object with attribute \"query\"");
generateError(HttpResponse::BAD, TRI_ERROR_INTERNAL,
"body must be an object with attribute \"query\"");
return;
@ -296,6 +307,7 @@ void RestAqlHandler::createQueryFromString () {
std::string const part = JsonHelper::getStringValue(queryJson.json(), "part", "");
if (part.empty()) {
LOG_ERROR("body must be an object with attribute \"part\"");
generateError(HttpResponse::BAD, TRI_ERROR_INTERNAL,
"body must be an object with attribute \"part\"");
return;
@ -310,6 +322,7 @@ void RestAqlHandler::createQueryFromString () {
parameters.steal(), options.steal(), (part == "main" ? PART_MAIN : PART_DEPENDENT));
QueryResult res = query->prepare(_queryRegistry);
if (res.code != TRI_ERROR_NO_ERROR) {
LOG_ERROR("Failed to instanciate the Query: %s", res.details.c_str());
generateError(HttpResponse::BAD, TRI_ERROR_QUERY_BAD_JSON_PLAN,
res.details);
delete query;
@ -329,6 +342,7 @@ void RestAqlHandler::createQueryFromString () {
_queryRegistry->insert(_vocbase, _qId, query, ttl);
}
catch (...) {
LOG_ERROR("could not keep query in registry");
generateError(HttpResponse::BAD, TRI_ERROR_INTERNAL,
"could not keep query in registry");
delete query;
@ -432,7 +446,7 @@ void RestAqlHandler::useQuery (std::string const& operation,
}
catch (triagens::arango::Exception const& ex) {
_queryRegistry->close(_vocbase, _qId);
LOG_ERROR("Failed during use of Query: %s", ex.message().c_str());
generateError(HttpResponse::SERVER_ERROR,
ex.code(),
ex.message());
@ -440,12 +454,15 @@ void RestAqlHandler::useQuery (std::string const& operation,
catch (std::exception const& ex) {
_queryRegistry->close(_vocbase, _qId);
LOG_ERROR("Failed during use of Query: %s", ex.what());
generateError(HttpResponse::SERVER_ERROR,
TRI_ERROR_HTTP_SERVER_ERROR,
ex.what());
}
catch (...) {
_queryRegistry->close(_vocbase, _qId);
LOG_ERROR("Failed during use of Query: Unknown exeption occured");
generateError(HttpResponse::SERVER_ERROR,
TRI_ERROR_HTTP_SERVER_ERROR,
@ -531,13 +548,14 @@ void RestAqlHandler::getInfoQuery (std::string const& operation,
}
else {
_queryRegistry->close(_vocbase, _qId);
LOG_ERROR("Referenced qery not found");
generateError(HttpResponse::NOT_FOUND, TRI_ERROR_HTTP_NOT_FOUND);
return;
}
}
catch (triagens::arango::Exception const& ex) {
_queryRegistry->close(_vocbase, _qId);
LOG_ERROR("Failed during use of Query: %s", ex.message().c_str());
generateError(HttpResponse::SERVER_ERROR,
ex.code(),
ex.message());
@ -545,6 +563,8 @@ void RestAqlHandler::getInfoQuery (std::string const& operation,
catch (std::exception const& ex) {
_queryRegistry->close(_vocbase, _qId);
LOG_ERROR("Failed during use of Query: %s", ex.what());
generateError(HttpResponse::SERVER_ERROR,
TRI_ERROR_HTTP_SERVER_ERROR,
ex.what());
@ -552,6 +572,8 @@ void RestAqlHandler::getInfoQuery (std::string const& operation,
catch (...) {
_queryRegistry->close(_vocbase, _qId);
LOG_ERROR("Failed during use of Query: Unknown exeption occured");
generateError(HttpResponse::SERVER_ERROR,
TRI_ERROR_HTTP_SERVER_ERROR,
"an unknown exception occurred");
@ -582,6 +604,7 @@ triagens::rest::HttpHandler::status_t RestAqlHandler::execute () {
switch (type) {
case HttpRequest::HTTP_REQUEST_POST: {
if (suffix.size() != 1) {
LOG_ERROR("Empty POST!");
generateError(HttpResponse::NOT_FOUND, TRI_ERROR_HTTP_NOT_FOUND);
}
else if (suffix[0] == "instanciate") {
@ -597,12 +620,14 @@ triagens::rest::HttpHandler::status_t RestAqlHandler::execute () {
createQueryFromString();
}
else {
LOG_ERROR("Unknown API");
generateError(HttpResponse::NOT_FOUND, TRI_ERROR_HTTP_NOT_FOUND);
}
break;
}
case HttpRequest::HTTP_REQUEST_PUT: {
if (suffix.size() != 2) {
LOG_ERROR("unknown PUT API");
generateError(HttpResponse::NOT_FOUND, TRI_ERROR_HTTP_NOT_FOUND);
}
else {
@ -612,6 +637,7 @@ triagens::rest::HttpHandler::status_t RestAqlHandler::execute () {
}
case HttpRequest::HTTP_REQUEST_GET: {
if (suffix.size() != 2) {
LOG_ERROR("Unknown GET API");
generateError(HttpResponse::NOT_FOUND, TRI_ERROR_HTTP_NOT_FOUND);
}
else {
@ -624,6 +650,7 @@ triagens::rest::HttpHandler::status_t RestAqlHandler::execute () {
case HttpRequest::HTTP_REQUEST_PATCH:
case HttpRequest::HTTP_REQUEST_OPTIONS:
case HttpRequest::HTTP_REQUEST_ILLEGAL: {
LOG_ERROR("Unknown HTTP-method for /_api/aql");
generateError(HttpResponse::METHOD_NOT_ALLOWED,
TRI_ERROR_NOT_IMPLEMENTED,
"illegal method for /_api/aql");
@ -651,12 +678,14 @@ bool RestAqlHandler::findQuery (std::string const& idString,
}
catch (...) {
_qId = 0;
LOG_ERROR("Query not found.");
generateError(HttpResponse::FORBIDDEN, TRI_ERROR_QUERY_IN_USE);
return true;
}
if (query == nullptr) {
_qId = 0;
LOG_ERROR("Query not found.");
generateError(HttpResponse::NOT_FOUND, TRI_ERROR_QUERY_NOT_FOUND);
return true;
}
@ -712,6 +741,7 @@ void RestAqlHandler::handleUseQuery (std::string const& operation,
//std::cout << "ANSWERBODY: " << JsonHelper::toString(answerBody.json()) << "\n\n";
}
catch (...) {
LOG_ERROR("cannot transform AqlItemBlock to Json");
generateError(HttpResponse::SERVER_ERROR, TRI_ERROR_HTTP_SERVER_ERROR,
"cannot transform AqlItemBlock to Json");
return;
@ -737,6 +767,7 @@ void RestAqlHandler::handleUseQuery (std::string const& operation,
}
}
catch (...) {
LOG_ERROR("skipSome lead to an exception");
generateError(HttpResponse::SERVER_ERROR, TRI_ERROR_HTTP_SERVER_ERROR,
"skipSome lead to an exception");
return;
@ -764,6 +795,7 @@ void RestAqlHandler::handleUseQuery (std::string const& operation,
("error", Json(false));
}
catch (...) {
LOG_ERROR("skip lead to an exception");
generateError(HttpResponse::SERVER_ERROR, TRI_ERROR_HTTP_SERVER_ERROR,
"skip lead to an exception");
return;
@ -784,6 +816,7 @@ void RestAqlHandler::handleUseQuery (std::string const& operation,
}
}
catch (...) {
LOG_ERROR("initializeCursor lead to an exception");
generateError(HttpResponse::SERVER_ERROR, TRI_ERROR_HTTP_SERVER_ERROR,
"initializeCursor lead to an exception");
return;
@ -798,6 +831,7 @@ void RestAqlHandler::handleUseQuery (std::string const& operation,
_queryRegistry->destroy(_vocbase, _qId);
}
catch (...) {
LOG_ERROR("shutdown lead to an exception");
generateError(HttpResponse::SERVER_ERROR, TRI_ERROR_HTTP_SERVER_ERROR,
"shutdown lead to an exception");
return;
@ -806,6 +840,7 @@ void RestAqlHandler::handleUseQuery (std::string const& operation,
("code", Json(static_cast<double>(res)));
}
else {
LOG_ERROR("Unknown operation!");
generateError(HttpResponse::NOT_FOUND, TRI_ERROR_HTTP_NOT_FOUND);
return;
}
@ -825,11 +860,13 @@ TRI_json_t* RestAqlHandler::parseJsonBody () {
if (json == nullptr) {
if (errmsg == nullptr) {
LOG_ERROR("cannot parse json object");
generateError(HttpResponse::BAD,
TRI_ERROR_HTTP_CORRUPTED_JSON,
"cannot parse json object");
}
else {
LOG_ERROR("cannot parse json object: %s", errmsg);
generateError(HttpResponse::BAD,
TRI_ERROR_HTTP_CORRUPTED_JSON,
errmsg);
@ -844,6 +881,7 @@ TRI_json_t* RestAqlHandler::parseJsonBody () {
if (! TRI_IsArrayJson(json)) {
TRI_FreeJson(TRI_UNKNOWN_MEM_ZONE, json);
LOG_ERROR("body of request must be a JSON array");
generateError(HttpResponse::BAD, TRI_ERROR_HTTP_BAD_PARAMETER,
"body of request must be a JSON array");
return nullptr;

View File

@ -338,7 +338,7 @@ ClusterCommResult* ClusterComm::syncRequest (
res->result = client->request(reqtype, path, body.c_str(), body.size(),
headersCopy);
if (! res->result->isComplete()) {
if (res->result == nullptr || ! res->result->isComplete()) {
cm->brokenConnection(connection);
if (client->getErrorMessage() == "Request timeout reached") {
res->status = CL_COMM_TIMEOUT;
@ -723,7 +723,7 @@ void ClusterComm::asyncAnswer (string& coordinatorHeader,
httpclient::SimpleHttpResult* result =
client->request(rest::HttpRequest::HTTP_REQUEST_PUT,
"/_api/shard-comm", body, len, headers);
if (! result->isComplete()) {
if (result == nullptr || ! result->isComplete()) {
cm->brokenConnection(connection);
}
else {
@ -1011,17 +1011,17 @@ void ClusterCommThread::run () {
// We add this result to the operation struct without acquiring
// a lock, since we know that only we do such a thing:
if (0 != op->body) {
if (nullptr != op->body) {
op->result = client->request(op->reqtype, op->path,
op->body->c_str(), op->body->size(),
*(op->headerFields));
}
else {
op->result = client->request(op->reqtype, op->path,
NULL, 0, *(op->headerFields));
nullptr, 0, *(op->headerFields));
}
if (! op->result->isComplete()) {
if (op->result == nullptr || ! op->result->isComplete()) {
cm->brokenConnection(connection);
if (client->getErrorMessage() == "Request timeout reached") {
op->status = CL_COMM_TIMEOUT;
@ -1042,7 +1042,7 @@ void ClusterCommThread::run () {
}
}
if (!cc->moveFromSendToReceived(op->operationID)) {
if (! cc->moveFromSendToReceived(op->operationID)) {
// It was dropped in the meantime, so forget about it:
delete op;
}

View File

@ -90,7 +90,7 @@ namespace triagens {
////////////////////////////////////////////////////////////////////////////////
bool empty () const {
return (0 == _json); //|| (id() == 0);
return (nullptr == _json); //|| (id() == 0);
}
////////////////////////////////////////////////////////////////////////////////
@ -189,11 +189,11 @@ namespace triagens {
TRI_json_t* keyOptions () const {
TRI_json_t const* keyOptions = triagens::basics::JsonHelper::getArrayElement(_json, "keyOptions");
if (keyOptions != 0) {
if (keyOptions != nullptr) {
return TRI_CopyJson(TRI_UNKNOWN_MEM_ZONE, keyOptions);
}
return 0;
return nullptr;
}
////////////////////////////////////////////////////////////////////////////////
@ -203,7 +203,7 @@ namespace triagens {
bool allowUserKeys () const {
TRI_json_t const* keyOptions = triagens::basics::JsonHelper::getArrayElement(_json, "keyOptions");
if (keyOptions != 0) {
if (keyOptions != nullptr) {
return triagens::basics::JsonHelper::getBooleanValue(keyOptions, "allowUserKeys", true);
}
@ -422,7 +422,7 @@ namespace triagens {
TRI_json_t* _json = it->second;
b = triagens::basics::JsonHelper::getBooleanValue(_json,
name, false);
m.insert(make_pair(it->first,b));
m.insert(make_pair(it->first, b));
}
return m;
}
@ -539,15 +539,12 @@ namespace triagens {
= triagens::basics::JsonHelper::getArrayElement
(_json, "keyOptions");
if (keyOptions != 0) {
if (keyOptions != nullptr) {
return TRI_CopyJson(TRI_UNKNOWN_MEM_ZONE, keyOptions);
}
}
return 0;
}
else {
return 0;
}
return nullptr;
}
////////////////////////////////////////////////////////////////////////////////

View File

@ -117,7 +117,6 @@ bool ServerJob::cancel (bool running) {
////////////////////////////////////////////////////////////////////////////////
bool ServerJob::execute () {
// default to system database
TRI_vocbase_t* vocbase = TRI_UseDatabaseServer(_server, TRI_VOC_SYSTEM_DATABASE);
@ -126,6 +125,7 @@ bool ServerJob::execute () {
return false;
}
// only one plan change at a time
MUTEX_LOCKER(ExecutorLock);
ApplicationV8::V8Context* context = _applicationV8->enterContext("STANDARD", vocbase, false, true);
@ -135,14 +135,17 @@ bool ServerJob::execute () {
return false;
}
{
try {
v8::HandleScope scope;
// execute script inside the context
char const* file = "handle-plan-change";
char const* content = "require('org/arangodb/cluster').handlePlanChange();";
TRI_ExecuteJavaScriptString(v8::Context::GetCurrent(), v8::String::New(content), v8::String::New(file), false);
TRI_ExecuteJavaScriptString(v8::Context::GetCurrent(), v8::String::New(content, (int) strlen(content)), v8::String::New(file), false);
}
catch (...) {
}
// get the pointer to the last used vocbase
TRI_v8_global_t* v8g = static_cast<TRI_v8_global_t*>(context->_isolate->GetData());

View File

@ -263,6 +263,7 @@ namespace triagens {
}
#ifdef TRI_ENABLE_MAINTAINER_MODE
TRI_ASSERT(_numberTrxActive == _numberTrxInScope);
TRI_ASSERT(_numberTrxActive > 0);
_numberTrxActive--; // Every transaction gets here at most once
#endif
return TRI_ERROR_NO_ERROR;
@ -272,6 +273,7 @@ namespace triagens {
#ifdef TRI_ENABLE_MAINTAINER_MODE
TRI_ASSERT(_numberTrxActive == _numberTrxInScope);
TRI_ASSERT(_numberTrxActive > 0);
_numberTrxActive--; // Every transaction gets here at most once
#endif
@ -295,6 +297,7 @@ namespace triagens {
#ifdef TRI_ENABLE_MAINTAINER_MODE
TRI_ASSERT(_numberTrxActive == _numberTrxInScope);
TRI_ASSERT(_numberTrxActive > 0);
_numberTrxActive--; // Every transaction gets here at most once
#endif
return TRI_ERROR_NO_ERROR;
@ -304,6 +307,7 @@ namespace triagens {
#ifdef TRI_ENABLE_MAINTAINER_MODE
TRI_ASSERT(_numberTrxActive == _numberTrxInScope);
TRI_ASSERT(_numberTrxActive > 0);
_numberTrxActive--; // Every transaction gets here at most once
#endif

View File

@ -31,6 +31,7 @@
#define ARANGODB_VOC_BASE_VOC__TYPES_H 1
#include "Basics/Common.h"
#include "Cluster/ServerState.h"
// -----------------------------------------------------------------------------
// --SECTION-- public defines
@ -225,6 +226,8 @@ namespace triagens {
static void increaseNumbers (int numberInScope, int numberActive) {
#ifdef TRI_ENABLE_MAINTAINER_MODE
TRI_ASSERT(_numberTrxInScope + numberInScope >= 0);
TRI_ASSERT(_numberTrxActive + numberActive >= 0);
_numberTrxInScope += numberInScope;
_numberTrxActive += numberActive;
#endif

View File

@ -1,7 +1,7 @@
/*jshint browser: true */
/*jshint unused: false */
/*global describe, beforeEach, afterEach, it, spyOn, expect, jQuery, _, jqconsole, $*/
/*global arangoHelper, ace*/
/*global arangoHelper, ace, window, document, localStorage, Joi*/
(function() {
@ -9,9 +9,11 @@
describe("The query view", function() {
var view, div, div2, jQueryDummy;
var view, div, div2, jQueryDummy, collectionDummy,
localStorageFake;
beforeEach(function() {
spyOn($, "ajax");
window.App = {
notificationList: {
add: function() {
@ -19,6 +21,38 @@
}
}
};
localStorageFake = {
value: undefined
};
spyOn(localStorage, "getItem").andCallFake(function() {
return localStorageFake.value;
});
var DummyModel = function(vals) {
this.get = function (attr) {
return vals[attr];
};
};
collectionDummy = {
list: [],
fetch: function() {
throw "Should be a spy";
},
add: function(item) {
this.list.push(new DummyModel(item));
},
each: function(func) {
return this.list.forEach(func);
},
saveCollectionQueries: function() {
throw "Should be a spy";
},
findWhere: function(ex) {
}
};
spyOn(collectionDummy, "fetch");
spyOn(collectionDummy, "saveCollectionQueries");
spyOn(window.App.notificationList, "add");
@ -27,6 +61,7 @@
document.body.appendChild(div);
view = new window.queryView({
collection: collectionDummy
});
window.modalView = new window.ModalView();
@ -60,24 +95,23 @@
'click #clearQueryButton': 'clearInput',
'click #addAQL': 'addAQL',
'change #querySelect': 'importSelected',
'change #querySize': 'changeSize',
'keypress #aqlEditor': 'aqlShortcuts',
'click #arangoQueryTable .table-cell0': 'editCustomQuery',
'click #arangoQueryTable .table-cell1': 'editCustomQuery',
'click #arangoQueryTable .table-cell2 a': 'deleteAQL',
'click #confirmQueryImport': 'importCustomQueries',
'click #confirmQueryExport': 'exportCustomQueries'
'click #confirmQueryExport': 'exportCustomQueries',
'click #downloadQueryResult': 'downloadQueryResult',
'click #importQueriesToggle': 'showImportMenu'
};
expect(events).toEqual(view.events);
});
it("should execute all functions when view initializes", function () {
spyOn(view, "getAQL");
spyOn(localStorage, "setItem");
view.initialize();
expect(view.tableDescription.rows).toEqual(view.customQueries);
expect(view.getAQL).toHaveBeenCalled();
expect(localStorage.setItem).toHaveBeenCalled();
});
it("should create a custom query modal", function() {
@ -86,7 +120,13 @@
spyOn(window.modalView, "show");
view.createCustomQueryModal();
expect(window.modalView.createTextEntry).toHaveBeenCalledWith(
'new-query-name', 'Name', '', undefined, undefined, false, /[<>&'"]/
'new-query-name', 'Name', '', undefined, undefined, false,
[
{
rule: Joi.string().required(),
msg: "No query name given."
}
]
);
expect(window.modalView.createSuccessButton).toHaveBeenCalled();
expect(window.modalView.show).toHaveBeenCalled();
@ -117,9 +157,7 @@
name: "123123123",
value: "for var yx do something"
}];
localStorage.setItem("customQueries", JSON.stringify(customQueries));
view.initialize();
spyOn(localStorage, "getItem");
localStorageFake.value = JSON.stringify(customQueries);
view.getAQL();
expect(localStorage.getItem).toHaveBeenCalledWith("customQueries");
expect(view.customQueries).toEqual(customQueries);
@ -225,8 +263,10 @@
name: "myname",
value: "for var yx do something"
}];
localStorage.setItem("customQueries", JSON.stringify(customQueries));
localStorageFake.value = JSON.stringify(customQueries);
view.initialize();
expect(localStorage.getItem).toHaveBeenCalledWith("customQueries");
jQueryDummy = {
removeClass: function () {
@ -263,7 +303,7 @@
}], e = {
target: "dontcare"
};
localStorage.setItem("customQueries", JSON.stringify(customQueries));
localStorageFake.value = JSON.stringify(customQueries);
spyOn(view, "switchTab");
spyOn(view, "deselect");
@ -283,7 +323,8 @@
}], e = {
target: "dontcare"
};
localStorage.setItem("customQueries", JSON.stringify(customQueries));
localStorageFake.value = JSON.stringify(customQueries);
view.initialize();
spyOn(view, "renderSelectboxes");
@ -306,7 +347,7 @@
target: "dontcare",
stopPropagation: function() {throw "Should be a spy";}
};
localStorage.setItem("customQueries", JSON.stringify(customQueries));
localStorageFake.value = JSON.stringify(customQueries);
view.initialize();
div2 = document.createElement("div");
@ -338,7 +379,7 @@
throw "Should be a spy";
}
};
localStorage.setItem("customQueries", JSON.stringify(customQueries));
localStorageFake.value = JSON.stringify(customQueries);
view.initialize();
div2 = document.createElement("div");
@ -370,7 +411,7 @@
throw "Should be a spy";
}
};
localStorage.setItem("customQueries", JSON.stringify(customQueries));
localStorageFake.value = JSON.stringify(customQueries);
view.initialize();
div2 = document.createElement("div");
@ -402,7 +443,7 @@
throw "Should be a spy";
}
};
localStorage.setItem("customQueries", JSON.stringify(customQueries));
localStorageFake.value = JSON.stringify(customQueries);
view.initialize();
div2 = document.createElement("div");
@ -430,7 +471,7 @@
value: "for var yx do something"
}],
returnValue;
localStorage.setItem("customQueries", JSON.stringify(customQueries));
localStorageFake.value = JSON.stringify(customQueries);
view.initialize();
returnValue = view.getCustomQueryValueByName("hallotest");
@ -442,7 +483,7 @@
div2.id = "test123";
document.body.appendChild(div2);
localStorage.setItem("querySize", 5000);
localStorageFake.value = 5000;
view.initialize();
spyOn(localStorage, "getItem");
@ -489,7 +530,7 @@
}
};
$('#findme').val('findme');
localStorage.setItem("customQueries", JSON.stringify(customQueries));
localStorageFake.value = JSON.stringify(customQueries);
view.initialize();
view.importSelected(e);

View File

@ -5860,6 +5860,7 @@ function GENERAL_GRAPH_NEIGHBORS (graphName,
if (options.hasOwnProperty("neighborExamples") && typeof options.neighborExamples === "string") {
options.neighborExamples = {_id : options.neighborExamples};
}
var neighbors = [],
params = TRAVERSAL_PARAMS(),
factory = TRAVERSAL.generalGraphDatasourceFactory(graphName);

View File

@ -166,12 +166,12 @@ function printUsage () {
function filterTestcaseByOptions (testname, options, whichFilter)
{
if ((testname.indexOf("-cluster") !== -1) && (options.cluster === false)) {
whichFilter.filter = 'cluster';
whichFilter.filter = 'noncluster';
return false;
}
if (testname.indexOf("-noncluster") !== -1 && (options.cluster === true)) {
whichFilter.filter = 'noncluster';
whichFilter.filter = 'cluster';
return false;
}

View File

@ -2881,6 +2881,112 @@ testGRAPH_DIAMETER_AND_RADIUS: function () {
};
}
function ahuacatlQueryMultiCollectionMadnessTestSuite() {
var gN = "UnitTestsAhuacatlGraph";
var v1 = "UnitTestsAhuacatlVertex1";
var v2 = "UnitTestsAhuacatlVertex2";
var v3 = "UnitTestsAhuacatlVertex3";
var e1 = "UnitTestsAhuacatlEdge1";
var e2 = "UnitTestsAhuacatlEdge2";
var s1;
var c1;
var t1;
var s2;
var c2;
var t2;
var AQL_NEIGHBORS = "FOR e IN GRAPH_NEIGHBORS(@name, @example, @options) SORT e.vertex._id, e.path.edges[0].what RETURN e";
return {
////////////////////////////////////////////////////////////////////////////////
/// @brief set up
////////////////////////////////////////////////////////////////////////////////
setUp: function () {
db._drop(v1);
db._drop(v2);
db._drop(v3);
db._drop(e1);
db._drop(e2);
var vertex1 = db._create(v1);
var vertex2 = db._create(v2);
var vertex3 = db._create(v3);
var edge1 = db._createEdgeCollection(e1);
var edge2 = db._createEdgeCollection(e2);
s1 = vertex1.save({ _key: "start"})._id;
c1 = vertex2.save({ _key: "center"})._id;
t1 = vertex3.save({ _key: "target"})._id;
s2 = vertex1.save({ _key: "start2"})._id;
c2 = vertex2.save({ _key: "center2"})._id;
t2 = vertex3.save({ _key: "target2"})._id;
function makeEdge(from, to, collection) {
collection.save(from, to, {});
}
makeEdge(s1, c1, edge1);
makeEdge(t1, c1, edge2);
makeEdge(s2, c2, edge1);
makeEdge(t2, c2, edge2);
makeEdge(t1, c2, edge2);
try {
graph._drop(gN);
} catch (ignore) {
}
graph._create(
gN,
graph._edgeDefinitions(
graph._relation(e1, v1, v2),
graph._relation(e2, v3, v2)
)
);
},
tearDown: function () {
graph._drop(gN, true);
},
testRestrictedPathHops1: function() {
var bindVars = {
name: gN,
example: s1,
options: {
direction : 'any',
minDepth: 2,
maxDepth: 2,
vertexCollectionRestriction: v3,
edgeCollectionRestriction: [e1, e2]
}
};
var actual = getRawQueryResults(AQL_NEIGHBORS, bindVars);
assertEqual(actual.length, 1);
assertEqual(actual[0].vertex._id, t1);
},
testRestrictedPathHops2: function() {
var bindVars = {
name: gN,
example: s2,
options: {
direction : 'any',
minDepth: 2,
maxDepth: 2,
vertexCollectionRestriction: v3,
edgeCollectionRestriction: [e1, e2]
}
};
var actual = getRawQueryResults(AQL_NEIGHBORS, bindVars);
assertEqual(actual.length, 2);
assertEqual(actual[0].vertex._id, t1);
assertEqual(actual[1].vertex._id, t2);
}
};
}
////////////////////////////////////////////////////////////////////////////////
@ -2891,6 +2997,7 @@ jsunity.run(ahuacatlQueryGeneralCyclesSuite);
jsunity.run(ahuacatlQueryGeneralTraversalTestSuite);
jsunity.run(ahuacatlQueryGeneralPathsTestSuite);
jsunity.run(ahuacatlQueryGeneralEdgesTestSuite);
jsunity.run(ahuacatlQueryMultiCollectionMadnessTestSuite);
return jsunity.done();

File diff suppressed because it is too large.