mirror of https://gitee.com/bigwinds/arangodb
Merge branch 'devel' of https://github.com/ArangoDB/ArangoDB into devel
commit 8a9d3ec20d
@@ -544,7 +544,7 @@ if test -n "${TARGET_DIR}"; then
     if test "`uname -o||true`" == "Cygwin"; then
       SSLDIR=`grep FIND_PACKAGE_MESSAGE_DETAILS_OpenSSL CMakeCache.txt |sed -e "s/.*optimized;//" -e "s/;.*//" -e "s;/lib.*lib;;" -e "s;\([a-zA-Z]*\):;/cygdrive/\1;"`
       DLLS=`find ${SSLDIR} -name \*.dll |grep -i release`
-      cp ${DLLS} bin
+      cp ${DLLS} bin/${BUILD_CONFIG}
     fi
     tar -u -f ${TARFILE_TMP} \
       bin etc tests
@@ -920,35 +920,37 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
    engineInfo.close();  // base

    if (!shardSet.empty()) {
      arangodb::CoordTransactionID coordTransactionID = TRI_NewTickServer();
      std::unordered_map<std::string, std::string> headers;

      auto res = cc->syncRequest("", coordTransactionID, "server:" + list.first,
                                 RequestType::POST, url, engineInfo.toJson(),
                                 headers, 30.0);
      if (res->status != CL_COMM_SENT) {
        // Note If there was an error on server side we do not have CL_COMM_SENT
        std::string message("could not start all traversal engines");
        if (res->errorMessage.length() > 0) {
          message += std::string(" : ") + res->errorMessage;
        }
        THROW_ARANGO_EXCEPTION_MESSAGE(
            TRI_ERROR_QUERY_COLLECTION_LOCK_FAILED, message);
      } else {
        // Only if the result was successful we will get here
        arangodb::basics::StringBuffer& body = res->result->getBody();

        std::shared_ptr<VPackBuilder> builder =
            VPackParser::fromJson(body.c_str(), body.length());
        VPackSlice resultSlice = builder->slice();
        if (!resultSlice.isNumber()) {
          THROW_ARANGO_EXCEPTION_MESSAGE(
              TRI_ERROR_INTERNAL, "got unexpected response from engine lock request");
        }
        auto engineId = resultSlice.getNumericValue<traverser::TraverserEngineID>();
        TRI_ASSERT(engineId != 0);
        traverserEngines.emplace(engineId, shardSet);
        en->addEngine(engineId, list.first);
      }
    }
  }
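The hunk above reads the traverser engine id straight out of the DB server's HTTP response body. Below is a minimal, self-contained sketch of that parse-and-validate pattern with VelocyPack; the "4711" response body, the main() wrapper, and the plain uint64_t alias for traverser::TraverserEngineID are illustrative assumptions, not ArangoDB code.

// Sketch only: parse a JSON number response and extract an engine id,
// mirroring the VPackParser::fromJson / isNumber / getNumericValue calls above.
#include <velocypack/Builder.h>
#include <velocypack/Parser.h>
#include <velocypack/Slice.h>

#include <cstdint>
#include <iostream>
#include <memory>
#include <string>

using TraverserEngineID = std::uint64_t;  // assumed stand-in for traverser::TraverserEngineID

int main() {
  // The DB server is expected to answer the POST with a single number (hypothetical body).
  std::string body = "4711";

  std::shared_ptr<arangodb::velocypack::Builder> builder =
      arangodb::velocypack::Parser::fromJson(body.c_str(), body.length());
  arangodb::velocypack::Slice resultSlice = builder->slice();

  if (!resultSlice.isNumber()) {
    std::cerr << "got unexpected response from engine lock request\n";
    return 1;
  }

  auto engineId = resultSlice.getNumericValue<TraverserEngineID>();
  std::cout << "registered traverser engine " << engineId << "\n";
  return 0;
}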
@@ -479,109 +479,125 @@ QueryResult Query::prepare(QueryRegistry* registry) {
  bool planRegisters;

  try {
    // As soon as we start to instantiate the plan we have to clean it
    // up before killing the unique_ptr
    if (_queryString != nullptr) {
      // we have an AST
      // optimize the ast
      enterState(AST_OPTIMIZATION);

      parser->ast()->validateAndOptimize();

      enterState(LOADING_COLLECTIONS);

      int res = trx->begin();

      if (res != TRI_ERROR_NO_ERROR) {
        return transactionError(res);
      }

      enterState(PLAN_INSTANTIATION);
      plan.reset(ExecutionPlan::instantiateFromAst(parser->ast()));

      if (plan.get() == nullptr) {
        // oops
        return QueryResult(TRI_ERROR_INTERNAL,
                           "failed to create query execution engine");
      }

      // Run the query optimizer:
      enterState(PLAN_OPTIMIZATION);
      arangodb::aql::Optimizer opt(maxNumberOfPlans());
      // get enabled/disabled rules
      opt.createPlans(plan.release(), getRulesFromOptions(),
                      inspectSimplePlans());
      // Now plan and all derived plans belong to the optimizer
      plan.reset(opt.stealBest());  // Now we own the best one again
      planRegisters = true;
    } else {  // no queryString, we are instantiating from _queryBuilder
      enterState(PARSING);

      VPackSlice const querySlice = _queryBuilder->slice();
      ExecutionPlan::getCollectionsFromVelocyPack(parser->ast(), querySlice);

      parser->ast()->variables()->fromVelocyPack(querySlice);
      // creating the plan may have produced some collections
      // we need to add them to the transaction now (otherwise the query will
      // fail)

      enterState(LOADING_COLLECTIONS);

      int res = trx->addCollectionList(_collections.collections());

      if (res == TRI_ERROR_NO_ERROR) {
        res = trx->begin();
      }

      if (res != TRI_ERROR_NO_ERROR) {
        return transactionError(res);
      }

      enterState(PLAN_INSTANTIATION);

      // we have an execution plan in VelocyPack format
      plan.reset(ExecutionPlan::instantiateFromVelocyPack(
          parser->ast(), _queryBuilder->slice()));
      if (plan.get() == nullptr) {
        // oops
        return QueryResult(TRI_ERROR_INTERNAL);
      }

      planRegisters = false;
    }

    TRI_ASSERT(plan.get() != nullptr);

    // varsUsedLater and varsValid are unordered_sets and so their orders
    // are not the same in the serialized and deserialized plans

    // return the V8 context
    exitContext();

    enterState(EXECUTION);
    ExecutionEngine* engine(ExecutionEngine::instantiateFromPlan(
        registry, this, plan.get(), planRegisters));

    // If all went well so far, then we keep _plan, _parser and _trx and
    // return:
    _plan = plan.release();
    _parser = parser.release();
    _engine = engine;
    return QueryResult();
  } catch (arangodb::basics::Exception const& ex) {
    cleanupPlanAndEngine(ex.code());
    return QueryResult(ex.code(), ex.message() + getStateString());
  } catch (std::bad_alloc const&) {
    cleanupPlanAndEngine(TRI_ERROR_OUT_OF_MEMORY);
    return QueryResult(
        TRI_ERROR_OUT_OF_MEMORY,
        TRI_errno_string(TRI_ERROR_OUT_OF_MEMORY) + getStateString());
  } catch (std::exception const& ex) {
    cleanupPlanAndEngine(TRI_ERROR_INTERNAL);
    return QueryResult(TRI_ERROR_INTERNAL, ex.what() + getStateString());
  } catch (...) {
    cleanupPlanAndEngine(TRI_ERROR_INTERNAL);
    return QueryResult(TRI_ERROR_INTERNAL,
                       TRI_errno_string(TRI_ERROR_INTERNAL) + getStateString());
  }
}

/// @brief execute an AQL query
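The tail of the Query::prepare hunk funnels every failure through the same pattern: catch, clean up the plan and engine, and turn the exception into a QueryResult carrying an error code plus the current state string, so no exception escapes preparation. A minimal sketch of that pattern follows; Result, the ERROR_* codes, stateString(), and prepare(bool) are hypothetical stand-ins, not the ArangoDB types.

// Sketch only: map exceptions to an error-carrying result object so that
// no exception escapes the preparation step. All names are illustrative.
#include <exception>
#include <iostream>
#include <new>
#include <stdexcept>
#include <string>

struct Result {              // stand-in for QueryResult
  int code = 0;              // 0 means "no error"
  std::string message;
};

constexpr int ERROR_OUT_OF_MEMORY = 3;  // stand-ins for TRI_ERROR_* codes
constexpr int ERROR_INTERNAL = 4;

// Stand-in for getStateString(): report which preparation phase failed.
std::string stateString() { return " (while in state: plan instantiation)"; }

Result prepare(bool fail) {
  try {
    if (fail) {
      throw std::runtime_error("failed to create query execution engine");
    }
    return Result{};  // success: error code 0, empty message
  } catch (std::bad_alloc const&) {
    return Result{ERROR_OUT_OF_MEMORY, "out of memory" + stateString()};
  } catch (std::exception const& ex) {
    return Result{ERROR_INTERNAL, ex.what() + stateString()};
  } catch (...) {
    return Result{ERROR_INTERNAL, "unknown error" + stateString()};
  }
}

int main() {
  Result r = prepare(true);
  std::cout << r.code << ": " << r.message << "\n";
  return 0;
}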