mirror of https://gitee.com/bigwinds/arangodb
add state "loading collections" to query profile results
This commit is contained in:
parent 4150fa7ec7
commit e88460efcb
@@ -21,7 +21,7 @@
 /// @author Jan Steemann
 ////////////////////////////////////////////////////////////////////////////////

-#include "Aql/ExecutionStats.h"
+#include "ExecutionStats.h"
 #include "Basics/Exceptions.h"

 #include <velocypack/Builder.h>
@@ -67,6 +67,7 @@ static std::string StateNames[] = {
     "initializing",         // INITIALIZATION
     "parsing",              // PARSING
     "optimizing ast",       // AST_OPTIMIZATION
+    "loading collections",  // LOADING_COLLECTIONS
     "instantiating plan",   // PLAN_INSTANTIATION
     "optimizing plan",      // PLAN_OPTIMIZATION
     "executing",            // EXECUTION
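The StateNames table above is indexed by the ExecutionState enum (see the header change further down), so the new label has to sit at exactly the position of the new LOADING_COLLECTIONS enumerator. Below is a minimal, self-contained sketch of how such a table can drive per-state profile timings; SimpleProfile, enter() and dump() are illustrative names rather than ArangoDB's actual profile API, and the sentinel handling is an assumption.

#include <array>
#include <chrono>
#include <cstddef>
#include <iostream>
#include <string>

// illustrative copy of the state list; must stay in the same order as StateNames
enum ExecutionState : std::size_t {
  INITIALIZATION = 0,
  PARSING,
  AST_OPTIMIZATION,
  LOADING_COLLECTIONS,  // the newly tracked phase
  PLAN_INSTANTIATION,
  PLAN_OPTIMIZATION,
  EXECUTION,
  INVALID_STATE  // sentinel used as a count here; the real enum has more entries
};

static std::string const StateNames[] = {
    "initializing",        "parsing",            "optimizing ast",
    "loading collections", "instantiating plan", "optimizing plan",
    "executing"};

// hypothetical profile: accumulates how long the query spent in each state
class SimpleProfile {
 public:
  void enter(ExecutionState next) {
    auto now = Clock::now();
    if (_current != INVALID_STATE) {
      _seconds[_current] += std::chrono::duration<double>(now - _since).count();
    }
    _current = next;
    _since = now;
  }

  void dump() const {
    for (std::size_t i = 0; i < INVALID_STATE; ++i) {
      std::cout << StateNames[i] << ": " << _seconds[i] << " s\n";
    }
  }

 private:
  using Clock = std::chrono::steady_clock;
  ExecutionState _current = INVALID_STATE;
  Clock::time_point _since{};
  std::array<double, INVALID_STATE> _seconds{};
};

int main() {
  SimpleProfile profile;
  profile.enter(PARSING);
  profile.enter(AST_OPTIMIZATION);
  profile.enter(LOADING_COLLECTIONS);
  profile.enter(EXECUTION);
  profile.enter(INVALID_STATE);  // closes the last open phase
  profile.dump();
}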
@@ -456,17 +457,19 @@ QueryResult Query::prepare(QueryRegistry* registry) {

   if (_queryString != nullptr) {
     // we have an AST
+    // optimize the ast
+    enterState(AST_OPTIMIZATION);
+
+    parser->ast()->validateAndOptimize();
+
+    enterState(LOADING_COLLECTIONS);
+
     int res = trx->begin();

     if (res != TRI_ERROR_NO_ERROR) {
       return transactionError(res);
     }

-    // optimize the ast
-    enterState(AST_OPTIMIZATION);
-
-    parser->ast()->validateAndOptimize();
-
     enterState(PLAN_INSTANTIATION);
     plan.reset(ExecutionPlan::instantiateFromAst(parser->ast()));

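With this reordering, AST optimization runs before trx->begin(), and the new LOADING_COLLECTIONS transition is entered just before the transaction starts, so the time spent opening and locking collections is no longer billed to AST optimization or plan instantiation. A small sketch of that attribution idea follows; timedPhase is a hypothetical helper and the lambda bodies only stand in for the real calls.

#include <chrono>
#include <functional>
#include <iostream>
#include <string>

// hypothetical helper: run `work` and report its wall time under `phase`
static void timedPhase(std::string const& phase, std::function<void()> const& work) {
  auto start = std::chrono::steady_clock::now();
  work();
  double seconds =
      std::chrono::duration<double>(std::chrono::steady_clock::now() - start).count();
  std::cout << phase << ": " << seconds << " s\n";
}

int main() {
  // mirrors the new ordering in Query::prepare() for the query-string path
  timedPhase("optimizing ast", [] { /* parser->ast()->validateAndOptimize() */ });
  timedPhase("loading collections", [] { /* trx->begin() */ });
  timedPhase("instantiating plan", [] { /* ExecutionPlan::instantiateFromAst(...) */ });
}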
@@ -479,14 +482,14 @@ QueryResult Query::prepare(QueryRegistry* registry) {
     // Run the query optimizer:
     enterState(PLAN_OPTIMIZATION);
     arangodb::aql::Optimizer opt(maxNumberOfPlans());
-    // getenabled/disabled rules
+    // get enabled/disabled rules
     opt.createPlans(plan.release(), getRulesFromOptions(),
                     inspectSimplePlans());
     // Now plan and all derived plans belong to the optimizer
     plan.reset(opt.stealBest()); // Now we own the best one again
     planRegisters = true;
   } else { // no queryString, we are instantiating from _queryBuilder
-    enterState(PLAN_INSTANTIATION);
+    enterState(PARSING);

     VPackSlice const querySlice = _queryBuilder->slice();
     ExecutionPlan::getCollectionsFromVelocyPack(parser->ast(), querySlice);
@@ -495,7 +498,9 @@ QueryResult Query::prepare(QueryRegistry* registry) {
     // creating the plan may have produced some collections
     // we need to add them to the transaction now (otherwise the query will
     // fail)

+    enterState(LOADING_COLLECTIONS);
+
     int res = trx->addCollectionList(_collections.collections());

     if (res == TRI_ERROR_NO_ERROR) {
@@ -505,6 +510,8 @@ QueryResult Query::prepare(QueryRegistry* registry) {
     if (res != TRI_ERROR_NO_ERROR) {
       return transactionError(res);
     }

+    enterState(PLAN_INSTANTIATION);
+
     // we have an execution plan in VelocyPack format
     plan.reset(ExecutionPlan::instantiateFromVelocyPack(
@@ -702,9 +709,14 @@ QueryResult Query::execute(QueryRegistry* registry) {
       result.profile = _profile->toVelocyPack();
     }
+
+    // patch stats in place
+    // we do this because "executionTime" should include the whole span of the execution and we have to set it at the very end
+    basics::VelocyPackHelper::patchDouble(result.stats->slice().get("executionTime"), TRI_microtime() - _startTime);
+
     LOG_TOPIC(DEBUG, Logger::QUERIES) << TRI_microtime() - _startTime << " "
                                       << "Query::execute:returning"
                                       << " this: " << (uintptr_t) this;

     return result;
   } catch (arangodb::basics::Exception const& ex) {
     setExecutionTime();
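The stats object is already serialized into VelocyPack by the time the final execution time is known, so rather than rebuilding it, the committed code overwrites the "executionTime" double in place as the very last step. The following is a self-contained sketch of that "serialize a placeholder now, patch the number later" pattern on a plain byte buffer; no VelocyPack is involved and the tag byte is only a stand-in.

#include <chrono>
#include <cstring>
#include <iostream>
#include <thread>
#include <vector>

int main() {
  auto start = std::chrono::steady_clock::now();

  // `buf` stands in for an already-built result buffer in which the
  // executionTime value sits at a known offset with a fixed 8-byte width
  std::vector<unsigned char> buf;
  buf.push_back(0x1b);                    // stand-in tag byte for a double value
  std::size_t const offset = buf.size();  // remember where the payload begins
  double placeholder = 0.0;
  buf.resize(buf.size() + sizeof(double));
  std::memcpy(buf.data() + offset, &placeholder, sizeof(double));

  // ... building the rest of the result also takes time ...
  std::this_thread::sleep_for(std::chrono::milliseconds(10));

  // at the very end, overwrite the payload in place so the reported time
  // covers the whole span of the execution
  double total =
      std::chrono::duration<double>(std::chrono::steady_clock::now() - start).count();
  std::memcpy(buf.data() + offset, &total, sizeof(double));

  double readBack;
  std::memcpy(&readBack, buf.data() + offset, sizeof(double));
  std::cout << "executionTime: " << readBack << " s\n";
}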
@@ -881,6 +893,10 @@ QueryResultV8 Query::executeV8(v8::Isolate* isolate, QueryRegistry* registry) {
     if (_profile != nullptr && profiling()) {
       result.profile = _profile->toVelocyPack();
     }

+    // patch executionTime stats value in place
+    // we do this because "executionTime" should include the whole span of the execution and we have to set it at the very end
+    basics::VelocyPackHelper::patchDouble(result.stats->slice().get("executionTime"), TRI_microtime() - _startTime);
+
     return result;
   } catch (arangodb::basics::Exception const& ex) {
@@ -941,6 +957,8 @@ QueryResult Query::explain() {
   enterState(AST_OPTIMIZATION);
   // optimize and validate the ast
   parser.ast()->validateAndOptimize();

+  enterState(LOADING_COLLECTIONS);
+
   // create the transaction object, but do not start it yet
   _trx = new arangodb::AqlTransaction(createTransactionContext(),
@@ -67,6 +67,7 @@ enum ExecutionState {
   INITIALIZATION = 0,
   PARSING,
   AST_OPTIMIZATION,
+  LOADING_COLLECTIONS,
   PLAN_INSTANTIATION,
   PLAN_OPTIMIZATION,
   EXECUTION,
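The enum above and the StateNames array in the first hunk are parallel lists: inserting LOADING_COLLECTIONS in one but not the other would shift every later label by one position. The commit itself does not show such a check; the static_assert below is only a sketch of a guard one could add to keep the two in sync, using INVALID_STATE as a count sentinel.

#include <cstddef>
#include <string>

// illustrative parallel definitions; INVALID_STATE doubles as the state count
enum ExecutionState {
  INITIALIZATION = 0,
  PARSING,
  AST_OPTIMIZATION,
  LOADING_COLLECTIONS,
  PLAN_INSTANTIATION,
  PLAN_OPTIMIZATION,
  EXECUTION,
  INVALID_STATE
};

static std::string const StateNames[] = {
    "initializing",        "parsing",            "optimizing ast",
    "loading collections", "instantiating plan", "optimizing plan",
    "executing"};

// compile-time guard: exactly one label per state (excluding the sentinel)
static_assert(sizeof(StateNames) / sizeof(StateNames[0]) ==
                  static_cast<std::size_t>(INVALID_STATE),
              "StateNames is out of sync with ExecutionState");

int main() { return 0; }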
@@ -1915,6 +1915,7 @@
         'startup time for query engine',
         'query parsing',
         'abstract syntax tree optimizations',
+        'loading collections',
         'instanciation of initial execution plan',
         'execution plan optimization and permutation',
         'query execution'
@@ -877,6 +877,15 @@ double VelocyPackHelper::toDouble(VPackSlice const& slice, bool& failed) {
   failed = true;
   return 0.0;
 }

+// modify a VPack double value in place
+void VelocyPackHelper::patchDouble(VPackSlice slice, double value) {
+  TRI_ASSERT(slice.isDouble());
+  // get pointer to the start of the value
+  uint8_t* p = const_cast<uint8_t*>(slice.begin());
+  // skip one byte for the header and overwrite
+  *reinterpret_cast<double*>(p + 1) = value;
+}
+
 #ifndef USE_ENTERPRISE
 uint64_t VelocyPackHelper::hashByAttributes(
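patchDouble relies on the value being encoded with a fixed width: one header byte followed by an 8-byte double payload, so the number can be swapped without resizing or re-serializing the surrounding buffer. Below is a hypothetical standalone variant of the same idea on a hand-built buffer; it uses memcpy rather than a reinterpret_cast dereference so the write stays well-defined even if the payload is not suitably aligned for a double, and the 0x1b tag value is just a stand-in for whatever header byte the encoder wrote.

#include <cassert>
#include <cstdint>
#include <cstring>
#include <iostream>

// hypothetical standalone variant of the in-place patch: `p` points at a
// value encoded as one tag byte followed by an 8-byte double payload
static void patchDoubleAt(std::uint8_t* p, double value) {
  std::memcpy(p + 1, &value, sizeof(double));
}

int main() {
  // hand-built stand-in for a serialized double: tag byte + 8 payload bytes
  std::uint8_t buf[1 + sizeof(double)];
  buf[0] = 0x1b;  // stand-in tag marking a double value
  double initial = 0.0;
  std::memcpy(buf + 1, &initial, sizeof(double));

  // the payload has a fixed width, so the value can be swapped without
  // touching the rest of the buffer
  patchDoubleAt(buf, 42.5);

  double readBack;
  std::memcpy(&readBack, buf + 1, sizeof(double));
  std::cout << readBack << "\n";  // prints 42.5
  assert(readBack == 42.5);
}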
@@ -324,6 +324,9 @@ class VelocyPackHelper {

   static double toDouble(VPackSlice const&, bool&);

+  // modify a VPack double value in place
+  static void patchDouble(VPackSlice slice, double value);
+
   static uint64_t hashByAttributes(VPackSlice, std::vector<std::string> const&,
                                    bool, int&, std::string const& key = "");
