
added "peakMemoryUsage" query result figure (#7952)

Jan 2019-01-17 13:58:18 +01:00 committed by GitHub
parent 1b61f64b22
commit 9635df56eb
12 changed files with 54 additions and 18 deletions

View File

@@ -1,6 +1,10 @@
devel
-----
* added "peakMemoryUsage" in query results figures, showing the peak memory
usage of the executed query. In a cluster, the value the peak memory usage
of all shards, but it is not summed up across shards.
* fixed an issue where a crashed coordinator can lead to some Foxx queue jobs
erroneously either left hanging or being restarted
@@ -12,7 +16,7 @@ devel
millisecond parts of AQL date values were limited to up to 3 digits.
Now the length of the millisecond part is unrestricted, but the
millisecond precision is still limit to up to 3 digits.
millisecond precision is still limited to up to 3 digits.
* fix issue #7900: Bind values of `null` are not replaced by empty string
anymore, when toggling between JSON and table view in the web UI
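
For orientation, a minimal arangosh sketch of how the new figure can be read from a cursor's statistics; the query is illustrative only and the value is assumed to be reported in bytes:

// arangosh: run a query and inspect the statistics shipped with the cursor
var db = require("@arangodb").db;

var cursor = db._query("FOR i IN 1..100000 SORT i RETURN i");
var stats = cursor.getExtra().stats;

// "peakMemoryUsage" appears next to the existing figures such as
// "executionTime", "scannedFull" and "filtered"
print(stats.peakMemoryUsage);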

View File

@@ -47,6 +47,13 @@ The meaning of the statistics attributes is as follows:
This attribute may only be returned if the `fullCount` option was set when starting the
query and will only contain a sensible value if the query contained a `LIMIT` operation on
the top level.
* *peakMemoryUsage*: the maximum memory usage of the query while it was running. In a cluster,
the memory accounting is done per shard, and the memory usage reported is the peak
memory usage value from the individual shards.
Note that to keep things light-weight, the per-query memory usage is tracked on a relatively
high level, not including any memory allocator overhead nor any memory used for temporary
results calculations (e.g. memory allocated/deallocated inside AQL expressions and function
calls).
* *nodes*: _(optional)_ when the query was executed with the option `profile` set to at least *2*,
then this value contains runtime statistics per query execution node. This field contains the
node id (in `id`), the number of calls to this node `calls` and the number of items returned
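
To make the cluster behaviour of "peakMemoryUsage" concrete, a small JavaScript sketch of how per-shard peaks are combined; the shard values are made up, and the merge mirrors the std::max in ExecutionStats::add() further down, i.e. the reported figure is the largest per-shard peak, not the sum:

// hypothetical per-shard peak memory usage values, in bytes
var shardPeaks = [12 * 1024 * 1024, 48 * 1024 * 1024, 20 * 1024 * 1024];

// merge them the way the statistics are combined: keep the maximum, do not add them up
var reported = shardPeaks.reduce(function (max, peak) {
  return Math.max(max, peak);
}, 0);

print(reported); // 50331648, i.e. the peak of the largest shard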

View File

@@ -45,8 +45,9 @@ void ExecutionStats::toVelocyPack(VPackBuilder& builder, bool reportFullCount) c
// fullCount is optional
builder.add("fullCount", VPackValue(fullCount > count ? fullCount : count));
}
// builder.add("count", VPackValue(count));
builder.add("executionTime", VPackValue(executionTime));
builder.add("peakMemoryUsage", VPackValue(peakMemoryUsage));
if (!nodes.empty()) {
builder.add("nodes", VPackValue(VPackValueType::Array));
@@ -80,6 +81,7 @@ void ExecutionStats::add(ExecutionStats const& summand) {
fullCount += summand.fullCount;
}
count += summand.count;
peakMemoryUsage = std::max(summand.peakMemoryUsage, peakMemoryUsage);
// intentionally no modification of executionTime
for (auto const& pair : summand.nodes) {
@@ -99,7 +101,8 @@ ExecutionStats::ExecutionStats()
requests(0),
fullCount(0),
count(0),
executionTime(0.0) {}
executionTime(0.0),
peakMemoryUsage(0) {}
ExecutionStats::ExecutionStats(VPackSlice const& slice) : ExecutionStats() {
if (!slice.isObject()) {

View File

@@ -64,6 +64,9 @@ struct ExecutionStats {
/// @brief sets query execution time from the outside
void setExecutionTime(double value) { executionTime = value; }
/// @brief sets the peak memory usage from the outside
void setPeakMemoryUsage(size_t value) { peakMemoryUsage = value; }
/// @brief summarize two sets of ExecutionStats
void add(ExecutionStats const& summand);
@@ -78,6 +81,7 @@ struct ExecutionStats {
fullCount = 0;
count = 0;
executionTime = 0.0;
peakMemoryUsage = 0;
}
/// @brief number of successfully executed write operations
@@ -108,6 +112,9 @@ struct ExecutionStats {
/// the outside
double executionTime;
/// @brief peak memory usage of the query
size_t peakMemoryUsage;
/// @brief statistics per ExecutionNodes
std::map<size_t, ExecutionStats::Node> nodes;
};

View File

@@ -279,6 +279,7 @@ void Query::kill() { _killed = true; }
void Query::setExecutionTime() {
if (_engine != nullptr) {
_engine->_stats.setPeakMemoryUsage(_resourceMonitor.currentResources.peakMemoryUsage);
_engine->_stats.setExecutionTime(TRI_microtime() - _startTime);
}
}
@@ -976,7 +977,7 @@ ExecutionState Query::finalize(QueryResult& result) {
<< "Query::finalize: before cleanupPlanAndEngine"
<< " this: " << (uintptr_t)this;
_engine->_stats.setExecutionTime(runTime());
setExecutionTime();
enterState(QueryExecutionState::ValueType::FINALIZATION);
result.extra = std::make_shared<VPackBuilder>();
@@ -1009,7 +1010,7 @@ ExecutionState Query::finalize(QueryResult& result) {
// patch executionTime stats value in place
// we do this because "executionTime" should include the whole span of the
// execution and we have to set it at the very end
double const rt = runTime(now);
double const rt = now - _startTime;
basics::VelocyPackHelper::patchDouble(result.extra->slice().get("stats").get("executionTime"),
rt);
@@ -1244,7 +1245,7 @@ void Query::exitContext() {
/// @brief returns statistics for current query.
void Query::getStats(VPackBuilder& builder) {
if (_engine != nullptr) {
_engine->_stats.setExecutionTime(TRI_microtime() - _startTime);
setExecutionTime();
_engine->_stats.toVelocyPack(builder, _queryOptions.fullCount);
} else {
ExecutionStats::toVelocyPackStatic(builder);

View File

@@ -150,12 +150,6 @@ class Query {
/// @brief return the start timestamp of the query
double startTime() const { return _startTime; }
/// @brief return the current runtime of the query
double runTime(double now) const { return now - _startTime; }
/// @brief return the current runtime of the query
double runTime() const { return runTime(TRI_microtime()); }
/// @brief the part of the query
inline QueryPart part() const { return _part; }
@@ -471,4 +465,4 @@ class Query {
} // namespace aql
} // namespace arangodb
#endif
#endif

View File

@@ -27,16 +27,26 @@
#include "Basics/Common.h"
#include "Basics/Exceptions.h"
#include <algorithm>
namespace arangodb {
namespace aql {
struct ResourceUsage {
constexpr ResourceUsage() : memoryUsage(0) {}
explicit ResourceUsage(size_t memoryUsage) : memoryUsage(memoryUsage) {}
constexpr ResourceUsage()
: memoryUsage(0),
peakMemoryUsage(0) {}
ResourceUsage(ResourceUsage const& other) noexcept
: memoryUsage(other.memoryUsage),
peakMemoryUsage(other.peakMemoryUsage) {}
void clear() { memoryUsage = 0; }
void clear() {
memoryUsage = 0;
peakMemoryUsage = 0;
}
size_t memoryUsage;
size_t peakMemoryUsage;
};
struct ResourceMonitor {
@@ -47,12 +57,16 @@ struct ResourceMonitor {
void setMemoryLimit(size_t value) { maxResources.memoryUsage = value; }
inline void increaseMemoryUsage(size_t value) {
currentResources.memoryUsage += value;
if (maxResources.memoryUsage > 0 &&
currentResources.memoryUsage + value > maxResources.memoryUsage) {
ADB_UNLIKELY(currentResources.memoryUsage > maxResources.memoryUsage)) {
currentResources.memoryUsage -= value;
THROW_ARANGO_EXCEPTION_MESSAGE(
TRI_ERROR_RESOURCE_LIMIT, "query would use more memory than allowed");
}
currentResources.memoryUsage += value;
currentResources.peakMemoryUsage = std::max(currentResources.memoryUsage, currentResources.peakMemoryUsage);
}
inline void decreaseMemoryUsage(size_t value) noexcept {

View File

@@ -430,6 +430,7 @@ function getQueryMultiplePlansAndExecutions (query, bindVars, testObject, debug)
delete results[i].stats.filtered;
delete results[i].stats.executionTime;
delete results[i].stats.httpRequests;
delete results[i].stats.peakMemoryUsage;
delete results[i].stats.fullCount;
if (debug) {

View File

@@ -250,6 +250,7 @@ function assertIsProfileStatsObject (stats, {level}) {
'scannedIndex',
'filtered',
'httpRequests',
'peakMemoryUsage',
'executionTime',
];
@@ -266,6 +267,7 @@ function assertIsProfileStatsObject (stats, {level}) {
expect(stats.scannedIndex).to.be.a('number');
expect(stats.filtered).to.be.a('number');
expect(stats.httpRequests).to.be.a('number');
expect(stats.peakMemoryUsage).to.be.a('number');
expect(stats.executionTime).to.be.a('number');
}

View File

@@ -44,6 +44,7 @@ var sanitizeStats = function (stats) {
delete stats.executionTime;
delete stats.httpRequests;
delete stats.fullCount;
delete stats.peakMemoryUsage;
return stats;
};

View File

@@ -48,6 +48,7 @@ var sanitizeStats = function (stats) {
delete stats.executionTime;
delete stats.httpRequests;
delete stats.fullCount;
delete stats.peakMemoryUsage;
return stats;
};

View File

@@ -56,6 +56,7 @@ var sanitizeStats = function (stats) {
delete stats.executionTime;
delete stats.httpRequests;
delete stats.fullCount;
delete stats.peakMemoryUsage;
return stats;
};