mirror of https://gitee.com/bigwinds/arangodb
fix invalid results (too many) when a skipping LIMIT was used for a traversal (#2603)
`LIMIT x` or `LIMIT 0, x` were not affected, but `LIMIT s, x` may have returned too many results
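For context, the `LIMIT s, x` semantics this commit restores for traversals are: skip the first `s` result paths, then return at most `x`. A minimal arangosh sketch of that expectation, assuming the `UnitTestGraphv`/`UnitTestGraphe` collections created by the regression test added below (10000 vertices, one edge per vertex):

// arangosh sketch; collection names are taken from the new limitSuite test below
var db = require("@arangodb").db;
var result = db._query(
  "FOR v IN UnitTestGraphv " +
  "  FOR e IN 1..1 OUTBOUND v._id UnitTestGraphe " +
  "    LIMIT 9990, 100 RETURN e").toArray();
// 10000 paths exist and 9990 are skipped, so only 10 can remain;
// before this fix, a skipping LIMIT could return more than that.
require("internal").print(result.length);  // expected: 10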
This commit is contained in:
parent b32db87b67
commit f38ba7c84d
@@ -1,6 +1,10 @@
 devel
 -----
 
+* fix invalid results (too many) when a skipping LIMIT was used for a
+  traversal. `LIMIT x` or `LIMIT 0, x` were not affected, but `LIMIT s, x`
+  may have returned too many results
+
 * fix races in SSL communication code
 
 * fix invalid locking in JWT authentication cache, which could have

@@ -319,6 +323,10 @@ v3.2.alpha1 (2017-02-05)
 v3.1.23 (XXXX-XX-XX)
 --------------------
 
+* fix invalid results (too many) when a skipping LIMIT was used for a
+  traversal. `LIMIT x` or `LIMIT 0, x` were not affected, but `LIMIT s, x`
+  may have returned too many results
+
 * fix invalid first group results for sorted AQL COLLECT when LIMIT
   was used
 
@@ -293,7 +293,7 @@ size_t TraversalBlock::skipPaths(size_t hint) {
 
 void TraversalBlock::initializeExpressions(AqlItemBlock const* items, size_t pos) {
   // Initialize the Expressions within the options.
-  // We need to find the variable and read it's value here. Everything is computed right now.
+  // We need to find the variable and read its value here. Everything is computed right now.
   _opts->clearVariableValues();
   TRI_ASSERT(_inVars.size() == _inRegs.size());
   for (size_t i = 0; i < _inVars.size(); ++i) {
@@ -461,39 +461,46 @@ size_t TraversalBlock::skipSome(size_t atLeast, size_t atMost) {
   if (_done) {
     return skipped;
   }
 
-  if (_buffer.empty()) {
-    size_t toFetch = (std::min)(DefaultBatchSize(), atMost);
-    if (!ExecutionBlock::getBlock(toFetch, toFetch)) {
-      _done = true;
-      return skipped;
-    }
-    _pos = 0;  // this is in the first block
-  }
-
-  // If we get here, we do have _buffer.front()
-  AqlItemBlock* cur = _buffer.front();
-
-  if (_pos == 0) {
-    // Initial initialisation
-    initializePaths(cur, _pos);
-  }
-
-  size_t available = _vertices.size() - _posInPaths;
-  // We have not yet fetched any paths. We can skip the next atMost many
-  if (available == 0) {
-    return skipPaths(atMost);
-  }
-  // We have fewer paths available in our list, so we clear the list and thereby
-  // skip these.
-  if (available <= atMost) {
-    freeCaches();
-    _posInPaths = 0;
-    return available;
-  }
-  _posInPaths += atMost;
-  // Skip the next atMost many paths.
-  return atMost;
+  if (_posInPaths < _vertices.size()) {
+    skipped += (std::min)(atMost, _vertices.size() - _posInPaths);
+    _posInPaths += skipped;
+  }
+
+  while (skipped < atLeast) {
+    if (_buffer.empty()) {
+      size_t toFetch = (std::min)(DefaultBatchSize(), atMost);
+      if (!ExecutionBlock::getBlock(toFetch, toFetch)) {
+        _done = true;
+        return skipped;
+      }
+      _pos = 0;  // this is in the first block
+    }
+
+    // If we get here, we do have _buffer.front()
+    AqlItemBlock* cur = _buffer.front();
+    initializePaths(cur, _pos);
+
+    while (atMost > skipped) {
+      TRI_ASSERT(atMost >= skipped);
+      skipped += skipPaths(atMost - skipped);
+
+      if (_traverser->hasMore()) {
+        continue;
+      }
+
+      if (++_pos >= cur->size()) {
+        _buffer.pop_front();  // does not throw
+        delete cur;
+        _pos = 0;
+        break;
+      }
+
+      initializePaths(cur, _pos);
+    }
+  }
+
+  return skipped;
 
   // cppcheck-suppress style
   DEBUG_END_BLOCK();
@@ -70,6 +70,73 @@ var createBaseGraph = function () {
   edge.FE = ec.save(vertex.F, vertex.E, {})._id;
 };
 
+function limitSuite () {
+  const gn = "UnitTestGraph";
+  var c;
+
+  return {
+
+    setUpAll: function() {
+      db._drop(gn + "v");
+      db._drop(gn + "e");
+
+      var i;
+
+      var c = db._create(gn + "v");
+      for (i = 0; i < 10000; ++i) {
+        c.insert({_key: "test" + i });
+      }
+
+      c = db._createEdgeCollection(gn + "e");
+      for (i = 0; i < 10000; ++i) {
+        c.insert({ _from: gn + "v/test" + i, _to: gn + "v/test" + i });
+      }
+
+    },
+
+    tearDownAll: function () {
+      db._drop(gn + "v");
+      db._drop(gn + "e");
+    },
+
+    testLimits: function() {
+      var queries = [
+        [ "FOR v IN " + gn + "v FOR e IN 1..1 OUTBOUND v._id " + gn + "e LIMIT 0, 10000 RETURN e", 10000 ],
+        [ "FOR v IN " + gn + "v FOR e IN 1..1 OUTBOUND v._id " + gn + "e LIMIT 0, 1000 RETURN e", 1000 ],
+        [ "FOR v IN " + gn + "v FOR e IN 1..1 OUTBOUND v._id " + gn + "e LIMIT 0, 100 RETURN e", 100 ],
+        [ "FOR v IN " + gn + "v FOR e IN 1..1 OUTBOUND v._id " + gn + "e LIMIT 0, 10 RETURN e", 10 ],
+        [ "FOR v IN " + gn + "v FOR e IN 1..1 OUTBOUND v._id " + gn + "e LIMIT 10, 10 RETURN e", 10 ],
+        [ "FOR v IN " + gn + "v FOR e IN 1..1 OUTBOUND v._id " + gn + "e LIMIT 10, 100 RETURN e", 100 ],
+        [ "FOR v IN " + gn + "v FOR e IN 1..1 OUTBOUND v._id " + gn + "e LIMIT 10, 1000 RETURN e", 1000 ],
+        [ "FOR v IN " + gn + "v FOR e IN 1..1 OUTBOUND v._id " + gn + "e LIMIT 10, 10000 RETURN e", 9990 ],
+        [ "FOR v IN " + gn + "v FOR e IN 1..1 OUTBOUND v._id " + gn + "e LIMIT 1000, 1 RETURN e", 1 ],
+        [ "FOR v IN " + gn + "v FOR e IN 1..1 OUTBOUND v._id " + gn + "e LIMIT 1000, 10 RETURN e", 10 ],
+        [ "FOR v IN " + gn + "v FOR e IN 1..1 OUTBOUND v._id " + gn + "e LIMIT 1000, 100 RETURN e", 100 ],
+        [ "FOR v IN " + gn + "v FOR e IN 1..1 OUTBOUND v._id " + gn + "e LIMIT 1000, 1000 RETURN e", 1000 ],
+        [ "FOR v IN " + gn + "v FOR e IN 1..1 OUTBOUND v._id " + gn + "e LIMIT 1000, 10000 RETURN e", 9000 ],
+        [ "FOR v IN " + gn + "v FOR e IN 1..1 OUTBOUND v._id " + gn + "e LIMIT 9990, 1 RETURN e", 1 ],
+        [ "FOR v IN " + gn + "v FOR e IN 1..1 OUTBOUND v._id " + gn + "e LIMIT 9990, 9 RETURN e", 9 ],
+        [ "FOR v IN " + gn + "v FOR e IN 1..1 OUTBOUND v._id " + gn + "e LIMIT 9990, 10 RETURN e", 10 ],
+        [ "FOR v IN " + gn + "v FOR e IN 1..1 OUTBOUND v._id " + gn + "e LIMIT 9990, 11 RETURN e", 10 ],
+        [ "FOR v IN " + gn + "v FOR e IN 1..1 OUTBOUND v._id " + gn + "e LIMIT 9990, 100 RETURN e", 10 ],
+        [ "FOR v IN " + gn + "v FOR e IN 1..1 OUTBOUND v._id " + gn + "e LIMIT 9990, 1000 RETURN e", 10 ],
+        [ "FOR v IN " + gn + "v FOR e IN 1..1 OUTBOUND v._id " + gn + "e LIMIT 9999, 1 RETURN e", 1 ],
+        [ "FOR v IN " + gn + "v FOR e IN 1..1 OUTBOUND v._id " + gn + "e LIMIT 9999, 10 RETURN e", 1 ],
+        [ "FOR v IN " + gn + "v FOR e IN 1..1 OUTBOUND v._id " + gn + "e LIMIT 9999, 100 RETURN e", 1 ],
+        [ "FOR v IN " + gn + "v FOR e IN 1..1 OUTBOUND v._id " + gn + "e LIMIT 9999, 1000 RETURN e", 1 ],
+        [ "FOR v IN " + gn + "v FOR e IN 1..1 OUTBOUND v._id " + gn + "e LIMIT 10000, 0 RETURN e", 0 ],
+        [ "FOR v IN " + gn + "v FOR e IN 1..1 OUTBOUND v._id " + gn + "e LIMIT 10000, 1 RETURN e", 0 ],
+        [ "FOR v IN " + gn + "v FOR e IN 1..1 OUTBOUND v._id " + gn + "e LIMIT 10000, 10 RETURN e", 0 ],
+        [ "FOR v IN " + gn + "v FOR e IN 1..1 OUTBOUND v._id " + gn + "e LIMIT 10000, 1000 RETURN e", 0 ]
+      ];
+
+      queries.forEach(function(query) {
+        assertEqual(query[1], AQL_EXECUTE(query[0]).json.length);
+      });
+    }
+  };
+}
+
 function nestedSuite () {
   const gn = "UnitTestGraph";
   var objects, tags, tagged;

@@ -3321,6 +3388,7 @@ function optimizeNonVertexCentricIndexesSuite () {
   };
 };
 
+jsunity.run(limitSuite);
 jsunity.run(nestedSuite);
 jsunity.run(namedGraphSuite);
 jsunity.run(multiCollectionGraphSuite);