1
0
Fork 0

Fix Foxx queues (3.3) (#5353)

This commit is contained in:
Mark 2018-05-16 15:28:25 +02:00 committed by Jan
parent 4eba1e1a0c
commit 911b1d1ce1
5 changed files with 69 additions and 24 deletions

View File

@ -1,5 +1,7 @@
v3.3.9 (xxxx-xx-xx)
-------------------
* fixed Foxx queues bug when queues are created in a request handler with an
ArangoDB authentication header
* upgraded arangosync version to 0.5.1
@ -20,7 +22,7 @@ v3.3.9 (xxxx-xx-xx)
will always return the first value for non-unique attribute names and not the
last occurring value.
* self heal during a Foxx service install, upgrade or replace no longer breaks
* self heal during a Foxx service install, upgrade or replace no longer breaks
the respective operation
* make /_api/index, /_api/database and /_api/user REST handlers use the scheduler's internal
@ -52,16 +54,16 @@ v3.3.9 (xxxx-xx-xx)
This optimization can speed up gathering data from multiple shards, because
it allows to remove a merge sort of the individual shards' results.
* extend the already existing "reduce-extraction-to-projection" AQL optimizer
* extend the already existing "reduce-extraction-to-projection" AQL optimizer
rule for RocksDB to provide projections of up to 5 document attributes. The
previous implementation only supported a projection for a single document
attribute. The new implementation will extract up to 5 document attributes from
previous implementation only supported a projection for a single document
attribute. The new implementation will extract up to 5 document attributes from
a document while scanning a collection via an EnumerateCollectionNode.
Additionally the new version of the optimizer rule can also produce projections
when scanning an index via an IndexNode.
when scanning an index via an IndexNode.
The optimization is beneficial especially for huge documents because it will copy
out only the projected attributes from the document instead of copying the entire
document data from the storage engine.
out only the projected attributes from the document instead of copying the entire
document data from the storage engine.
When applied, the explainer will show the projected attributes in a `projections`
remark for an EnumerateCollectionNode or IndexNode. The optimization is limited
@ -73,7 +75,7 @@ v3.3.9 (xxxx-xx-xx)
This optimization will be triggered for the RocksDB engine if an index is used
that covers all required attributes of the document used later on in the query.
If applied, it will save retrieving the actual document data (which would require
an extra lookup in RocksDB), but will instead build the document data solely
an extra lookup in RocksDB), but will instead build the document data solely
from the index values found. It will only be applied when using up to 5 attributes
from the document, and only if the rest of the document data is not used later
on in the query.
@ -87,11 +89,11 @@ v3.3.9 (xxxx-xx-xx)
* added scan-only optimization for AQL queries that iterate over collections or
indexes and that do not need to return the actual document values.
Not fetching the document values from the storage engine will provide a
Not fetching the document values from the storage engine will provide a
considerable speedup when using the RocksDB engine, but may also help a bit
in case of the MMFiles engine. The optimization will only be applied when
in case of the MMFiles engine. The optimization will only be applied when
full-scanning or index-scanning a collection without referring to any of its
documents later on, and, for an IndexNode, if all filter conditions for the
documents later on, and, for an IndexNode, if all filter conditions for the
documents of the collection are covered by the index.
If the optimization is applied, it will show up as "scan only" in an AQL
@ -107,7 +109,7 @@ v3.3.9 (xxxx-xx-xx)
- COLLECT var1 = expr1, ..., varn = exprn (WITH COUNT INTO ...), without INTO or KEEP
- COLLECT var1 = expr1, ..., varn = exprn AGGREGATE ..., without INTO or KEEP, for
aggregate functions COUNT/LENGTH, SUM, MIN and MAX.
* honor specified COLLECT method in AQL COLLECT options
for example, when the user explicitly asks for the COLLECT method

View File

@ -37,6 +37,7 @@ const fs = require('fs');
const internal = require('internal');
const basePath = fs.makeAbsolute(fs.join(internal.startupPath, 'common', 'test-data', 'apps'));
const download = internal.download;
const request = require('@arangodb/request');
const arangodb = require('@arangodb');
const arango = require('@arangodb').arango;
@ -115,16 +116,46 @@ describe('Foxx service', () => {
expect(jobResult.length).to.equal(1);
});
const waitForJob = () => {
it('should support jobs running in the queue', () => {
let res = request.post(`${arango.getEndpoint().replace('tcp://', 'http://')}/${mount}`, {
body: JSON.stringify({repeatTimes: 2})
});
expect(res.statusCode).to.equal(204);
expect(waitForJob(2)).to.equal(true, 'job from foxx queue did not run!');
const jobResult = db._query(aql`
FOR i IN foxx_queue_test
FILTER i.job == true
RETURN 1
`).toArray();
expect(jobResult.length).to.equal(2);
});
it('should ignore the arango user', () => {
let res = download(`${arango.getEndpoint().replace('tcp://', 'http://')}/${mount}`, '', {
method: 'post',
username: 'root',
password: ''
});
expect(res.code).to.equal(204);
expect(waitForJob()).to.equal(true, 'job from foxx queue did not run!');
const jobResult = db._query(aql`
FOR i IN foxx_queue_test
FILTER i.job == true
RETURN 1
`).toArray();
expect(jobResult.length).to.equal(1);
});
const waitForJob = (runs = 1) => {
let i = 0;
while (i++ < 50) {
internal.wait(0.1);
const jobs = db._query(aql`
FOR job IN _jobs
FILTER job.type.mount == ${mount}
RETURN job.status
RETURN job
`).toArray();
if (jobs.length === 1 && jobs[0] === 'complete') {
if (jobs.length === 1 && jobs[0].status === 'complete' && jobs[0].runs === runs) {
return true;
}
}

View File

@ -36,6 +36,7 @@ const fs = require('fs');
const internal = require('internal');
const basePath = fs.makeAbsolute(fs.join(internal.startupPath, 'common', 'test-data', 'apps'));
const download = internal.download;
const request = require('@arangodb/request');
const arangodb = require('@arangodb');
const arango = require('@arangodb').arango;
@ -107,19 +108,33 @@ describe('Foxx service', () => {
`).toArray();
expect(jobResult.length).to.equal(1);
});
const waitForJob = () => {
it('should support repeating job running in the queue', () => {
let res = request.post(`${arango.getEndpoint().replace('tcp://', 'http://')}/${mount}`, {
body: JSON.stringify({repeatTimes: 2})
});
expect(res.statusCode).to.equal(204);
expect(waitForJob(2)).to.equal(true);
const jobResult = db._query(aql`
FOR i IN foxx_queue_test
FILTER i.job == true
RETURN 1
`).toArray();
expect(jobResult.length).to.equal(2);
});
const waitForJob = (runs = 1) => {
let i = 0;
while (i++ < 50) {
internal.wait(0.1);
const jobs = db._query(aql`
FOR job IN _jobs
FILTER job.type.mount == ${mount}
RETURN job.status
RETURN job
`).toArray();
if (jobs.length === 1 && jobs[0] === 'complete') {
if (jobs.length === 1 && jobs[0].status === 'complete' && jobs[0].runs === runs) {
return true;
}
}
return false;
};
});

View File

@ -4,11 +4,12 @@ const router = require('@arangodb/foxx/router')();
module.context.use(router);
router.post((req, res) => {
const body = req.body && req.body.length ? JSON.parse(req.body.toString()) : {};
const queue = queues.create('test_queue');
queue.push({
name: 'job',
mount: '/queue_test_mount'
}, {});
}, {}, body || {});
});
router.delete((req, res) => {
@ -18,3 +19,4 @@ router.delete((req, res) => {
}
queues.delete('test_queue');
});

View File

@ -46,10 +46,6 @@ var runInDatabase = function () {
busy = true;
return;
}
// should always call the user who called createQueue
// registerTask will throw a forbidden exception if anyone
// other than superroot uses this option
let runAsUser = queue.runAsUser || '';
var now = Date.now();
var max = queue.maxWorkers - numBusy;
@ -83,7 +79,6 @@ var runInDatabase = function () {
},
offset: 0,
isSystem: true,
runAsUser: runAsUser,
params: {
job: Object.assign({}, job, {
status: 'progress'