From ea220a22cc8ba83d7c2abc991abba0b7832e3f67 Mon Sep 17 00:00:00 2001 From: Andreas Streichardt Date: Tue, 24 Jan 2017 16:50:52 +0100 Subject: [PATCH 1/7] Make updatecurrentcollection better testable --- js/server/modules/@arangodb/cluster.js | 24 ++++++++++--------- .../cluster-sync-test-noncluster-spec.js | 5 +++- 2 files changed, 17 insertions(+), 12 deletions(-) diff --git a/js/server/modules/@arangodb/cluster.js b/js/server/modules/@arangodb/cluster.js index e594b6aaed..2f7cc1238c 100644 --- a/js/server/modules/@arangodb/cluster.js +++ b/js/server/modules/@arangodb/cluster.js @@ -936,12 +936,14 @@ function updateCurrentForCollections(localErrors, current) { return payload; } - function makeDropCurrentEntryCollection(dbname, col, shard, trx) { - trx[0][curCollections + dbname + '/' + col + '/' + shard] = + function makeDropCurrentEntryCollection(dbname, col, shard) { + let trx = {}; + trx[curCollections + dbname + '/' + col + '/' + shard] = {op: 'delete'}; + return trx; } - let trx = [{}]; + let trx = {}; // Go through local databases and collections and add stuff to Current // as needed: @@ -962,7 +964,7 @@ function updateCurrentForCollections(localErrors, current) { let currentCollectionInfo = fetchKey(current, 'Collections', database, shardInfo.planId, shard); if (!_.isEqual(localCollectionInfo, currentCollectionInfo)) { - trx[0][curCollections + database + '/' + shardInfo.planId + '/' + shardInfo.name] = { + trx[curCollections + database + '/' + shardInfo.planId + '/' + shardInfo.name] = { op: 'set', new: localCollectionInfo, }; @@ -971,7 +973,7 @@ function updateCurrentForCollections(localErrors, current) { let currentServers = fetchKey(current, 'Collections', database, shardInfo.planId, shard, 'servers'); // we were previously leader and we are done resigning. update current and let supervision handle the rest if (Array.isArray(currentServers) && currentServers[0] === ourselves) { - trx[0][curCollections + database + '/' + shardInfo.planId + '/' + shardInfo.name + '/servers'] = { + trx[curCollections + database + '/' + shardInfo.planId + '/' + shardInfo.name + '/servers'] = { op: 'set', new: ['_' + ourselves].concat(db._collection(shardInfo.name).getFollowers()), }; @@ -1009,8 +1011,7 @@ function updateCurrentForCollections(localErrors, current) { let cur = currentCollections[database][collection][shard]; if (!localCollections.hasOwnProperty(shard) && cur.servers[0] === ourselves) { - makeDropCurrentEntryCollection(database, collection, shard, - trx); + Object.assign(trx, makeDropCurrentEntryCollection(database, collection, shard)); } } } @@ -1120,8 +1121,9 @@ function migratePrimary(plan, current) { // diff current and local and prepare agency transactions or whatever // to update current. Will report the errors created locally to the agency let trx = updateCurrentForCollections(localErrors, current); - if (trx.length > 0 && Object.keys(trx[0]).length !== 0) { - trx[0][curVersion] = {op: 'increment'}; + if (Object.keys(trx).length > 0) { + trx[curVersion] = {op: 'increment'}; + trx = [trx]; // TODO: reduce timeout when we can: try { let res = global.ArangoAgency.write([trx]); @@ -1288,9 +1290,9 @@ function migrateAnyServer(plan, current) { // diff current and local and prepare agency transactions or whatever // to update current. 
will report the errors created locally to the agency let trx = updateCurrentForDatabases(localErrors, current.Databases); - if (Object.keys(trx).length !== 0) { + if (Object.keys(trx).length > 0) { + trx[curVersion] = {op: 'increment'}; trx = [trx]; - trx[0][curVersion] = {op: 'increment'}; // TODO: reduce timeout when we can: try { let res = global.ArangoAgency.write([trx]); diff --git a/js/server/tests/cluster-sync/cluster-sync-test-noncluster-spec.js b/js/server/tests/cluster-sync/cluster-sync-test-noncluster-spec.js index 79ac4cce66..353403e8c4 100644 --- a/js/server/tests/cluster-sync/cluster-sync-test-noncluster-spec.js +++ b/js/server/tests/cluster-sync/cluster-sync-test-noncluster-spec.js @@ -740,7 +740,7 @@ describe('Cluster sync', function() { expect(db._collection('s100001').isLeader()).to.equal(true); }); }); - describe('Update current', function() { + describe('Update current database', function() { beforeEach(function() { db._databases().forEach(database => { if (database !== '_system') { @@ -854,4 +854,7 @@ describe('Cluster sync', function() { expect(result['/arango/Current/Databases/testi/repltest']).to.have.deep.property('new.error', false); }); }); + describe('Update current collection', function() { + + }); }); From d3642f1487da87bd3c4aae3de5e83d7c72d11d9a Mon Sep 17 00:00:00 2001 From: Andreas Streichardt Date: Tue, 24 Jan 2017 18:35:26 +0100 Subject: [PATCH 2/7] First update current tests --- js/server/modules/@arangodb/cluster.js | 26 +++-- .../cluster-sync-test-noncluster-spec.js | 103 +++++++++++++++++- 2 files changed, 114 insertions(+), 15 deletions(-) diff --git a/js/server/modules/@arangodb/cluster.js b/js/server/modules/@arangodb/cluster.js index 2f7cc1238c..0156396edb 100644 --- a/js/server/modules/@arangodb/cluster.js +++ b/js/server/modules/@arangodb/cluster.js @@ -893,8 +893,7 @@ function executePlanForCollections(plannedCollections) { // / @brief updateCurrentForCollections // ///////////////////////////////////////////////////////////////////////////// -function updateCurrentForCollections(localErrors, current) { - let currentCollections = current.Collections; +function updateCurrentForCollections(localErrors, currentCollections) { let ourselves = global.ArangoServerState.id(); let db = require('internal').db; @@ -906,10 +905,12 @@ function updateCurrentForCollections(localErrors, current) { function assembleLocalCollectionInfo(info, error) { let coll = db._collection(info.name); let payload = { - error: error.error, - errorMessage: error.errorMessage, - errorNum: error.errorNum, + error: error.error || false, + errorMessage: error.errorMessage || '', + errorNum: error.errorNum || 0, }; + let indexErrors = fetchKey(error, 'indexes') || {}; + payload.indexes = coll.getIndexes().map(index => { let agencyIndex = {}; Object.assign(agencyIndex, index); @@ -921,14 +922,14 @@ function updateCurrentForCollections(localErrors, current) { agencyIndex.id = index.id; } - if (error.indexes[agencyIndex.id] !== undefined) { - Object.assign(agencyIndex, error.indexes[agencyIndex.id]); + if (indexErrors[agencyIndex.id] !== undefined) { + Object.assign(agencyIndex, indexError); delete error.indexes[agencyIndex.id]; } return agencyIndex; }); // add the remaining errors which do not have a local id - Object.keys(error.indexes).forEach(indexId => { + Object.keys(indexErrors).forEach(indexId => { payload.indexes.push(error.indexes[indexId]); }); @@ -960,9 +961,9 @@ function updateCurrentForCollections(localErrors, current) { if (localCollections.hasOwnProperty(shard)) { let 
shardInfo = localCollections[shard]; if (shardInfo.isLeader) { - let localCollectionInfo = assembleLocalCollectionInfo(shardInfo, localErrors[shard]); + let localCollectionInfo = assembleLocalCollectionInfo(shardInfo, localErrors[shard] || {}); - let currentCollectionInfo = fetchKey(current, 'Collections', database, shardInfo.planId, shard); + let currentCollectionInfo = fetchKey(currentCollections, database, shardInfo.planId, shard); if (!_.isEqual(localCollectionInfo, currentCollectionInfo)) { trx[curCollections + database + '/' + shardInfo.planId + '/' + shardInfo.name] = { op: 'set', @@ -970,7 +971,7 @@ function updateCurrentForCollections(localErrors, current) { }; } } else { - let currentServers = fetchKey(current, 'Collections', database, shardInfo.planId, shard, 'servers'); + let currentServers = fetchKey(currentCollections, database, shardInfo.planId, shard, 'servers'); // we were previously leader and we are done resigning. update current and let supervision handle the rest if (Array.isArray(currentServers) && currentServers[0] === ourselves) { trx[curCollections + database + '/' + shardInfo.planId + '/' + shardInfo.name + '/servers'] = { @@ -1242,7 +1243,8 @@ function updateCurrentForDatabases(localErrors, currentDatabases) { for (name in localDatabases) { if (localDatabases.hasOwnProperty(name)) { if (!currentDatabases.hasOwnProperty(name) || - !currentDatabases[name].hasOwnProperty(ourselves)) { + !currentDatabases[name].hasOwnProperty(ourselves) || + currentDatabases[name][ourselves].error == true) { console.debug("adding entry in Current for database '%s'", name); trx = Object.assign(trx, makeAddDatabaseAgencyOperation({error: false, errorNum: 0, name: name, id: localDatabases[name].id, diff --git a/js/server/tests/cluster-sync/cluster-sync-test-noncluster-spec.js b/js/server/tests/cluster-sync/cluster-sync-test-noncluster-spec.js index 353403e8c4..c15e1349b7 100644 --- a/js/server/tests/cluster-sync/cluster-sync-test-noncluster-spec.js +++ b/js/server/tests/cluster-sync/cluster-sync-test-noncluster-spec.js @@ -840,8 +840,12 @@ describe('Cluster sync', function() { id: 1, name: '_system', }, - testi: { - error: 'gut', + }, + testi: { + repltest: { + id: 2, + name: 'testi', + error: true, } }, }; @@ -853,8 +857,101 @@ describe('Cluster sync', function() { expect(result['/arango/Current/Databases/testi/repltest']).to.have.deep.property('new.name', 'testi'); expect(result['/arango/Current/Databases/testi/repltest']).to.have.deep.property('new.error', false); }); + it('should not do anything if nothing happened', function() { + let current = { + _system: { + repltest: { + id: 1, + name: '_system', + }, + }, + }; + let result = cluster.updateCurrentForDatabases({}, current); + expect(Object.keys(result)).to.have.lengthOf(0); + }); }); describe('Update current collection', function() { - + beforeEach(function() { + db._useDatabase('_system'); + db._databases().forEach(database => { + if (database !== '_system') { + db._dropDatabase(database); + } + }); + db._createDatabase('testung'); + db._useDatabase('testung'); + }); + it('should report a new collection in current', function() { + let props = { planId: '888111' }; + let collection = db._create('testi', props); + collection.assumeLeadership(); + let current = { + }; + let result = cluster.updateCurrentForCollections({}, current); + expect(Object.keys(result)).to.have.length.of.at.least(1); + expect(result).to.have.property('/arango/Current/Collections/testung/888111/testi'); + 
expect(result['/arango/Current/Collections/testung/888111/testi']).to.have.property('op', 'set'); + expect(result['/arango/Current/Collections/testung/888111/testi']).to.have.deep.property('new.servers') + .that.is.an('array') + .that.deep.equals(['repltest']); + }); + it('should not do anything if nothing changed', function() { + let current = { + }; + let result = cluster.updateCurrentForCollections({}, current); + expect(Object.keys(result)).to.have.lengthOf(0); + }); + it('should not report any collections for which we are not leader (will be handled in replication)', function() { + let props = { planId: '888111' }; + let collection = db._create('testi', props); + let current = { + }; + let result = cluster.updateCurrentForCollections({}, current); + expect(Object.keys(result)).to.have.lengthOf(0); + }); + it('should not delete any collections for which we are not a leader locally', function() { + let current = { + testung: { + 888111: { + testi : { "error" : false, "errorMessage" : "", "errorNum" : 0, "indexes" : [ { "id" : "0", "type" : "primary", "fields" : [ "_key" ], "selectivityEstimate" : 1, "unique" : true, "sparse" : false } ], "servers" : [ "the-master", "repltest" ] } + }, + } + }; + let result = cluster.updateCurrentForCollections({}, current); + expect(Object.keys(result)).to.have.lengthOf(0); + }); + it('should resign leadership for which we are no more leader locally', function() { + let props = { planId: '888111' }; + let collection = db._create('testi', props); + let current = { + testung: { + 888111: { + testi : { "error" : false, "errorMessage" : "", "errorNum" : 0, "indexes" : [ { "id" : "0", "type" : "primary", "fields" : [ "_key" ], "selectivityEstimate" : 1, "unique" : true, "sparse" : false } ], "servers" : [ "repltest" ] } + }, + } + }; + let result = cluster.updateCurrentForCollections({}, current); + expect(result).to.be.an('object'); + expect(Object.keys(result)).to.have.lengthOf(1); + expect(result).to.have.property('/arango/Current/Collections/testung/888111/testi/servers') + .that.have.property('op', 'set'); + expect(result).to.have.property('/arango/Current/Collections/testung/888111/testi/servers') + .that.has.property('new') + .with.deep.equal(["_repltest"]); + }); + it('should delete any collections for which we are not a leader locally', function() { + let current = { + testung: { + 888111: { + testi : { "error" : false, "errorMessage" : "", "errorNum" : 0, "indexes" : [ { "id" : "0", "type" : "primary", "fields" : [ "_key" ], "selectivityEstimate" : 1, "unique" : true, "sparse" : false } ], "servers" : [ "repltest" ] } + }, + } + }; + let result = cluster.updateCurrentForCollections({}, current); + expect(result).to.be.an('object'); + expect(Object.keys(result)).to.have.lengthOf(1); + expect(result).to.have.property('/arango/Current/Collections/testung/888111/testi') + .that.has.deep.property('op', 'delete'); + }); }); }); From d91a6af1a71256931425771c6d2bebc8d37abaf7 Mon Sep 17 00:00:00 2001 From: Andreas Streichardt Date: Tue, 24 Jan 2017 18:51:05 +0100 Subject: [PATCH 3/7] Remove singletransaction and fetch collection from vocbase directly --- arangod/V8Server/v8-collection.cpp | 63 +++++++++++--------------- js/server/modules/@arangodb/cluster.js | 2 +- 2 files changed, 27 insertions(+), 38 deletions(-) diff --git a/arangod/V8Server/v8-collection.cpp b/arangod/V8Server/v8-collection.cpp index c082919e08..838e53a546 100644 --- a/arangod/V8Server/v8-collection.cpp +++ b/arangod/V8Server/v8-collection.cpp @@ -956,32 +956,29 @@ static void 
JS_LeaderResign(v8::FunctionCallbackInfo const& args) { } if (ServerState::instance()->isDBServer()) { - arangodb::LogicalCollection const* collection = + arangodb::LogicalCollection const* v8Collection = TRI_UnwrapClass(args.Holder(), WRP_VOCBASE_COL_TYPE); - if (collection == nullptr) { + if (v8Collection == nullptr) { TRI_V8_THROW_EXCEPTION_INTERNAL("cannot extract collection"); } - TRI_vocbase_t* vocbase = collection->vocbase(); - std::string collectionName = collection->name(); + TRI_vocbase_t* vocbase = v8Collection->vocbase(); if (vocbase == nullptr) { TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_DATABASE_NOT_FOUND); } - auto transactionContext = std::make_shared(vocbase, true); - - SingleCollectionTransaction trx(transactionContext, collectionName, - TRI_TRANSACTION_READ); - int res = trx.begin(); - if (res != TRI_ERROR_NO_ERROR) { - TRI_V8_THROW_EXCEPTION(res); + std::string collectionName = v8Collection->name(); + auto collection = vocbase->lookupCollection(collectionName); + if (collection == nullptr) { + TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND); } + // do not reset followers at this time...we are still the only source of truth // to trust... //trx.documentCollection()->followers()->clear(); - trx.documentCollection()->followers()->setLeader(false); + collection->followers()->setLeader(false); } TRI_V8_RETURN_UNDEFINED(); @@ -1003,30 +1000,26 @@ static void JS_AssumeLeadership(v8::FunctionCallbackInfo const& args) } if (ServerState::instance()->isDBServer()) { - arangodb::LogicalCollection const* collection = + arangodb::LogicalCollection const* v8Collection = TRI_UnwrapClass(args.Holder(), WRP_VOCBASE_COL_TYPE); - if (collection == nullptr) { + if (v8Collection == nullptr) { TRI_V8_THROW_EXCEPTION_INTERNAL("cannot extract collection"); } - TRI_vocbase_t* vocbase = collection->vocbase(); - std::string collectionName = collection->name(); + TRI_vocbase_t* vocbase = v8Collection->vocbase(); if (vocbase == nullptr) { TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_DATABASE_NOT_FOUND); } - auto transactionContext = std::make_shared(vocbase, true); - - SingleCollectionTransaction trx(transactionContext, collectionName, - TRI_TRANSACTION_READ); - int res = trx.begin(); - if (res != TRI_ERROR_NO_ERROR) { - TRI_V8_THROW_EXCEPTION(res); + std::string collectionName = v8Collection->name(); + auto collection = vocbase->lookupCollection(collectionName); + if (collection == nullptr) { + TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND); } - trx.documentCollection()->followers()->clear(); - trx.documentCollection()->followers()->setLeader(true); + collection->followers()->clear(); + collection->followers()->setLeader(true); } TRI_V8_RETURN_UNDEFINED(); @@ -1094,29 +1087,25 @@ static void JS_GetFollowers(v8::FunctionCallbackInfo const& args) { v8::Handle list = v8::Array::New(isolate); if (ServerState::instance()->isDBServer()) { - arangodb::LogicalCollection const* collection = + arangodb::LogicalCollection const* v8Collection = TRI_UnwrapClass(args.Holder(), WRP_VOCBASE_COL_TYPE); - if (collection == nullptr) { + if (v8Collection == nullptr) { TRI_V8_THROW_EXCEPTION_INTERNAL("cannot extract collection"); } - TRI_vocbase_t* vocbase = collection->vocbase(); - std::string collectionName = collection->name(); + TRI_vocbase_t* vocbase = v8Collection->vocbase(); if (vocbase == nullptr) { TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_DATABASE_NOT_FOUND); } - auto transactionContext = std::make_shared(vocbase, true); - - SingleCollectionTransaction trx(transactionContext, 
collectionName, - TRI_TRANSACTION_READ); - int res = trx.begin(); - if (res != TRI_ERROR_NO_ERROR) { - TRI_V8_THROW_EXCEPTION(res); + std::string collectionName = v8Collection->name(); + auto collection = vocbase->lookupCollection(collectionName); + if (collection == nullptr) { + TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND); } - std::unique_ptr const& followerInfo = trx.documentCollection()->followers(); + std::unique_ptr const& followerInfo = collection->followers(); std::shared_ptr const> followers = followerInfo->get(); uint32_t i = 0; for (auto const& n : *followers) { diff --git a/js/server/modules/@arangodb/cluster.js b/js/server/modules/@arangodb/cluster.js index 0156396edb..6ae2ca4efc 100644 --- a/js/server/modules/@arangodb/cluster.js +++ b/js/server/modules/@arangodb/cluster.js @@ -1244,7 +1244,7 @@ function updateCurrentForDatabases(localErrors, currentDatabases) { if (localDatabases.hasOwnProperty(name)) { if (!currentDatabases.hasOwnProperty(name) || !currentDatabases[name].hasOwnProperty(ourselves) || - currentDatabases[name][ourselves].error == true) { + currentDatabases[name][ourselves].error) { console.debug("adding entry in Current for database '%s'", name); trx = Object.assign(trx, makeAddDatabaseAgencyOperation({error: false, errorNum: 0, name: name, id: localDatabases[name].id, From acbb8926123a836cca17555cbf6a516e46f2249b Mon Sep 17 00:00:00 2001 From: Andreas Streichardt Date: Tue, 24 Jan 2017 19:07:34 +0100 Subject: [PATCH 4/7] Classic...fix unittest..fail real system --- js/server/modules/@arangodb/cluster.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/js/server/modules/@arangodb/cluster.js b/js/server/modules/@arangodb/cluster.js index 6ae2ca4efc..731aafb268 100644 --- a/js/server/modules/@arangodb/cluster.js +++ b/js/server/modules/@arangodb/cluster.js @@ -923,7 +923,7 @@ function updateCurrentForCollections(localErrors, currentCollections) { } if (indexErrors[agencyIndex.id] !== undefined) { - Object.assign(agencyIndex, indexError); + Object.assign(agencyIndex, indexErrors[agencyIndex.id]); delete error.indexes[agencyIndex.id]; } return agencyIndex; @@ -1121,7 +1121,7 @@ function migratePrimary(plan, current) { // diff current and local and prepare agency transactions or whatever // to update current. 
Will report the errors created locally to the agency - let trx = updateCurrentForCollections(localErrors, current); + let trx = updateCurrentForCollections(localErrors, current.Collections); if (Object.keys(trx).length > 0) { trx[curVersion] = {op: 'increment'}; trx = [trx]; From 1f51117c3640bd8a01d2ede37cde099cce5d3a30 Mon Sep 17 00:00:00 2001 From: jsteemann Date: Wed, 25 Jan 2017 09:25:33 +0100 Subject: [PATCH 5/7] added assertions for bug hunt --- arangod/Statistics/statistics.cpp | 9 +++++++++ arangod/Statistics/statistics.h | 8 +++++--- 2 files changed, 14 insertions(+), 3 deletions(-) diff --git a/arangod/Statistics/statistics.cpp b/arangod/Statistics/statistics.cpp index 15205e78a4..533f01269e 100644 --- a/arangod/Statistics/statistics.cpp +++ b/arangod/Statistics/statistics.cpp @@ -182,6 +182,8 @@ static void ProcessRequestStatistics(TRI_request_statistics_t* statistics) { statistics->reset(); // put statistics item back onto the freelist + TRI_ASSERT(!statistics->_released); + statistics->_released = true; int tries = 0; while (++tries < 1000) { if (RequestFreeList.push(statistics)) { @@ -221,6 +223,8 @@ TRI_request_statistics_t* TRI_AcquireRequestStatistics() { TRI_request_statistics_t* statistics = nullptr; if (StatisticsFeature::enabled() && RequestFreeList.pop(statistics)) { + TRI_ASSERT(statistics->_released); + statistics->_released = false; return statistics; } @@ -238,6 +242,8 @@ void TRI_ReleaseRequestStatistics(TRI_request_statistics_t* statistics) { return; } + TRI_ASSERT(!statistics->_released); + if (!statistics->_ignore) { bool ok = RequestFinishedList.push(statistics); TRI_ASSERT(ok); @@ -245,6 +251,7 @@ void TRI_ReleaseRequestStatistics(TRI_request_statistics_t* statistics) { statistics->reset(); bool ok = RequestFreeList.push(statistics); + statistics->_released = true; TRI_ASSERT(ok); } } @@ -430,6 +437,7 @@ void StatisticsThread::run() { { TRI_request_statistics_t* entry = nullptr; while (RequestFreeList.pop(entry)) { + TRI_ASSERT(entry->_released); delete entry; } } @@ -592,6 +600,7 @@ void TRI_InitializeStatistics() { for (size_t i = 0; i < QUEUE_SIZE; ++i) { auto entry = new TRI_request_statistics_t; + TRI_ASSERT(entry->_released); bool ok = RequestFreeList.push(entry); TRI_ASSERT(ok); } diff --git a/arangod/Statistics/statistics.h b/arangod/Statistics/statistics.h index 0571c621e8..db3f2e2adc 100644 --- a/arangod/Statistics/statistics.h +++ b/arangod/Statistics/statistics.h @@ -52,12 +52,13 @@ struct TRI_request_statistics_t { _async(false), _tooLarge(false), _executeError(false), - _ignore(false) { + _ignore(false), + _released(true) { #ifdef USE_DEV_TIMERS _id = nullptr; #endif } - + void reset() { _readStart = 0.0; _readEnd = 0.0; @@ -79,7 +80,7 @@ struct TRI_request_statistics_t { _timings.clear(); #endif } - + std::string to_string(); void trace_log(); @@ -101,6 +102,7 @@ struct TRI_request_statistics_t { bool _tooLarge; bool _executeError; bool _ignore; + bool _released; #ifdef USE_DEV_TIMERS void* _id; From 41e3268ccde9b97f15514a55af1a06f49ebbb049 Mon Sep 17 00:00:00 2001 From: jsteemann Date: Wed, 25 Jan 2017 10:26:30 +0100 Subject: [PATCH 6/7] clean up job queues on shutdown --- arangod/Scheduler/JobQueue.cpp | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/arangod/Scheduler/JobQueue.cpp b/arangod/Scheduler/JobQueue.cpp index 68c88971ef..228ccf89cc 100644 --- a/arangod/Scheduler/JobQueue.cpp +++ b/arangod/Scheduler/JobQueue.cpp @@ -90,6 +90,14 @@ class JobQueueThread final : public Thread { _jobQueue->waitForWork(); } } + + // clear all 
non-processed jobs for (size_t i = 0; i < JobQueue::SYSTEM_QUEUE_SIZE; ++i) { Job* job = nullptr; while (_jobQueue->pop(i, job)) { delete job; } } } private: From 208729ae103d489731cc9bb48a1867d204461cac Mon Sep 17 00:00:00 2001 From: jsteemann Date: Wed, 25 Jan 2017 10:26:48 +0100 Subject: [PATCH 7/7] use make_unique --- arangod/GeneralServer/GeneralCommTask.cpp | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/arangod/GeneralServer/GeneralCommTask.cpp b/arangod/GeneralServer/GeneralCommTask.cpp index 49963dd8c6..ae6dad8657 100644 --- a/arangod/GeneralServer/GeneralCommTask.cpp +++ b/arangod/GeneralServer/GeneralCommTask.cpp @@ -242,14 +242,13 @@ bool GeneralCommTask::handleRequestAsync(std::shared_ptr<RestHandler> handler, size_t queue = handler->queue(); auto self = shared_from_this(); - std::unique_ptr<Job> job( - new Job(_server, std::move(handler), + auto job = std::make_unique<Job>(_server, std::move(handler), [self, this](std::shared_ptr<RestHandler> h) { JobGuard guard(_loop); guard.work(); h->asyncRunEngine(); - })); + }); return SchedulerFeature::SCHEDULER->jobQueue()->queue(queue, std::move(job)); }
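
Note on the calling convention that patches 1-4 converge on: updateCurrentForCollections() and updateCurrentForDatabases() now return a plain object keyed by agency paths instead of mutating a pre-wrapped transaction array, which is what makes them unit-testable (the new specs just inspect the returned object), while the callers in migratePrimary()/migrateAnyServer() add the version increment and wrap the object before writing to the agency. A minimal sketch of that contract as it would run in the DB server's JavaScript context; the literal '/arango/Current/Version' path stands in for the module-internal curVersion constant and is an assumption, as are the surrounding localErrors/current variables taken from the caller:

    const cluster = require('@arangodb/cluster');

    // localErrors: per-shard error map produced by executePlanForCollections()
    // current:     the agency's Current subtree as fetched by the caller
    let trx = cluster.updateCurrentForCollections(localErrors, current.Collections);

    if (Object.keys(trx).length > 0) {
      // the callee returns { '<agency path>': {op: 'set'|'delete', ...}, ... };
      // the caller bumps the Current version and wraps it for the agency write
      trx['/arango/Current/Version'] = {op: 'increment'};  // assumed value of curVersion
      global.ArangoAgency.write([[trx]]);
    }

The same shape is what the new specs assert against, e.g. expecting the key '/arango/Current/Collections/testung/888111/testi' with {op: 'set', new: {...}} for a newly led shard, or {op: 'delete'} for a shard that no longer exists locally.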