
Clang 5.0 complains about unused lambda captures (#3390)

m0ppers 2017-10-13 12:20:48 +02:00 committed by Frank Celler
parent c92dc55764
commit bb1d303473
13 changed files with 979 additions and 685 deletions
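
For context, the warning being silenced is Clang 5.0's -Wunused-lambda-capture: it fires once per explicitly named capture that the lambda body never uses, which is why some lambdas below drop only this while others keep this and shed a stale reference capture. A minimal sketch of the before/after (illustrative code, not taken from this commit):

// repro.cpp -- compile with: clang++ -std=c++11 -Wunused-lambda-capture -c repro.cpp
#include <algorithm>
#include <vector>

struct Worker {
  int threshold = 10;

  int countAbove(std::vector<int> const& values) const {
    int limit = threshold;
    // Before: warning: lambda capture 'this' is not used
    //   auto pred = [this, limit](int v) { return v > limit; };
    // After: list only what the body actually reads
    auto pred = [limit](int v) { return v > limit; };
    return static_cast<int>(std::count_if(values.begin(), values.end(), pred));
  }
};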

View File

@@ -238,7 +238,7 @@ bool AddFollower::start() {
   // --- Plan changes
   doForAllShards(_snapshot, _database, shardsLikeMe,
-    [this, &trx, &chosen](Slice plan, Slice current, std::string& planPath) {
+    [&trx, &chosen](Slice plan, Slice current, std::string& planPath) {
       trx.add(VPackValue(planPath));
       { VPackArrayBuilder serverList(&trx);
         for (auto const& srv : VPackArrayIterator(plan)) {

View File

@@ -539,7 +539,7 @@ JOB_STATUS MoveShard::pendingLeader() {
   { VPackObjectBuilder trxObject(&trx);
     VPackObjectBuilder preObject(&pre);
     doForAllShards(_snapshot, _database, shardsLikeMe,
-      [this, &pre](Slice plan, Slice current, std::string& planPath) {
+      [&pre](Slice plan, Slice current, std::string& planPath) {
        // Precondition: Plan still as it was
        pre.add(VPackValue(planPath));
        { VPackObjectBuilder guard(&pre);
@@ -586,7 +586,7 @@ JOB_STATUS MoveShard::pendingFollower() {
   size_t done = 0;  // count the number of shards done
   doForAllShards(_snapshot, _database, shardsLikeMe,
-    [this, &done](Slice plan, Slice current, std::string& planPath) {
+    [&done](Slice plan, Slice current, std::string& planPath) {
       if (ClusterHelpers::compareServerLists(plan, current)) {
         ++done;
       }

View File

@@ -178,7 +178,7 @@ bool RemoveFollower::start() {
       }
     }
     doForAllShards(_snapshot, _database, shardsLikeMe,
-      [this, &planned, &overview, &leaderBad](Slice plan,
+      [&planned, &overview, &leaderBad](Slice plan,
                                          Slice current,
                                          std::string& planPath) {
         if (current.length() > 0) {
@@ -320,7 +320,7 @@ bool RemoveFollower::start() {
   // --- Plan changes
   doForAllShards(_snapshot, _database, shardsLikeMe,
-    [this, &trx, &chosenToRemove](Slice plan, Slice current,
+    [&trx, &chosenToRemove](Slice plan, Slice current,
                              std::string& planPath) {
       trx.add(VPackValue(planPath));
       { VPackArrayBuilder serverList(&trx);

File diff suppressed because it is too large

View File

@@ -337,7 +337,7 @@ bool GeneralCommTask::handleRequestAsync(std::shared_ptr<RestHandler> handler,
   if (store) {
     auto self = shared_from_this();
-    handler->initEngine(_loop, [this, self](RestHandler* handler) {
+    handler->initEngine(_loop, [self](RestHandler* handler) {
       GeneralServerFeature::JOB_MANAGER->finishAsyncJob(handler);
     });
   } else {
@@ -349,7 +349,7 @@ bool GeneralCommTask::handleRequestAsync(std::shared_ptr<RestHandler> handler,
   auto job = std::make_unique<Job>(
       _server, std::move(handler),
-      [self, this](std::shared_ptr<RestHandler> h) { h->asyncRunEngine(); });
+      [self](std::shared_ptr<RestHandler> h) { h->asyncRunEngine(); });
   return SchedulerFeature::SCHEDULER->queue(std::move(job));
 }
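
Both hunks above keep self, the std::shared_ptr obtained from shared_from_this(), and drop this: the shared pointer already keeps the task alive until the callback runs and gives access to the object, so the raw this capture was dead weight. A generic sketch of that keep-alive idiom (names are illustrative, not the ArangoDB API):

#include <functional>
#include <memory>

struct Task : std::enable_shared_from_this<Task> {
  void run() {}

  std::function<void()> makeCallback() {
    auto self = shared_from_this();  // extends the Task's lifetime
    // Capturing 'this' as well would trigger -Wunused-lambda-capture,
    // since the body only goes through 'self'.
    return [self]() { self->run(); };
  }
};

// Usage: auto task = std::make_shared<Task>(); auto cb = task->makeCallback();
// 'cb' stays valid even after 'task' goes out of scope.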

View File

@@ -75,7 +75,7 @@ void MMFilesCleanupThread::run() {
     // check if we can get the compactor lock exclusively
     // check if compaction is currently disallowed
-    engine->tryPreventCompaction(_vocbase, [this, &collections, &iterations](TRI_vocbase_t* vocbase) {
+    engine->tryPreventCompaction(_vocbase, [this, &collections](TRI_vocbase_t* vocbase) {
      try {
        // copy all collections
        collections = vocbase->collections(true);

View File

@@ -183,7 +183,7 @@ struct SLPAGraphFormat : public GraphFormat<SLPAValue, int8_t> {
       }
     }
     std::sort(vec.begin(), vec.end(),
-              [ptr](std::pair<uint64_t, double> a, std::pair<uint64_t, double> b) {
+              [](std::pair<uint64_t, double> a, std::pair<uint64_t, double> b) {
                 return a.second > b.second;
               });

View File

@@ -1243,7 +1243,7 @@ void TRI_replication_applier_t::toVelocyPack(VPackBuilder& builder) const {
   arangodb::basics::ScopeGuard guard{
       []() -> void {},
-      [&state, &config]() -> void {
+      [&state]() -> void {
        TRI_DestroyStateReplicationApplier(&state);
      }};
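
Here the unused capture sits in a cleanup lambda handed to a scope guard; only &state is needed for the destroy call. A generic scope-guard sketch in the same spirit (assumed shape, not ArangoDB's basics::ScopeGuard):

#include <utility>

template <typename F>
struct ScopeGuard {
  F fn;
  ~ScopeGuard() { fn(); }  // cleanup runs at scope exit
};

template <typename F>
ScopeGuard<F> makeGuard(F fn) {
  return ScopeGuard<F>{std::move(fn)};
}

void resetOnExit(int& state) {
  // Capture only what the cleanup body actually touches.
  auto guard = makeGuard([&state]() { state = 0; });
  // ... work that may throw or return early; the lambda runs either way
}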

File diff suppressed because it is too large

View File

@@ -87,7 +87,7 @@ TEST_CASE("cache::Manager", "[cache][!hide][longRunning]") {
   uint64_t operationCount = 4 * 1024 * 1024;
   std::atomic<uint64_t> hitCount(0);
   std::atomic<uint64_t> missCount(0);
-  auto worker = [&manager, &caches, cacheCount, initialInserts,
+  auto worker = [&caches, cacheCount, initialInserts,
                  operationCount, &hitCount,
                  &missCount](uint64_t lower, uint64_t upper) -> void {
     // fill with some initial data

View File

@@ -209,7 +209,7 @@ TEST_CASE("cache::PlainCache", "[cache][!hide][longRunning]") {
   uint64_t operationCount = 16 * 1024 * 1024;
   std::atomic<uint64_t> hitCount(0);
   std::atomic<uint64_t> missCount(0);
-  auto worker = [&manager, &cache, initialInserts, operationCount, &hitCount,
+  auto worker = [&cache, initialInserts, operationCount, &hitCount,
                  &missCount](uint64_t lower, uint64_t upper) -> void {
     // fill with some initial data
     for (uint64_t i = 0; i < initialInserts; i++) {

View File

@@ -82,7 +82,7 @@ TEST_CASE("cache::Rebalancer", "[cache][!hide][longRunning]") {
   uint64_t operationCount = 4 * 1024 * 1024;
   std::atomic<uint64_t> hitCount(0);
   std::atomic<uint64_t> missCount(0);
-  auto worker = [&manager, &caches, cacheCount, initialInserts,
+  auto worker = [&caches, cacheCount, initialInserts,
                  operationCount, &hitCount,
                  &missCount](uint64_t lower, uint64_t upper) -> void {
     // fill with some initial data

View File

@@ -159,7 +159,7 @@ TEST_CASE("cache with backing store", "[cache][!hide][longRunning]") {
     }
   };
-  auto writeWorker = [&store, &writersDone, writerCount, totalDocuments,
+  auto writeWorker = [&store, &writersDone,
                       batchSize, &writeWaitInterval](uint64_t lower,
                                                      uint64_t upper) -> void {
     uint64_t batches = (upper + 1 - lower) / batchSize;
@@ -250,7 +250,7 @@ TEST_CASE("cache with backing store", "[cache][!hide][longRunning]") {
     }
   };
-  auto writeWorker = [&store, &writersDone, writerCount, totalDocuments,
+  auto writeWorker = [&store, &writersDone,
                       writeBatchSize, &writeWaitInterval](
                           uint64_t lower, uint64_t upper) -> void {
     uint64_t batches = (upper + 1 - lower) / writeBatchSize;
@@ -356,8 +356,8 @@ TEST_CASE("cache with backing store", "[cache][!hide][longRunning]") {
     }
   };
-  auto writeWorker = [&store1, &store2, &storeBias, &writersDone, writerCount,
-                      totalDocuments, writeBatchSize, &writeWaitInterval](
+  auto writeWorker = [&store1, &store2, &storeBias, &writersDone,
+                      writeBatchSize, &writeWaitInterval](
                           uint64_t lower, uint64_t upper) -> void {
     uint64_t batches = (upper + 1 - lower) / writeBatchSize;
     uint64_t choice = lower;
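
The test-suite hunks all follow the same pattern: stress-test worker lambdas listing captures that later refactorings had orphaned (writerCount, totalDocuments, &manager). Note the warning also covers unused by-value captures, which additionally cost a copy per lambda. A self-contained sketch of such a worker (illustrative names, not the test fixtures above):

#include <atomic>
#include <cstdint>
#include <thread>

int main() {
  std::atomic<uint64_t> hitCount(0);
  uint64_t operationCount = 1024;
  // Only the captures the body reads are listed; an extra '&manager'-style
  // capture here would draw -Wunused-lambda-capture under Clang 5.0.
  auto worker = [operationCount, &hitCount](uint64_t lower,
                                            uint64_t upper) -> void {
    for (uint64_t i = lower; i < upper && i < operationCount; i++) {
      hitCount++;
    }
  };
  std::thread t1(worker, 0, 512);
  std::thread t2(worker, 512, 1024);
  t1.join();
  t2.join();
  return (hitCount.load() == 1024) ? 0 : 1;
}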