
Merge branch 'devel' of github.com:arangodb/arangodb into devel

Andreas Streichardt 2016-06-08 15:23:23 +02:00
commit 0ffed98bd3
5 changed files with 34 additions and 14 deletions

View File

@@ -182,7 +182,7 @@ class Agent : public arangodb::Thread {
 };
 inline arangodb::consensus::write_ret_t transact (
-  Agent* _agent, Builder const& transaction) {
+  Agent* _agent, Builder const& transaction, bool waitForCommit = true) {
   query_t envelope = std::make_shared<Builder>();
@@ -195,8 +195,15 @@ inline arangodb::consensus::write_ret_t transact (
     LOG_TOPIC(ERR, Logger::AGENCY) << e.what();
   }
-  LOG_TOPIC(DEBUG, Logger::AGENCY) << envelope->toJson();
-  return _agent->write(envelope);
+  LOG_TOPIC(INFO, Logger::AGENCY) << envelope->toJson();
+  auto ret = _agent->write(envelope);
+  if (waitForCommit) {
+    auto maximum = *std::max_element(ret.indices.begin(), ret.indices.end());
+    if (maximum > 0) { // some baby has worked
+      _agent->waitFor(maximum);
+    }
+  }
+  return ret;
 }
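
Reviewer note: the new waitForCommit flag makes transact() block until the highest raft index returned by the write has been committed, instead of returning as soon as the write is queued. A minimal sketch of how a caller might use the flag (the agent pointer and the transaction contents are placeholders, not taken from this commit):

    Builder trx;
    // ... fill trx with the agency transaction ...

    // Default: block until the write is committed in the raft log.
    arangodb::consensus::write_ret_t committed = transact(agent, trx);

    // Fire-and-forget: skip the waitFor() on the highest returned index.
    arangodb::consensus::write_ret_t queued = transact(agent, trx, false);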

View File

@@ -245,7 +245,7 @@ bool CleanOutServer::scheduleMoveShards() const {
       "Range error picking destination for shard " + shard.first;
   }
-  // Shedule move
+  // Schedule move
   MoveShard (
     _snapshot, _agent, _jobId + "-" + std::to_string(sub++), _jobId,
     _agencyPrefix, database.first, collptr.first, shard.first, _server,

View File

@@ -114,8 +114,13 @@ Node::Node(Node const& other) :
   _node_name(other._node_name),
   _parent(nullptr),
   _store(nullptr),
-  _children(other._children),
-  _value(other._value) {}
+  _value(other._value) {
+  for (auto const& p : other._children) {
+    auto copy = std::make_shared<Node>(*p.second);
+    _children.insert(std::make_pair(p.first, copy));
+  }
+}
 // Assignment of rhs slice
 Node& Node::operator=(VPackSlice const& slice) {
@@ -152,7 +157,11 @@ Node& Node::operator=(Node const& rhs) {
   // Must not move rhs's _parent, _ttl, _observers
   removeTimeToLive();
   _node_name = rhs._node_name;
-  _children = rhs._children;
+  _children.clear();
+  for (auto const& p : rhs._children) {
+    auto copy = std::make_shared<Node>(*p.second);
+    _children.insert(std::make_pair(p.first, copy));
+  }
   _value = rhs._value;
   return *this;
 }
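
Reviewer note: both hunks above replace the plain copy of the shared_ptr map with a per-child deep copy, so a copied Node no longer aliases the children of the source. A self-contained toy illustration of the difference (the Node struct below is a stand-in, not the real arangodb::consensus::Node):

    #include <iostream>
    #include <map>
    #include <memory>
    #include <string>

    struct Node {
      int value = 0;
      std::map<std::string, std::shared_ptr<Node>> children;
    };

    int main() {
      Node original;
      original.children["child"] = std::make_shared<Node>();

      // Shallow copy: both maps hold the same shared_ptr, so a write
      // through the copy is visible in the original.
      Node shallow;
      shallow.children = original.children;
      shallow.children["child"]->value = 42;
      std::cout << original.children["child"]->value << std::endl;  // 42

      // Deep copy (the approach taken in the hunks above): every child
      // is cloned, so the trees no longer alias each other.
      Node deep;
      for (auto const& p : original.children) {
        deep.children.insert(
            std::make_pair(p.first, std::make_shared<Node>(*p.second)));
      }
      deep.children["child"]->value = 7;
      std::cout << original.children["child"]->value << std::endl;  // still 42
      return 0;
    }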

View File

@@ -268,7 +268,6 @@ void Supervision::run() {
 void Supervision::workJobs() {
   Node::Children const todos = _snapshot(toDoPrefix).children();
-  Node::Children const pends = _snapshot(pendingPrefix).children();
   for (auto const& todoEnt : todos) {
     Node const& job = *todoEnt.second;
@@ -285,6 +284,9 @@ void Supervision::workJobs() {
     } catch (std::exception const&) {}
   }
+  _snapshot = _agent->readDB().get(_agencyPrefix);
+  Node::Children const pends = _snapshot(pendingPrefix).children();
   for (auto const& pendEnt : pends) {
     Node const& job = *pendEnt.second;

View File

@@ -849,10 +849,12 @@ function dropLocalCollections (plannedCollections, writeLocked) {
         // is also no problem, since the leader will soon notice
         // that the shard here is gone and will drop us automatically:
         var servers = shardMap[collection];
-        var endpoint = ArangoClusterInfo.getServerEndpoint(servers[0]);
-        try {
-          removeShardFollower(endpoint, database, collection);
-        } catch (err) {
+        if (servers !== undefined) {
+          var endpoint = ArangoClusterInfo.getServerEndpoint(servers[0]);
+          try {
+            removeShardFollower(endpoint, database, collection);
+          } catch (err) {
+          }
+        }
         console.info("dropping local shard '%s/%s' of '%s/%s",
           database,
@@ -995,11 +997,11 @@ function synchronizeOneShard(database, shard, planId, leader) {
       }
       catch (err) {
         console.info("synchronizeOneShard: syncCollection did not work,",
-          "trying again later for shard", shard);
+          "trying again later for shard", shard, err);
       }
       if (--count <= 0) {
         console.error("synchronizeOneShard: syncCollection did not work",
-          "after many tries, giving up on shard", shard);
+          "after many tries, giving up on shard", shard, err);
         throw "syncCollection did not work";
       }
       wait(1.0);