mirror of https://gitee.com/bigwinds/arangodb
Merge branch 'devel' of github.com:arangodb/arangodb into devel
commit 0ffed98bd3
@@ -182,7 +182,7 @@ class Agent : public arangodb::Thread {
 };
 
 inline arangodb::consensus::write_ret_t transact (
-  Agent* _agent, Builder const& transaction) {
+  Agent* _agent, Builder const& transaction, bool waitForCommit = true) {
 
   query_t envelope = std::make_shared<Builder>();
 
@@ -195,8 +195,15 @@ inline arangodb::consensus::write_ret_t transact (
     LOG_TOPIC(ERR, Logger::AGENCY) << e.what();
   }
 
-  LOG_TOPIC(DEBUG, Logger::AGENCY) << envelope->toJson();
-  return _agent->write(envelope);
+  LOG_TOPIC(INFO, Logger::AGENCY) << envelope->toJson();
+  auto ret = _agent->write(envelope);
+  if (waitForCommit) {
+    auto maximum = *std::max_element(ret.indices.begin(), ret.indices.end());
+    if (maximum > 0) { // some baby has worked
+      _agent->waitFor(maximum);
+    }
+  }
+  return ret;
 
 }
 
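
A minimal self-contained sketch of what the new waitForCommit branch does, using hypothetical stand-in types (FakeAgent, FakeWriteResult, transactSketch are invented here, not the real arangodb::consensus classes): after the write, the largest returned raft index is picked and the caller blocks until it is committed.

  #include <algorithm>
  #include <cstdint>
  #include <iostream>
  #include <vector>

  struct FakeWriteResult {
    std::vector<uint64_t> indices;  // raft log indices of the applied writes
  };

  struct FakeAgent {
    FakeWriteResult write() { return {{3, 7, 5}}; }  // pretend write result
    void waitFor(uint64_t index) {                   // pretend blocking wait
      std::cout << "waiting for commit of index " << index << std::endl;
    }
  };

  FakeWriteResult transactSketch(FakeAgent& agent, bool waitForCommit = true) {
    auto ret = agent.write();
    if (waitForCommit && !ret.indices.empty()) {
      // the highest index: once it is committed, all earlier ones are too
      auto maximum = *std::max_element(ret.indices.begin(), ret.indices.end());
      if (maximum > 0) {
        agent.waitFor(maximum);
      }
    }
    return ret;
  }

  int main() {
    FakeAgent agent;
    transactSketch(agent);         // blocks until committed (new default)
    transactSketch(agent, false);  // fire-and-forget (the old behaviour)
    return 0;
  }

Callers that relied on transact() returning immediately keep that behaviour by passing false explicitly.
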
@@ -245,7 +245,7 @@ bool CleanOutServer::scheduleMoveShards() const {
         "Range error picking destination for shard " + shard.first;
     }
 
-    // Shedule move
+    // Schedule move
     MoveShard (
       _snapshot, _agent, _jobId + "-" + std::to_string(sub++), _jobId,
       _agencyPrefix, database.first, collptr.first, shard.first, _server,
@@ -114,8 +114,13 @@ Node::Node(Node const& other) :
   _node_name(other._node_name),
   _parent(nullptr),
   _store(nullptr),
-  _children(other._children),
-  _value(other._value) {}
+  _value(other._value) {
+
+  for (auto const& p : other._children) {
+    auto copy = std::make_shared<Node>(*p.second);
+    _children.insert(std::make_pair(p.first, copy));
+  }
+}
 
 // Assignment of rhs slice
 Node& Node::operator=(VPackSlice const& slice) {
@@ -152,7 +157,11 @@ Node& Node::operator=(Node const& rhs) {
   // Must not move rhs's _parent, _ttl, _observers
   removeTimeToLive();
   _node_name = rhs._node_name;
-  _children = rhs._children;
+  _children.clear();
+  for (auto const& p : rhs._children) {
+    auto copy = std::make_shared<Node>(*p.second);
+    _children.insert(std::make_pair(p.first, copy));
+  }
   _value = rhs._value;
   return *this;
 }
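
Both Node changes above replace a shallow copy of the _children map with an element-wise clone. A minimal sketch of the difference (Child, Children, shallowCopy and deepCopy are invented stand-ins, not the agency's Node): copying a map of shared_ptr only copies the pointers, so the "copies" keep sharing the same child objects.

  #include <iostream>
  #include <map>
  #include <memory>
  #include <string>

  struct Child { int value = 0; };
  using Children = std::map<std::string, std::shared_ptr<Child>>;

  // Shallow copy: both maps end up pointing at the very same Child objects.
  Children shallowCopy(Children const& other) { return other; }

  // Deep copy, as the patched constructor/assignment now do: clone every child.
  Children deepCopy(Children const& other) {
    Children result;
    for (auto const& p : other) {
      result.insert(std::make_pair(p.first, std::make_shared<Child>(*p.second)));
    }
    return result;
  }

  int main() {
    Children original;
    original["x"] = std::make_shared<Child>();

    Children shallow = shallowCopy(original);
    shallow["x"]->value = 42;                        // mutates original's child too
    std::cout << original["x"]->value << std::endl;  // prints 42

    original["x"]->value = 0;
    Children deep = deepCopy(original);
    deep["x"]->value = 42;                           // original's child untouched
    std::cout << original["x"]->value << std::endl;  // prints 0
    return 0;
  }

Presumably the point of the patch: a copied Node should no longer be mutated behind its back when the node it was copied from changes.
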
@@ -268,7 +268,6 @@ void Supervision::run() {
 void Supervision::workJobs() {
 
   Node::Children const todos = _snapshot(toDoPrefix).children();
-  Node::Children const pends = _snapshot(pendingPrefix).children();
 
   for (auto const& todoEnt : todos) {
     Node const& job = *todoEnt.second;
@@ -285,6 +284,9 @@ void Supervision::workJobs() {
     } catch (std::exception const&) {}
   }
 
+  _snapshot = _agent->readDB().get(_agencyPrefix);
+  Node::Children const pends = _snapshot(pendingPrefix).children();
+
   for (auto const& pendEnt : pends) {
     Node const& job = *pendEnt.second;
 
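
The Supervision change moves the read of the Pending jobs to after the ToDo pass and re-reads _snapshot in between, presumably so that jobs the ToDo pass just started are visible as Pending in the same run. A minimal sketch of why a snapshot taken up front would miss them (Store and workJobsSketch are invented stand-ins, a plain map in place of the agency store):

  #include <iostream>
  #include <map>
  #include <string>

  using Store = std::map<std::string, std::string>;  // job id -> state

  void workJobsSketch(Store& store) {
    Store snapshot = store;               // snapshot taken before the ToDo pass

    // Pass 1: start every job that is still in ToDo (mutates the real store).
    for (auto& entry : store) {
      if (entry.second == "ToDo") {
        entry.second = "Pending";
      }
    }

    // Old behaviour: walking the stale snapshot would still see "job-1" as ToDo.
    // New behaviour (sketched): refresh the snapshot, then walk the Pending jobs.
    snapshot = store;
    for (auto const& entry : snapshot) {
      if (entry.second == "Pending") {
        std::cout << "continuing pending job " << entry.first << std::endl;
      }
    }
  }

  int main() {
    Store store{{"job-1", "ToDo"}, {"job-2", "Pending"}};
    workJobsSketch(store);
    return 0;
  }
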
@@ -849,10 +849,12 @@ function dropLocalCollections (plannedCollections, writeLocked) {
           // is also no problem, since the leader will soon notice
           // that the shard here is gone and will drop us automatically:
           var servers = shardMap[collection];
-          var endpoint = ArangoClusterInfo.getServerEndpoint(servers[0]);
-          try {
-            removeShardFollower(endpoint, database, collection);
-          } catch (err) {
+          if (servers !== undefined) {
+            var endpoint = ArangoClusterInfo.getServerEndpoint(servers[0]);
+            try {
+              removeShardFollower(endpoint, database, collection);
+            } catch (err) {
+            }
           }
           console.info("dropping local shard '%s/%s' of '%s/%s",
                        database,
@@ -995,11 +997,11 @@ function synchronizeOneShard(database, shard, planId, leader) {
       }
       catch (err) {
         console.info("synchronizeOneShard: syncCollection did not work,",
-                     "trying again later for shard", shard);
+                     "trying again later for shard", shard, err);
       }
       if (--count <= 0) {
         console.error("synchronizeOneShard: syncCollection did not work",
-                      "after many tries, giving up on shard", shard);
+                      "after many tries, giving up on shard", shard, err);
         throw "syncCollection did not work";
       }
       wait(1.0);