Mirror of https://gitee.com/bigwinds/arangodb

commit b6df44a348
Merge branch 'devel' of https://github.com/arangodb/arangodb into devel
@@ -1,6 +1,9 @@
 devel
 -----
 
+* change undocumented behaviour in case of invalid revision ids in
+  If-Match and If-None-Match headers from 400 (BAD) to 412 (PRECONDITION
+  FAILED).
 
 v3.2.alpha1 (2017-02-05)
 ------------------------
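The changelog entry above is the user-visible side of the RestDocumentHandler changes further down: a revision id that cannot be parsed out of an If-Match or If-None-Match header is no longer rejected with 400, but treated as a revision that can never match, so the request fails the usual precondition check with 412. The following is a minimal, self-contained sketch of that idea; the parser and status handling are simplified stand-ins, not ArangoDB's actual handler code.

```cpp
// Minimal sketch of the revised precondition handling; simplified and
// illustrative only, not ArangoDB's actual handler code.
#include <cstdint>
#include <cstdlib>
#include <iostream>
#include <string>

enum class HttpStatus { Ok = 200, PreconditionFailed = 412 };

// Parse a numeric revision id out of an If-Match header value such as "\"1234\"".
// Sets valid to false when the value is not a well-formed revision.
uint64_t parseRevision(std::string const& header, bool& valid) {
  std::string s = header;
  if (s.size() >= 2 && s.front() == '"' && s.back() == '"') {
    s = s.substr(1, s.size() - 2);  // strip surrounding quotes
  }
  char* end = nullptr;
  uint64_t rev = std::strtoull(s.c_str(), &end, 10);
  valid = (!s.empty() && end != nullptr && *end == '\0');
  return rev;
}

HttpStatus checkIfMatch(std::string const& ifMatchHeader, uint64_t storedRevision) {
  bool valid = false;
  uint64_t wanted = parseRevision(ifMatchHeader, valid);
  if (!valid) {
    wanted = 1;  // an impossible revision, so the precondition check below fails
  }
  return wanted == storedRevision ? HttpStatus::Ok : HttpStatus::PreconditionFailed;
}

int main() {
  // "*abcd" is not a valid revision: previously answered with 400, now it
  // simply fails the precondition and the caller responds with 412.
  std::cout << static_cast<int>(checkIfMatch("\"*abcd\"", 1234)) << "\n";  // 412
  std::cout << static_cast<int>(checkIfMatch("\"1234\"", 1234)) << "\n";   // 200
}
```

In the adjusted rspec tests below, the response body's errorNum accordingly becomes 1200 (the conflict error) alongside the 412 status code.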
@@ -12,6 +12,7 @@ By default, cloning the github repository will checkout **devel**. This version
 contains the development version of the ArangoDB. Use this branch if you want
 to make changes to the ArangoDB source.
 
+On windows you first [need to allow and enable symlinks for your user](https://github.com/git-for-windows/git/wiki/Symbolic-Links#allowing-non-administrators-to-create-symbolic-links).
 We now use [git submodules](https://git-scm.com/docs/git-submodule) for V8 and Rocksdb.
 Since the V8 git repository also requires external addons to be present, we end
 up with recursive submodules. Thus a clone command now has to look like:
@@ -21,5 +22,8 @@ up with recursive submodules. Thus a clone command now has to look like:
     git submodule update --recursive
     git submodule update --init --recursive
 
 
 Please checkout the [cookbook](https://docs.arangodb.com/cookbook) on how to
 compile ArangoDB.
@@ -190,20 +190,20 @@ describe ArangoDB do
        hdr = { "if-match" => "\"*abcd\"" }
        doc = ArangoDB.log_delete("#{prefix}-rev-invalid", cmd, :headers => hdr )
 
-       doc.code.should eq(400)
+       doc.code.should eq(412)
        doc.parsed_response['error'].should eq(true)
-       doc.parsed_response['errorNum'].should eq(400)
-       doc.parsed_response['code'].should eq(400)
+       doc.parsed_response['errorNum'].should eq(1200)
+       doc.parsed_response['code'].should eq(412)
 
        # delete document, invalid revision
        cmd = "/_api/document/#{did}"
        hdr = { "if-match" => "'*abcd'" }
        doc = ArangoDB.log_delete("#{prefix}-rev-invalid", cmd, :headers => hdr)
 
-       doc.code.should eq(400)
+       doc.code.should eq(412)
        doc.parsed_response['error'].should eq(true)
-       doc.parsed_response['errorNum'].should eq(400)
-       doc.parsed_response['code'].should eq(400)
+       doc.parsed_response['errorNum'].should eq(1200)
+       doc.parsed_response['code'].should eq(412)
 
        # delete document, correct revision
        cmd = "/_api/document/#{did}"
@@ -567,7 +567,7 @@ describe ArangoDB do
        hdr = { "if-match" => "'*abcd'" }
        doc = ArangoDB.log_head("#{prefix}-head-rev-invalid", cmd, :headers => hdr)
 
-       doc.code.should eq(400)
+       doc.code.should eq(412)
      end
 
    end
@@ -266,20 +266,20 @@ describe ArangoDB do
        hdr = { "if-match" => "\"*abcd\"" }
        doc = ArangoDB.log_put("#{prefix}-rev-invalid", cmd, :headers => hdr, :body => body)
 
-       doc.code.should eq(400)
+       doc.code.should eq(412)
        doc.parsed_response['error'].should eq(true)
-       doc.parsed_response['errorNum'].should eq(400)
-       doc.parsed_response['code'].should eq(400)
+       doc.parsed_response['errorNum'].should eq(1200)
+       doc.parsed_response['code'].should eq(412)
 
        # update document, invalid revision
        cmd = "/_api/document/#{did}"
        hdr = { "if-match" => "'*abcd'" }
        doc = ArangoDB.log_put("#{prefix}-rev-invalid", cmd, :headers => hdr, :body => body)
 
-       doc.code.should eq(400)
+       doc.code.should eq(412)
        doc.parsed_response['error'].should eq(true)
-       doc.parsed_response['errorNum'].should eq(400)
-       doc.parsed_response['code'].should eq(400)
+       doc.parsed_response['errorNum'].should eq(1200)
+       doc.parsed_response['code'].should eq(412)
 
        # update document, correct revision
        cmd = "/_api/document/#{did}"
@@ -1529,7 +1529,15 @@ AgencyCommResult AgencyComm::send(
       << "': " << body;
 
   arangodb::httpclient::SimpleHttpClient client(connection, timeout, false);
-  client.setJwt(ClusterComm::instance()->jwt());
+  auto cc = ClusterComm::instance();
+  if (cc == nullptr) {
+    // nullptr only happens during controlled shutdown
+    result._message = "could not send request to agency because of shutdown";
+    LOG_TOPIC(TRACE, Logger::AGENCYCOMM) << "could not send request to agency";
+
+    return result;
+  }
+  client.setJwt(cc->jwt());
   client.keepConnectionOnDestruction(true);
 
   // set up headers
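This hunk introduces the guard that recurs throughout the rest of the commit: ClusterComm::instance() may return nullptr once a controlled shutdown has begun, so each call site now fetches the instance once, checks it, and backs out cleanly instead of dereferencing it. Below is a compilable sketch of the pattern using a stand-in singleton (ClusterComm itself is not reproduced here; names and the error value are illustrative only).

```cpp
// Sketch of the guard pattern applied throughout this commit, using a
// stand-in singleton; ClusterComm itself is not reproduced here.
#include <iostream>
#include <memory>

struct Comm {
  void send(char const* msg) { std::cout << "sent: " << msg << "\n"; }

  // Returns the shared instance, or nullptr once shutdown has begun.
  static std::shared_ptr<Comm> instance() { return _instance; }
  static std::shared_ptr<Comm> _instance;
};
std::shared_ptr<Comm> Comm::_instance = std::make_shared<Comm>();

int sendToAgency(char const* msg) {
  auto cc = Comm::instance();
  if (cc == nullptr) {
    // nullptr only happens during controlled shutdown: report it and give up
    // instead of dereferencing a dangling singleton.
    return -1;  // stands in for an error code such as TRI_ERROR_SHUTTING_DOWN
  }
  cc->send(msg);
  return 0;
}

int main() {
  sendToAgency("hello");     // normal operation
  Comm::_instance.reset();   // simulate controlled shutdown
  sendToAgency("too late");  // now refused cleanly
}
```

If instance() hands out a shared pointer, holding it in a local `cc` also keeps the object alive for the duration of the call; in any case it avoids re-querying the singleton on every request, which is why the later hunks replace repeated `ClusterComm::instance()->` calls with `cc->`.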
@@ -313,6 +313,11 @@ bool Agent::recvAppendEntriesRPC(
 
 /// Leader's append entries
 void Agent::sendAppendEntriesRPC() {
+  auto cc = ClusterComm::instance();
+  if (cc == nullptr) {
+    // nullptr only happens during controlled shutdown
+    return;
+  }
 
   // _lastSent, _lastHighest and _confirmed only accessed in main thread
   std::string const myid = id();
@@ -387,7 +392,7 @@ void Agent::sendAppendEntriesRPC() {
     // Send request
     auto headerFields =
       std::make_unique<std::unordered_map<std::string, std::string>>();
-    arangodb::ClusterComm::instance()->asyncRequest(
+    cc->asyncRequest(
       "1", 1, _config.poolAt(followerId),
       arangodb::rest::RequestType::POST, path.str(),
       std::make_shared<std::string>(builder.toJson()), headerFields,
@@ -1002,6 +1007,11 @@ TimePoint const& Agent::leaderSince() const {
 
 // Notify inactive pool members of configuration change()
 void Agent::notifyInactive() const {
+  auto cc = ClusterComm::instance();
+  if (cc == nullptr) {
+    // nullptr only happens during controlled shutdown
+    return;
+  }
 
   std::map<std::string, std::string> pool = _config.pool();
   std::string path = "/_api/agency_priv/inform";
@@ -1023,7 +1033,7 @@ void Agent::notifyInactive() const {
     auto headerFields =
       std::make_unique<std::unordered_map<std::string, std::string>>();
 
-    arangodb::ClusterComm::instance()->asyncRequest(
+    cc->asyncRequest(
       "1", 1, p.second, arangodb::rest::RequestType::POST,
       path, std::make_shared<std::string>(out.toJson()), headerFields,
       nullptr, 1.0, true);
@@ -66,11 +66,15 @@ void AgentActivator::run() {
 
     auto headerFields =
       std::make_unique<std::unordered_map<std::string, std::string>>();
-    arangodb::ClusterComm::instance()->asyncRequest(
-      "1", 1, endpoint, rest::RequestType::POST, path,
-      std::make_shared<std::string>(allLogs->toJson()), headerFields,
-      std::make_shared<ActivationCallback>(_agent, _failed, _replacement),
-      5.0, true, 1.0);
+    auto cc = arangodb::ClusterComm::instance();
+    if (cc != nullptr) {
+      // nullptr only happens on controlled shutdown
+      cc->asyncRequest(
+        "1", 1, endpoint, rest::RequestType::POST, path,
+        std::make_shared<std::string>(allLogs->toJson()), headerFields,
+        std::make_shared<ActivationCallback>(_agent, _failed, _replacement),
+        5.0, true, 1.0);
+    }
 
     _cv.wait(10000000); // 10 sec
 
@@ -389,11 +389,17 @@ void Constituent::callElection() {
     << "&prevLogTerm=" << _agent->lastLog().term;
 
   // Ask everyone for their vote
+  auto cc = ClusterComm::instance();
+  if (cc == nullptr) {
+    // only happens on controlled shutdown
+    follow(_term);
+    return;
+  }
   for (auto const& i : active) {
     if (i != _id) {
       auto headerFields =
         std::make_unique<std::unordered_map<std::string, std::string>>();
-      ClusterComm::instance()->asyncRequest(
+      cc->asyncRequest(
        "", coordinatorTransactionID, _agent->config().poolAt(i),
        rest::RequestType::GET, path.str(),
        std::make_shared<std::string>(body), headerFields,
@@ -419,8 +425,7 @@ void Constituent::callElection() {
       break;
     }
 
-  auto res = ClusterComm::instance()->wait(
-    "", coordinatorTransactionID, 0, "",
+  auto res = cc->wait("", coordinatorTransactionID, 0, "",
     duration<double>(steady_clock::now()-timeout).count());
 
   if (res.status == CL_COMM_SENT) {
@@ -461,7 +466,7 @@ void Constituent::callElection() {
     << (yea >= majority ? "yeas" : "nays") << " have it.";
 
   // Clean up
-  ClusterComm::instance()->drop("", coordinatorTransactionID, 0, "");
+  cc->drop("", coordinatorTransactionID, 0, "");
 
 }
 
@@ -49,6 +49,11 @@ Inception::~Inception() { shutdown(); }
 /// - Create outgoing gossip.
 /// - Send to all peers
 void Inception::gossip() {
+  auto cc = ClusterComm::instance();
+  if (cc == nullptr) {
+    // nullptr only happens during controlled shutdown
+    return;
+  }
 
   LOG_TOPIC(INFO, Logger::AGENCY) << "Entering gossip phase ...";
   using namespace std::chrono;
@@ -93,7 +98,7 @@ void Inception::gossip() {
           std::make_unique<std::unordered_map<std::string, std::string>>();
         LOG_TOPIC(DEBUG, Logger::AGENCY) << "Sending gossip message: "
           << out->toJson() << " to peer " << clientid;
-        arangodb::ClusterComm::instance()->asyncRequest(
+        cc->asyncRequest(
           clientid, 1, p, rest::RequestType::POST, path,
           std::make_shared<std::string>(out->toJson()), hf,
           std::make_shared<GossipCallback>(_agent, version), 1.0, true, 0.5);
@@ -116,7 +121,7 @@ void Inception::gossip() {
           std::make_unique<std::unordered_map<std::string, std::string>>();
         LOG_TOPIC(DEBUG, Logger::AGENCY) << "Sending gossip message: "
           << out->toJson() << " to pool member " << clientid;
-        arangodb::ClusterComm::instance()->asyncRequest(
+        cc->asyncRequest(
          clientid, 1, pair.second, rest::RequestType::POST, path,
          std::make_shared<std::string>(out->toJson()), hf,
          std::make_shared<GossipCallback>(_agent, version), 1.0, true, 0.5);
@@ -156,6 +161,11 @@ void Inception::gossip() {
 
 
 bool Inception::restartingActiveAgent() {
+  auto cc = ClusterComm::instance();
+  if (cc == nullptr) {
+    // nullptr only happens during controlled shutdown
+    return false;
+  }
 
   LOG_TOPIC(INFO, Logger::AGENCY) << "Restarting agent from persistence ...";
 
@@ -200,7 +210,7 @@ bool Inception::restartingActiveAgent() {
     std::vector<std::string> informed;
 
     for (auto& p : gp) {
-      auto comres = arangodb::ClusterComm::instance()->syncRequest(
+      auto comres = cc->syncRequest(
        clientId, 1, p, rest::RequestType::POST, path, greetstr,
        std::unordered_map<std::string, std::string>(), 2.0);
      if (comres->status == CL_COMM_SENT) {
@@ -224,7 +234,7 @@ bool Inception::restartingActiveAgent() {
 
      if (p.first != myConfig.id() && p.first != "") {
 
-        auto comres = arangodb::ClusterComm::instance()->syncRequest(
+        auto comres = cc->syncRequest(
          clientId, 1, p.second, rest::RequestType::POST, path, greetstr,
          std::unordered_map<std::string, std::string>(), 2.0);
 
@@ -249,7 +259,7 @@ bool Inception::restartingActiveAgent() {
 
        // Contact leader to update endpoint
        if (theirLeaderId != theirId) {
-          comres = arangodb::ClusterComm::instance()->syncRequest(
+          comres = cc->syncRequest(
            clientId, 1, theirLeaderEp, rest::RequestType::POST, path,
            greetstr, std::unordered_map<std::string, std::string>(), 2.0);
          // Failed to contact leader move on until we do. This way at
@@ -365,6 +375,11 @@ void Inception::reportVersionForEp(std::string const& endpoint, size_t version)
 
 
 bool Inception::estimateRAFTInterval() {
+  auto cc = arangodb::ClusterComm::instance();
+  if (cc == nullptr) {
+    // nullptr only happens during controlled shutdown
+    return false;
+  }
 
   using namespace std::chrono;
   LOG_TOPIC(INFO, Logger::AGENCY) << "Estimating RAFT timeouts ...";
@@ -382,7 +397,7 @@ bool Inception::estimateRAFTInterval() {
       std::string clientid = peer.first + std::to_string(i);
       auto hf =
         std::make_unique<std::unordered_map<std::string, std::string>>();
-      arangodb::ClusterComm::instance()->asyncRequest(
+      cc->asyncRequest(
        clientid, 1, peer.second, rest::RequestType::GET, path,
        std::make_shared<std::string>(), hf,
        std::make_shared<MeasureCallback>(this, peer.second, timeStamp()),
@@ -448,7 +463,7 @@ bool Inception::estimateRAFTInterval() {
   for (auto const& peer : config.pool()) {
     if (peer.first != myid) {
       auto clientId = "1";
-      auto comres = arangodb::ClusterComm::instance()->syncRequest(
+      auto comres = cc->syncRequest(
        clientId, 1, peer.second, rest::RequestType::POST, path,
        measjson, std::unordered_map<std::string, std::string>(), 5.0);
     }
@@ -352,11 +352,15 @@ std::vector<bool> Store::apply(
       auto headerFields =
         std::make_unique<std::unordered_map<std::string, std::string>>();
 
-      arangodb::ClusterComm::instance()->asyncRequest(
-        "1", 1, endpoint, rest::RequestType::POST, path,
-        std::make_shared<std::string>(body.toString()), headerFields,
-        std::make_shared<StoreCallback>(path, body.toJson()), 1.0, true, 0.01);
+      auto cc = ClusterComm::instance();
+      if (cc != nullptr) {
+        // nullptr only happens on controlled shutdown
+        cc->asyncRequest(
+          "1", 1, endpoint, rest::RequestType::POST, path,
+          std::make_shared<std::string>(body.toString()), headerFields,
+          std::make_shared<StoreCallback>(path, body.toJson()), 1.0, true,
+          0.01);
+      }
     } else {
       LOG_TOPIC(WARN, Logger::AGENCY) << "Malformed URL " << url;
     }
@@ -1233,30 +1233,34 @@ std::unique_ptr<ClusterCommResult> RemoteBlock::sendRequest(
     std::string const& body) const {
   DEBUG_BEGIN_BLOCK();
   auto cc = ClusterComm::instance();
+  if (cc != nullptr) {
+    // nullptr only happens on controlled shutdown
 
   // Later, we probably want to set these sensibly:
   ClientTransactionID const clientTransactionId = "AQL";
   CoordTransactionID const coordTransactionId = TRI_NewTickServer();
   std::unordered_map<std::string, std::string> headers;
   if (!_ownName.empty()) {
     headers.emplace("Shard-Id", _ownName);
   }
 
   ++_engine->_stats.httpRequests;
   {
     JobGuard guard(SchedulerFeature::SCHEDULER);
     guard.block();
 
     auto result =
         cc->syncRequest(clientTransactionId, coordTransactionId, _server, type,
                         std::string("/_db/") +
                             arangodb::basics::StringUtils::urlEncode(
                                 _engine->getQuery()->trx()->vocbase()->name()) +
                             urlPart + _queryId,
                         body, headers, defaultTimeOut);
 
     return result;
+    }
   }
+  return std::make_unique<ClusterCommResult>();
 
   // cppcheck-suppress style
   DEBUG_END_BLOCK();
@@ -582,16 +582,19 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
     // << "\n";
 
     auto cc = arangodb::ClusterComm::instance();
+    if (cc != nullptr) {
+      // nullptr only happens on controlled shutdown
 
     std::string const url("/_db/"
       + arangodb::basics::StringUtils::urlEncode(collection->vocbase->name()) +
      "/_api/aql/instantiate");
 
     auto headers = std::make_unique<std::unordered_map<std::string, std::string>>();
     (*headers)["X-Arango-Nolock"] = shardId; // Prevent locking
     cc->asyncRequest("", coordTransactionID, "shard:" + shardId,
                      arangodb::rest::RequestType::POST,
                      url, body, headers, nullptr, 30.0);
+    }
   }
 
   /// @brief aggregateQueryIds, get answers for all shards in a Scatter/Gather
@@ -670,28 +673,29 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
     // now send the plan to the remote servers
     arangodb::CoordTransactionID coordTransactionID = TRI_NewTickServer();
     auto cc = arangodb::ClusterComm::instance();
-    TRI_ASSERT(cc != nullptr);
-
-    // iterate over all shards of the collection
-    size_t nr = 0;
-    auto shardIds = collection->shardIds();
-    for (auto const& shardId : *shardIds) {
-      // inject the current shard id into the collection
-      VPackBuilder b;
-      collection->setCurrentShard(shardId);
-      generatePlanForOneShard(b, nr++, info, connectedId, shardId, true);
-
-      distributePlanToShard(coordTransactionID, info,
-                            connectedId, shardId,
-                            b.slice());
-    }
-    collection->resetCurrentShard();
-    for (auto const& auxiliaryCollection: auxiliaryCollections) {
-      TRI_ASSERT(auxiliaryCollection->shardIds()->size() == 1);
-      auxiliaryCollection->resetCurrentShard();
-    }
-
-    aggregateQueryIds(info, cc, coordTransactionID, collection);
+    if (cc != nullptr) {
+      // nullptr only happens on controlled shutdown
+      // iterate over all shards of the collection
+      size_t nr = 0;
+      auto shardIds = collection->shardIds();
+      for (auto const& shardId : *shardIds) {
+        // inject the current shard id into the collection
+        VPackBuilder b;
+        collection->setCurrentShard(shardId);
+        generatePlanForOneShard(b, nr++, info, connectedId, shardId, true);
+
+        distributePlanToShard(coordTransactionID, info,
+                              connectedId, shardId,
+                              b.slice());
+      }
+      collection->resetCurrentShard();
+      for (auto const& auxiliaryCollection: auxiliaryCollections) {
+        TRI_ASSERT(auxiliaryCollection->shardIds()->size() == 1);
+        auxiliaryCollection->resetCurrentShard();
+      }
+
+      aggregateQueryIds(info, cc, coordTransactionID, collection);
+    }
   }
 
   /// @brief buildEngineCoordinator, for a single piece
@@ -931,6 +935,10 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
                               query->vocbase()->name()) +
                           "/_internal/traverser");
     auto cc = arangodb::ClusterComm::instance();
+    if (cc == nullptr) {
+      // nullptr only happens on controlled shutdown
+      return;
+    }
     bool hasVars = false;
     VPackBuilder varInfo;
     std::vector<aql::Variable const*> vars;
@@ -1229,6 +1237,11 @@ ExecutionEngine* ExecutionEngine::instantiateFromPlan(
       // Lock shard on DBserver:
       arangodb::CoordTransactionID coordTransactionID = TRI_NewTickServer();
       auto cc = arangodb::ClusterComm::instance();
+      if (cc == nullptr) {
+        // nullptr only happens on controlled shutdown
+        THROW_ARANGO_EXCEPTION( TRI_ERROR_SHUTTING_DOWN);
+      }
+
       TRI_vocbase_t* vocbase = query->vocbase();
       std::unique_ptr<ClusterCommResult> res;
       std::unordered_map<std::string, std::string> headers;
@@ -1263,70 +1276,73 @@ ExecutionEngine* ExecutionEngine::instantiateFromPlan(
       // the DBservers via HTTP:
       TRI_vocbase_t* vocbase = query->vocbase();
       auto cc = arangodb::ClusterComm::instance();
-      for (auto& q : inst.get()->queryIds) {
-        std::string theId = q.first;
-        std::string queryId = q.second;
-        auto pos = theId.find(':');
-        if (pos != std::string::npos) {
-          // So this is a remote one on a DBserver:
-          std::string shardId = theId.substr(pos + 1);
-          // Remove query from DBserver:
-          arangodb::CoordTransactionID coordTransactionID =
-              TRI_NewTickServer();
-          if (queryId.back() == '*') {
-            queryId.pop_back();
-          }
-          std::string const url(
-              "/_db/" +
-              arangodb::basics::StringUtils::urlEncode(vocbase->name()) +
-              "/_api/aql/shutdown/" + queryId);
-          std::unordered_map<std::string, std::string> headers;
-          auto res =
-              cc->syncRequest("", coordTransactionID, "shard:" + shardId,
-                              arangodb::rest::RequestType::PUT,
-                              url, "{\"code\": 0}", headers, 120.0);
-          // Ignore result, we need to try to remove all.
-          // However, log the incident if we have an errorMessage.
-          if (!res->errorMessage.empty()) {
-            std::string msg("while trying to unregister query ");
-            msg += queryId + ": " + res->stringifyErrorMessage();
-            LOG(WARN) << msg;
-          }
-        } else {
-          // Remove query from registry:
-          try {
-            queryRegistry->destroy(
-                vocbase, arangodb::basics::StringUtils::uint64(queryId),
-                TRI_ERROR_INTERNAL);
-          } catch (...) {
-            // Ignore problems
-          }
-        }
-      }
-      // Also we need to destroy all traverser engines that have been pushed to DBServers
-      {
-
-        std::string const url(
-            "/_db/" +
-            arangodb::basics::StringUtils::urlEncode(vocbase->name()) +
-            "/_internal/traverser/");
-        for (auto& te : inst.get()->traverserEngines) {
-          std::string traverserId = arangodb::basics::StringUtils::itoa(te.first);
-          arangodb::CoordTransactionID coordTransactionID =
-              TRI_NewTickServer();
-          std::unordered_map<std::string, std::string> headers;
-          // NOTE: te.second is the list of shards. So we just send delete
-          // to the first of those shards
-          auto res = cc->syncRequest(
-              "", coordTransactionID, "shard:" + *(te.second.begin()),
-              RequestType::DELETE_REQ, url + traverserId, "", headers, 30.0);
-
-          // Ignore result, we need to try to remove all.
-          // However, log the incident if we have an errorMessage.
-          if (!res->errorMessage.empty()) {
-            std::string msg("while trying to unregister traverser engine ");
-            msg += traverserId + ": " + res->stringifyErrorMessage();
-            LOG(WARN) << msg;
+      if (cc != nullptr) {
+        // nullptr only happens during controlled shutdown
+        for (auto& q : inst.get()->queryIds) {
+          std::string theId = q.first;
+          std::string queryId = q.second;
+          auto pos = theId.find(':');
+          if (pos != std::string::npos) {
+            // So this is a remote one on a DBserver:
+            std::string shardId = theId.substr(pos + 1);
+            // Remove query from DBserver:
+            arangodb::CoordTransactionID coordTransactionID =
+                TRI_NewTickServer();
+            if (queryId.back() == '*') {
+              queryId.pop_back();
+            }
+            std::string const url(
+                "/_db/" +
+                arangodb::basics::StringUtils::urlEncode(vocbase->name()) +
+                "/_api/aql/shutdown/" + queryId);
+            std::unordered_map<std::string, std::string> headers;
+            auto res =
+                cc->syncRequest("", coordTransactionID, "shard:" + shardId,
+                                arangodb::rest::RequestType::PUT,
+                                url, "{\"code\": 0}", headers, 120.0);
+            // Ignore result, we need to try to remove all.
+            // However, log the incident if we have an errorMessage.
+            if (!res->errorMessage.empty()) {
+              std::string msg("while trying to unregister query ");
+              msg += queryId + ": " + res->stringifyErrorMessage();
+              LOG(WARN) << msg;
+            }
+          } else {
+            // Remove query from registry:
+            try {
+              queryRegistry->destroy(
+                  vocbase, arangodb::basics::StringUtils::uint64(queryId),
+                  TRI_ERROR_INTERNAL);
+            } catch (...) {
+              // Ignore problems
+            }
+          }
+        }
+        // Also we need to destroy all traverser engines that have been pushed to DBServers
+        {
+
+          std::string const url(
+              "/_db/" +
+              arangodb::basics::StringUtils::urlEncode(vocbase->name()) +
+              "/_internal/traverser/");
+          for (auto& te : inst.get()->traverserEngines) {
+            std::string traverserId = arangodb::basics::StringUtils::itoa(te.first);
+            arangodb::CoordTransactionID coordTransactionID =
+                TRI_NewTickServer();
+            std::unordered_map<std::string, std::string> headers;
+            // NOTE: te.second is the list of shards. So we just send delete
+            // to the first of those shards
+            auto res = cc->syncRequest(
+                "", coordTransactionID, "shard:" + *(te.second.begin()),
+                RequestType::DELETE_REQ, url + traverserId, "", headers, 30.0);
+
+            // Ignore result, we need to try to remove all.
+            // However, log the incident if we have an errorMessage.
+            if (!res->errorMessage.empty()) {
+              std::string msg("while trying to unregister traverser engine ");
+              msg += traverserId + ": " + res->stringifyErrorMessage();
+              LOG(WARN) << msg;
+            }
           }
         }
       }
@@ -192,23 +192,26 @@ int TraversalBlock::shutdown(int errorCode) {
   // We have to clean up the engines in Coordinator Case.
   if (arangodb::ServerState::instance()->isCoordinator()) {
     auto cc = arangodb::ClusterComm::instance();
-    std::string const url(
-      "/_db/" + arangodb::basics::StringUtils::urlEncode(_trx->vocbase()->name()) +
-      "/_internal/traverser/");
-    for (auto const& it : *_engines) {
-      arangodb::CoordTransactionID coordTransactionID = TRI_NewTickServer();
-      std::unordered_map<std::string, std::string> headers;
-      auto res = cc->syncRequest(
-        "", coordTransactionID, "server:" + it.first, RequestType::DELETE_REQ,
-        url + arangodb::basics::StringUtils::itoa(it.second), "", headers,
-        30.0);
-      if (res->status != CL_COMM_SENT) {
-        // Note If there was an error on server side we do not have CL_COMM_SENT
-        std::string message("Could not destroy all traversal engines");
-        if (!res->errorMessage.empty()) {
-          message += std::string(": ") + res->errorMessage;
+    if (cc != nullptr) {
+      // nullptr only happens on controlled server shutdown
+      std::string const url(
+        "/_db/" + arangodb::basics::StringUtils::urlEncode(_trx->vocbase()->name()) +
+        "/_internal/traverser/");
+      for (auto const& it : *_engines) {
+        arangodb::CoordTransactionID coordTransactionID = TRI_NewTickServer();
+        std::unordered_map<std::string, std::string> headers;
+        auto res = cc->syncRequest(
+          "", coordTransactionID, "server:" + it.first, RequestType::DELETE_REQ,
+          url + arangodb::basics::StringUtils::itoa(it.second), "", headers,
+          30.0);
+        if (res->status != CL_COMM_SENT) {
+          // Note If there was an error on server side we do not have CL_COMM_SENT
+          std::string message("Could not destroy all traversal engines");
+          if (!res->errorMessage.empty()) {
+            message += std::string(": ") + res->errorMessage;
+          }
+          LOG(ERR) << message;
         }
-        LOG(ERR) << message;
       }
     }
   }
 }
@@ -562,6 +562,10 @@ int revisionOnCoordinator(std::string const& dbname,
   // Set a few variables needed for our work:
   ClusterInfo* ci = ClusterInfo::instance();
   auto cc = ClusterComm::instance();
+  if (cc == nullptr) {
+    // nullptr happens only during controlled shutdown
+    return TRI_ERROR_SHUTTING_DOWN;
+  }
 
   // First determine the collection ID from the name:
   std::shared_ptr<LogicalCollection> collinfo;
@@ -636,6 +640,10 @@ int figuresOnCoordinator(std::string const& dbname, std::string const& collname,
   // Set a few variables needed for our work:
   ClusterInfo* ci = ClusterInfo::instance();
   auto cc = ClusterComm::instance();
+  if (cc == nullptr) {
+    // nullptr happens only during controlled shutdown
+    return TRI_ERROR_SHUTTING_DOWN;
+  }
 
   // First determine the collection ID from the name:
   std::shared_ptr<LogicalCollection> collinfo;
@@ -701,6 +709,10 @@ int countOnCoordinator(std::string const& dbname, std::string const& collname,
   // Set a few variables needed for our work:
   ClusterInfo* ci = ClusterInfo::instance();
   auto cc = ClusterComm::instance();
+  if (cc == nullptr) {
+    // nullptr happens only during controlled shutdown
+    return TRI_ERROR_SHUTTING_DOWN;
+  }
 
   result.clear();
 
@@ -771,6 +783,10 @@ int createDocumentOnCoordinator(
   // Set a few variables needed for our work:
   ClusterInfo* ci = ClusterInfo::instance();
   auto cc = ClusterComm::instance();
+  if (cc == nullptr) {
+    // nullptr happens only during controlled shutdown
+    return TRI_ERROR_SHUTTING_DOWN;
+  }
 
   // First determine the collection ID from the name:
   std::shared_ptr<LogicalCollection> collinfo;
@@ -906,6 +922,10 @@ int deleteDocumentOnCoordinator(
   // Set a few variables needed for our work:
   ClusterInfo* ci = ClusterInfo::instance();
   auto cc = ClusterComm::instance();
+  if (cc == nullptr) {
+    // nullptr happens only during controlled shutdown
+    return TRI_ERROR_SHUTTING_DOWN;
+  }
 
   // First determine the collection ID from the name:
   std::shared_ptr<LogicalCollection> collinfo;
@@ -1135,6 +1155,10 @@ int truncateCollectionOnCoordinator(std::string const& dbname,
   // Set a few variables needed for our work:
   ClusterInfo* ci = ClusterInfo::instance();
   auto cc = ClusterComm::instance();
+  if (cc == nullptr) {
+    // nullptr happens only during controlled shutdown
+    return TRI_ERROR_SHUTTING_DOWN;
+  }
 
   // First determine the collection ID from the name:
   std::shared_ptr<LogicalCollection> collinfo;
@@ -1191,6 +1215,10 @@ int getDocumentOnCoordinator(
   // Set a few variables needed for our work:
   ClusterInfo* ci = ClusterInfo::instance();
   auto cc = ClusterComm::instance();
+  if (cc == nullptr) {
+    // nullptr happens only during controlled shutdown
+    return TRI_ERROR_SHUTTING_DOWN;
+  }
 
   // First determine the collection ID from the name:
   std::shared_ptr<LogicalCollection> collinfo;
@@ -1462,6 +1490,10 @@ int fetchEdgesFromEngines(
     size_t& filtered,
     size_t& read) {
   auto cc = ClusterComm::instance();
+  if (cc == nullptr) {
+    // nullptr happens only during controlled shutdown
+    return TRI_ERROR_SHUTTING_DOWN;
+  }
   // TODO map id => ServerID if possible
   // And go fast-path
 
@@ -1546,6 +1578,10 @@ void fetchVerticesFromEngines(
         result,
     VPackBuilder& builder) {
   auto cc = ClusterComm::instance();
+  if (cc == nullptr) {
+    // nullptr happens only during controlled shutdown
+    return;
+  }
   // TODO map id => ServerID if possible
   // And go fast-path
 
@@ -1636,6 +1672,10 @@ int getFilteredEdgesOnCoordinator(
   // Set a few variables needed for our work:
   ClusterInfo* ci = ClusterInfo::instance();
   auto cc = ClusterComm::instance();
+  if (cc == nullptr) {
+    // nullptr happens only during controlled shutdown
+    return TRI_ERROR_SHUTTING_DOWN;
+  }
 
   // First determine the collection ID from the name:
   std::shared_ptr<LogicalCollection> collinfo =
@@ -1755,6 +1795,10 @@ int modifyDocumentOnCoordinator(
   // Set a few variables needed for our work:
   ClusterInfo* ci = ClusterInfo::instance();
   auto cc = ClusterComm::instance();
+  if (cc == nullptr) {
+    // nullptr happens only during controlled shutdown
+    return TRI_ERROR_SHUTTING_DOWN;
+  }
 
   // First determine the collection ID from the name:
   std::shared_ptr<LogicalCollection> collinfo =
@@ -2005,6 +2049,10 @@ int modifyDocumentOnCoordinator(
 int flushWalOnAllDBServers(bool waitForSync, bool waitForCollector) {
   ClusterInfo* ci = ClusterInfo::instance();
   auto cc = ClusterComm::instance();
+  if (cc == nullptr) {
+    // nullptr happens only during controlled shutdown
+    return TRI_ERROR_SHUTTING_DOWN;
+  }
   std::vector<ServerID> DBservers = ci->getCurrentDBServers();
   CoordTransactionID coordTransactionID = TRI_NewTickServer();
   std::string url = std::string("/_admin/wal/flush?waitForSync=") +
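Each of the coordinator helpers above now returns TRI_ERROR_SHUTTING_DOWN when no ClusterComm instance is available, instead of dereferencing a null pointer. Callers should treat that code as an expected, non-fatal outcome during a controlled shutdown. The following is a hedged sketch of such a caller; the helper is a stand-in rather than the real ClusterMethods API, and the numeric error values are assumed for illustration only.

```cpp
// Hypothetical caller of one of the coordinator helpers changed above; the
// helper and the error constants are stand-ins for the ArangoDB originals.
#include <cstdint>
#include <iostream>

constexpr int TRI_ERROR_NO_ERROR = 0;
constexpr int TRI_ERROR_SHUTTING_DOWN = 30;  // value assumed for illustration

// Stand-in for e.g. countOnCoordinator(): returns an error code, fills result.
int countOnCoordinatorStub(uint64_t& result, bool shuttingDown) {
  if (shuttingDown) {
    // mirrors the new guard: no ClusterComm instance -> shutting down
    return TRI_ERROR_SHUTTING_DOWN;
  }
  result = 42;
  return TRI_ERROR_NO_ERROR;
}

int main() {
  uint64_t count = 0;
  int res = countOnCoordinatorStub(count, /*shuttingDown=*/true);
  if (res == TRI_ERROR_SHUTTING_DOWN) {
    // expected during controlled shutdown: abort the operation quietly
    std::cout << "server is shutting down, skipping count\n";
    return 0;
  }
  std::cout << "count = " << count << "\n";
}
```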
@@ -745,7 +745,8 @@ bool HeartbeatThread::handlePlanChangeCoordinator(uint64_t currentPlanVersion) {
     ClusterInfo::instance()->flush();
 
     // turn on error logging now
-    if (!ClusterComm::instance()->enableConnectionErrorLogging(true)) {
+    auto cc = ClusterComm::instance();
+    if (cc != nullptr && cc->enableConnectionErrorLogging(true)) {
       LOG_TOPIC(DEBUG, Logger::HEARTBEAT)
           << "created coordinator databases for the first time";
     }
@@ -1811,8 +1811,8 @@ static void JS_AsyncRequest(v8::FunctionCallbackInfo<v8::Value> const& args) {
   auto cc = ClusterComm::instance();
 
   if (cc == nullptr) {
-    TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
-                                   "clustercomm object not found");
+    TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_SHUTTING_DOWN,
+                                   "clustercomm object not found (JS_AsyncRequest)");
   }
 
   arangodb::rest::RequestType reqType;
@@ -1878,7 +1878,7 @@ static void JS_SyncRequest(v8::FunctionCallbackInfo<v8::Value> const& args) {
   auto cc = ClusterComm::instance();
 
   if (cc == nullptr) {
-    TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
+    TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_SHUTTING_DOWN,
                                    "clustercomm object not found");
   }
 
@@ -1931,7 +1931,7 @@ static void JS_Enquire(v8::FunctionCallbackInfo<v8::Value> const& args) {
 
   if (cc == nullptr) {
     TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
-                                   "clustercomm object not found");
+                                   "clustercomm object not found (JS_SyncRequest)");
   }
 
   OperationID operationID = TRI_ObjectToUInt64(args[0], true);
@@ -1967,8 +1967,8 @@ static void JS_Wait(v8::FunctionCallbackInfo<v8::Value> const& args) {
   auto cc = ClusterComm::instance();
 
   if (cc == nullptr) {
-    TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
-                                   "clustercomm object not found");
+    TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_SHUTTING_DOWN,
+                                   "clustercomm object not found (JS_Wait)");
   }
 
   ClientTransactionID myclientTransactionID = "";
@@ -2038,7 +2038,7 @@ static void JS_Drop(v8::FunctionCallbackInfo<v8::Value> const& args) {
 
   if (cc == nullptr) {
     TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
-                                   "clustercomm object not found");
+                                   "clustercomm object not found (JS_Drop)");
   }
 
   ClientTransactionID myclientTransactionID = "";
@@ -2116,9 +2116,13 @@ static void JS_ClusterDownload(v8::FunctionCallbackInfo<v8::Value> const& args)
     }
     options->Set(TRI_V8_ASCII_STRING("headers"), headers);
 
-    std::string const authorization = "bearer " + ClusterComm::instance()->jwt();
-    v8::Handle<v8::String> v8Authorization = TRI_V8_STD_STRING(authorization);
-    headers->Set(TRI_V8_ASCII_STRING("Authorization"), v8Authorization);
+    auto cc = ClusterComm::instance();
+    if (cc != nullptr) {
+      // nullptr happens only during controlled shutdown
+      std::string authorization = "bearer " + ClusterComm::instance()->jwt();
+      v8::Handle<v8::String> v8Authorization = TRI_V8_STD_STRING(authorization);
+      headers->Set(TRI_V8_ASCII_STRING("Authorization"), v8Authorization);
+    }
     args[2] = options;
   }
   TRI_V8_TRY_CATCH_END
@@ -203,23 +203,19 @@ bool RestDocumentHandler::readSingleDocument(bool generateBody) {
 
   // check for an etag
   bool isValidRevision;
-  TRI_voc_rid_t const ifNoneRid =
+  TRI_voc_rid_t ifNoneRid =
       extractRevision("if-none-match", isValidRevision);
   if (!isValidRevision) {
-    generateError(rest::ResponseCode::BAD,
-                  TRI_ERROR_HTTP_BAD_PARAMETER, "invalid revision number");
-    return false;
+    ifNoneRid = 1; // an impossible rev, so precondition failed will happen
   }
 
   OperationOptions options;
   options.ignoreRevs = true;
 
-  TRI_voc_rid_t const ifRid =
+  TRI_voc_rid_t ifRid =
       extractRevision("if-match", isValidRevision);
   if (!isValidRevision) {
-    generateError(rest::ResponseCode::BAD,
-                  TRI_ERROR_HTTP_BAD_PARAMETER, "invalid revision number");
-    return false;
+    ifRid = 1; // an impossible rev, so precondition failed will happen
   }
 
   VPackBuilder builder;
@@ -396,9 +392,7 @@ bool RestDocumentHandler::modifyDocument(bool isPatch) {
     bool isValidRevision;
     revision = extractRevision("if-match", isValidRevision);
     if (!isValidRevision) {
-      generateError(rest::ResponseCode::BAD,
-                    TRI_ERROR_HTTP_BAD_PARAMETER, "invalid revision number");
-      return false;
+      revision = 1; // an impossible revision, so precondition failed
     }
     VPackSlice keyInBody = body.get(StaticStrings::KeyString);
     if ((revision != 0 && TRI_ExtractRevisionId(body) != revision) ||
@@ -502,9 +496,7 @@ bool RestDocumentHandler::deleteDocument() {
     bool isValidRevision = false;
     revision = extractRevision("if-match", isValidRevision);
     if (!isValidRevision) {
-      generateError(rest::ResponseCode::BAD,
-                    TRI_ERROR_HTTP_BAD_PARAMETER, "invalid revision number");
-      return false;
+      revision = 1; // an impossible revision, so precondition failed
     }
   }
 
@@ -775,6 +775,12 @@ void RestReplicationHandler::handleTrampolineCoordinator() {
 
   // Set a few variables needed for our work:
   auto cc = ClusterComm::instance();
+  if (cc == nullptr) {
+    // nullptr happens only during controlled shutdown
+    generateError(rest::ResponseCode::BAD, TRI_ERROR_SHUTTING_DOWN,
+                  "shutting down server");
+    return;
+  }
 
   std::unique_ptr<ClusterCommResult> res;
   if (!useVpp) {
@@ -1917,30 +1917,33 @@ OperationResult Transaction::insertLocal(std::string const& collectionName,
                             path, body);
     }
     auto cc = arangodb::ClusterComm::instance();
-    size_t nrDone = 0;
-    size_t nrGood = cc->performRequests(requests, chooseTimeout(count),
-                                        nrDone, Logger::REPLICATION);
-    if (nrGood < followers->size()) {
-      // we drop all followers that were not successful:
-      for (size_t i = 0; i < followers->size(); ++i) {
-        bool replicationWorked
-            = requests[i].done &&
-              requests[i].result.status == CL_COMM_RECEIVED &&
-              (requests[i].result.answer_code ==
-                   rest::ResponseCode::ACCEPTED ||
-               requests[i].result.answer_code ==
-                   rest::ResponseCode::CREATED);
-        if (replicationWorked) {
-          bool found;
-          requests[i].result.answer->header(StaticStrings::ErrorCodes, found);
-          replicationWorked = !found;
-        }
-        if (!replicationWorked) {
-          auto const& followerInfo = collection->followers();
-          followerInfo->remove((*followers)[i]);
-          LOG_TOPIC(ERR, Logger::REPLICATION)
-              << "insertLocal: dropping follower "
-              << (*followers)[i] << " for shard " << collectionName;
+    if (cc != nullptr) {
+      // nullptr only happens on controlled shutdown
+      size_t nrDone = 0;
+      size_t nrGood = cc->performRequests(requests, chooseTimeout(count),
+                                          nrDone, Logger::REPLICATION);
+      if (nrGood < followers->size()) {
+        // we drop all followers that were not successful:
+        for (size_t i = 0; i < followers->size(); ++i) {
+          bool replicationWorked
+              = requests[i].done &&
+                requests[i].result.status == CL_COMM_RECEIVED &&
+                (requests[i].result.answer_code ==
+                     rest::ResponseCode::ACCEPTED ||
+                 requests[i].result.answer_code ==
+                     rest::ResponseCode::CREATED);
+          if (replicationWorked) {
+            bool found;
+            requests[i].result.answer->header(StaticStrings::ErrorCodes, found);
+            replicationWorked = !found;
+          }
+          if (!replicationWorked) {
+            auto const& followerInfo = collection->followers();
+            followerInfo->remove((*followers)[i]);
+            LOG_TOPIC(ERR, Logger::REPLICATION)
+                << "insertLocal: dropping follower "
+                << (*followers)[i] << " for shard " << collectionName;
+          }
         }
       }
     }
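The insertLocal hunk above (and the modifyLocal hunk that follows) only wraps the existing synchronous-replication logic in the nullptr guard; the logic itself is unchanged: send the follower requests, count the good answers, and drop every follower whose answer was not an accepted/created response without per-document error codes. Below is a condensed, hypothetical sketch of that follower-drop step, using simplified stand-in types rather than the real ClusterCommRequest / FollowerInfo classes.

```cpp
// Hedged sketch of the follower-drop step wrapped by this hunk; the types are
// simplified stand-ins, not ArangoDB's ClusterCommRequest / FollowerInfo.
#include <iostream>
#include <string>
#include <vector>

struct RequestResult {
  bool done = false;           // request completed at all
  int answerCode = 0;          // HTTP status of the follower's answer
  bool hasErrorCodes = false;  // follower reported per-document errors
};

// Returns the followers that answered successfully; the others would be
// removed from the shard's follower list and logged.
std::vector<std::string> keepGoodFollowers(
    std::vector<std::string> const& followers,
    std::vector<RequestResult> const& results) {
  std::vector<std::string> kept;
  for (size_t i = 0; i < followers.size(); ++i) {
    RequestResult const& r = results[i];
    bool worked = r.done &&
                  (r.answerCode == 201 || r.answerCode == 202) &&
                  !r.hasErrorCodes;
    if (worked) {
      kept.push_back(followers[i]);
    } else {
      std::cout << "dropping follower " << followers[i] << "\n";
    }
  }
  return kept;
}

int main() {
  std::vector<std::string> followers = {"DB1", "DB2"};
  std::vector<RequestResult> results = {{true, 201, false}, {true, 500, false}};
  auto kept = keepGoodFollowers(followers, results);
  std::cout << kept.size() << " follower(s) kept\n";  // prints 1
}
```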
@@ -2175,82 +2178,84 @@ OperationResult Transaction::modifyLocal(

  // Now replicate the good operations on all followers:
  auto cc = arangodb::ClusterComm::instance();
  if (cc != nullptr) {
    // nullptr only happens on controlled shutdown
    std::string path
        = "/_db/" +
          arangodb::basics::StringUtils::urlEncode(_vocbase->name()) +
          "/_api/document/" +
          arangodb::basics::StringUtils::urlEncode(collection->name())
          + "?isRestore=true";

    VPackBuilder payload;

    auto doOneDoc = [&](VPackSlice const& doc, VPackSlice result) {
      VPackObjectBuilder guard(&payload);
      VPackSlice s = result.get(StaticStrings::KeyString);
      payload.add(StaticStrings::KeyString, s);
      s = result.get(StaticStrings::RevString);
      payload.add(StaticStrings::RevString, s);
      TRI_SanitizeObject(doc, payload);
    };

    VPackSlice ourResult = resultBuilder.slice();
    size_t count = 0;
    if (multiCase) {
      VPackArrayBuilder guard(&payload);
      VPackArrayIterator itValue(newValue);
      VPackArrayIterator itResult(ourResult);
      while (itValue.valid() && itResult.valid()) {
        TRI_ASSERT((*itResult).isObject());
        if (!(*itResult).hasKey("error")) {
          doOneDoc(itValue.value(), itResult.value());
          count++;
        }
        itValue.next();
        itResult.next();
      }
    } else {
      VPackArrayBuilder guard(&payload);
      doOneDoc(newValue, ourResult);
      count++;
    }
    if (count > 0) {
      auto body = std::make_shared<std::string>();
      *body = payload.slice().toJson();

      // Now prepare the requests:
      std::vector<ClusterCommRequest> requests;
      for (auto const& f : *followers) {
        requests.emplace_back("server:" + f,
                              operation == TRI_VOC_DOCUMENT_OPERATION_REPLACE ?
                              arangodb::rest::RequestType::PUT :
                              arangodb::rest::RequestType::PATCH,
                              path, body);
      }
      size_t nrDone = 0;
      size_t nrGood = cc->performRequests(requests, chooseTimeout(count),
                                          nrDone, Logger::REPLICATION);
      if (nrGood < followers->size()) {
        // we drop all followers that were not successful:
        for (size_t i = 0; i < followers->size(); ++i) {
          bool replicationWorked
              = requests[i].done &&
                requests[i].result.status == CL_COMM_RECEIVED &&
                (requests[i].result.answer_code ==
                     rest::ResponseCode::ACCEPTED ||
                 requests[i].result.answer_code ==
                     rest::ResponseCode::OK);
          if (replicationWorked) {
            bool found;
            requests[i].result.answer->header(StaticStrings::ErrorCodes, found);
            replicationWorked = !found;
          }
          if (!replicationWorked) {
            auto const& followerInfo = collection->followers();
            followerInfo->remove((*followers)[i]);
            LOG_TOPIC(ERR, Logger::REPLICATION)
                << "modifyLocal: dropping follower "
                << (*followers)[i] << " for shard " << collectionName;
          }
        }
      }
    }
  }
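The substantive change in this hunk (and the neighbouring ones) is the `if (cc != nullptr)` wrapper: during a controlled shutdown `ClusterComm::instance()` may return a null pointer, so the replication step is skipped instead of dereferencing it. A rough sketch of that pattern follows, using a hypothetical `Comm` singleton rather than the real ClusterComm API.

```cpp
#include <iostream>
#include <string>

// Hypothetical communication singleton that is torn down during shutdown.
class Comm {
 public:
  static Comm* instance() { return active ? &comm : nullptr; }
  static void shutdown() { active = false; }
  void send(std::string const& msg) { std::cout << "sent: " << msg << "\n"; }

 private:
  static Comm comm;
  static bool active;
};
Comm Comm::comm;
bool Comm::active = true;

void replicate(std::string const& msg) {
  auto cc = Comm::instance();
  if (cc != nullptr) {
    // nullptr only happens on controlled shutdown: simply skip replication.
    cc->send(msg);
  }
}

int main() {
  replicate("document update");  // sent
  Comm::shutdown();
  replicate("late update");      // silently skipped, no crash
  return 0;
}
```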
@@ -2417,80 +2422,83 @@ OperationResult Transaction::removeLocal(std::string const& collectionName,

  // Now replicate the good operations on all followers:
  auto cc = arangodb::ClusterComm::instance();
  if (cc != nullptr) {
    // nullptr only happens on controlled shutdown
    std::string path
        = "/_db/" +
          arangodb::basics::StringUtils::urlEncode(_vocbase->name()) +
          "/_api/document/" +
          arangodb::basics::StringUtils::urlEncode(collection->name())
          + "?isRestore=true";

    VPackBuilder payload;

    auto doOneDoc = [&](VPackSlice const& doc, VPackSlice result) {
      VPackObjectBuilder guard(&payload);
      VPackSlice s = result.get(StaticStrings::KeyString);
      payload.add(StaticStrings::KeyString, s);
      s = result.get(StaticStrings::RevString);
      payload.add(StaticStrings::RevString, s);
      TRI_SanitizeObject(doc, payload);
    };

    VPackSlice ourResult = resultBuilder.slice();
    size_t count = 0;
    if (value.isArray()) {
      VPackArrayBuilder guard(&payload);
      VPackArrayIterator itValue(value);
      VPackArrayIterator itResult(ourResult);
      while (itValue.valid() && itResult.valid()) {
        TRI_ASSERT((*itResult).isObject());
        if (!(*itResult).hasKey("error")) {
          doOneDoc(itValue.value(), itResult.value());
          count++;
        }
        itValue.next();
        itResult.next();
      }
    } else {
      VPackArrayBuilder guard(&payload);
      doOneDoc(value, ourResult);
      count++;
    }
    if (count > 0) {
      auto body = std::make_shared<std::string>();
      *body = payload.slice().toJson();

      // Now prepare the requests:
      std::vector<ClusterCommRequest> requests;
      for (auto const& f : *followers) {
        requests.emplace_back("server:" + f,
                              arangodb::rest::RequestType::DELETE_REQ,
                              path, body);
      }
      size_t nrDone = 0;
      size_t nrGood = cc->performRequests(requests, chooseTimeout(count),
                                          nrDone, Logger::REPLICATION);
      if (nrGood < followers->size()) {
        // we drop all followers that were not successful:
        for (size_t i = 0; i < followers->size(); ++i) {
          bool replicationWorked
              = requests[i].done &&
                requests[i].result.status == CL_COMM_RECEIVED &&
                (requests[i].result.answer_code ==
                     rest::ResponseCode::ACCEPTED ||
                 requests[i].result.answer_code ==
                     rest::ResponseCode::OK);
          if (replicationWorked) {
            bool found;
            requests[i].result.answer->header(StaticStrings::ErrorCodes, found);
            replicationWorked = !found;
          }
          if (!replicationWorked) {
            auto const& followerInfo = collection->followers();
            followerInfo->remove((*followers)[i]);
            LOG_TOPIC(ERR, Logger::REPLICATION)
                << "removeLocal: dropping follower "
                << (*followers)[i] << " for shard " << collectionName;
          }
        }
      }
    }
  }
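As in modifyLocal, only documents that succeeded locally (no "error" key in their result) are put into the follower payload, and no requests are sent at all when that filter leaves nothing (the `count > 0` check). A compact sketch of that filtering step, with plain standard-library types standing in for VelocyPack slices:

```cpp
#include <iostream>
#include <optional>
#include <string>
#include <vector>

// Stand-in for the local per-document result: either an error or the
// key/revision the leader assigned (illustrative types, not ArangoDB's).
struct LocalResult {
  std::optional<std::string> error;  // set when the local operation failed
  std::string key;
  std::string rev;
};

int main() {
  std::vector<LocalResult> results = {
      {std::nullopt, "k1", "rev1"},
      {std::string("document not found"), "k2", ""},
      {std::nullopt, "k3", "rev3"},
  };

  // Collect only the documents that succeeded locally; they form the
  // payload replicated to the followers.
  std::vector<std::string> payload;
  for (auto const& r : results) {
    if (!r.error) {
      payload.push_back(r.key + "/" + r.rev);
    }
  }

  if (!payload.empty()) {  // corresponds to the `count > 0` check above
    for (auto const& p : payload) {
      std::cout << "replicate " << p << "\n";
    }
  }
  return 0;
}
```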
@@ -2667,44 +2675,45 @@ OperationResult Transaction::truncateLocal(std::string const& collectionName,

  // Now replicate the good operations on all followers:
  auto cc = arangodb::ClusterComm::instance();
  if (cc != nullptr) {
    // nullptr only happens on controlled shutdown
    std::string path
        = "/_db/" +
          arangodb::basics::StringUtils::urlEncode(_vocbase->name()) +
          "/_api/collection/" + collectionName + "/truncate";

    auto body = std::make_shared<std::string>();

    // Now prepare the requests:
    std::vector<ClusterCommRequest> requests;
    for (auto const& f : *followers) {
      requests.emplace_back("server:" + f,
                            arangodb::rest::RequestType::PUT,
                            path, body);
    }
    size_t nrDone = 0;
    size_t nrGood = cc->performRequests(requests, TRX_FOLLOWER_TIMEOUT,
                                        nrDone, Logger::REPLICATION);
    if (nrGood < followers->size()) {
      // we drop all followers that were not successful:
      for (size_t i = 0; i < followers->size(); ++i) {
        bool replicationWorked
            = requests[i].done &&
              requests[i].result.status == CL_COMM_RECEIVED &&
              (requests[i].result.answer_code ==
                   rest::ResponseCode::ACCEPTED ||
               requests[i].result.answer_code ==
                   rest::ResponseCode::OK);
        if (!replicationWorked) {
          auto const& followerInfo = collection->followers();
          followerInfo->remove((*followers)[i]);
          LOG_TOPIC(ERR, Logger::REPLICATION)
              << "truncateLocal: dropping follower "
              << (*followers)[i] << " for shard " << collectionName;
        }
      }
    }
  }

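truncateLocal ships no per-document payload, so it uses the fixed `TRX_FOLLOWER_TIMEOUT` rather than a timeout scaled by document count, and it omits the error-codes header check. A rough sketch of the fan-out-and-count step; `performRequests` here is a simulated stand-in, not the ClusterComm method:

```cpp
#include <iostream>
#include <string>
#include <vector>

struct Request {
  std::string endpoint;
  std::string path;
  bool good;  // filled in by the (simulated) communication layer
};

// Simulated stand-in for ClusterComm::performRequests: pretend every
// follower except the last one answers in time.
size_t performRequests(std::vector<Request>& requests, double timeoutSecs) {
  (void)timeoutSecs;
  size_t nrGood = 0;
  for (size_t i = 0; i < requests.size(); ++i) {
    requests[i].good = (i + 1 < requests.size());
    if (requests[i].good) ++nrGood;
  }
  return nrGood;
}

int main() {
  std::vector<std::string> followers = {"DBServer1", "DBServer2", "DBServer3"};
  std::string path = "/_db/mydb/_api/collection/myshard/truncate";

  // One request per follower, all sharing the same (empty) body.
  std::vector<Request> requests;
  for (auto const& f : followers) {
    requests.push_back({"server:" + f, path, false});
  }

  size_t nrGood = performRequests(requests, 3.0 /* fixed follower timeout */);
  if (nrGood < followers.size()) {
    for (size_t i = 0; i < followers.size(); ++i) {
      if (!requests[i].good) {
        std::cout << "truncate: dropping follower " << followers[i] << "\n";
      }
    }
  }
  return 0;
}
```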
@@ -1348,6 +1348,9 @@ static bool clusterSendToAllServers(
    arangodb::rest::RequestType const& method, std::string const& body) {
  ClusterInfo* ci = ClusterInfo::instance();
  auto cc = ClusterComm::instance();
  if (cc == nullptr) {
    return TRI_ERROR_SHUTTING_DOWN;
  }
  std::string url = "/_db/" + StringUtils::urlEncode(dbname) + "/" + path;

  // Have to propagate to DB Servers
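Unlike the transaction methods, `clusterSendToAllServers` reports the shutdown case to its caller instead of silently skipping work. A small sketch of that guard-clause style with illustrative error constants (the real `TRI_ERROR_*` values live in ArangoDB's error headers):

```cpp
#include <iostream>
#include <string>

// Illustrative error codes; stand-ins for the real TRI_ERROR_* constants.
constexpr int ERROR_NO_ERROR = 0;
constexpr int ERROR_SHUTTING_DOWN = 30;

// Hypothetical communication layer whose singleton disappears on shutdown.
struct Comm {
  static Comm* instance() { return shuttingDown ? nullptr : &comm; }
  static void beginShutdown() { shuttingDown = true; }
  int broadcast(std::string const& url) {
    std::cout << "broadcasting " << url << " to all DB servers\n";
    return ERROR_NO_ERROR;
  }
  static Comm comm;
  static bool shuttingDown;
};
Comm Comm::comm;
bool Comm::shuttingDown = false;

// Guard clause: return a dedicated error code instead of dereferencing a
// communication layer that no longer exists.
int sendToAllServers(std::string const& url) {
  auto cc = Comm::instance();
  if (cc == nullptr) {
    return ERROR_SHUTTING_DOWN;
  }
  return cc->broadcast(url);
}

int main() {
  std::cout << sendToAllServers("/_api/foo") << "\n";  // 0
  Comm::beginShutdown();
  std::cout << sendToAllServers("/_api/foo") << "\n";  // 30
  return 0;
}
```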
@@ -209,7 +209,11 @@ void addReplicationAuthentication(v8::Isolate* isolate,
  if (!hasUsernamePassword) {
    auto cluster = application_features::ApplicationServer::getFeature<ClusterFeature>("Cluster");
    if (cluster->isEnabled()) {
      auto cc = ClusterComm::instance();
      if (cc != nullptr) {
        // nullptr happens only during controlled shutdown
        config._jwt = ClusterComm::instance()->jwt();
      }
    }
  }
}
@@ -2138,31 +2138,33 @@ static void ListDatabasesCoordinator(
  if (!DBServers.empty()) {
    ServerID sid = DBServers[0];
    auto cc = ClusterComm::instance();
    if (cc != nullptr) {
      // nullptr happens only during controlled shutdown
      std::unordered_map<std::string, std::string> headers;
      headers["Authentication"] = TRI_ObjectToString(args[2]);
      auto res = cc->syncRequest(
          "", 0, "server:" + sid, arangodb::rest::RequestType::GET,
          "/_api/database/user", std::string(), headers, 0.0);

      if (res->status == CL_COMM_SENT) {
        // We got an array back as JSON, let's parse it and build a v8
        StringBuffer& body = res->result->getBody();

        std::shared_ptr<VPackBuilder> builder =
            VPackParser::fromJson(body.c_str(), body.length());
        VPackSlice resultSlice = builder->slice();

        if (resultSlice.isObject()) {
          VPackSlice r = resultSlice.get("result");
          if (r.isArray()) {
            uint32_t i = 0;
            v8::Handle<v8::Array> result = v8::Array::New(isolate);
            for (auto const& it : VPackArrayIterator(r)) {
              std::string v = it.copyString();
              result->Set(i++, TRI_V8_STD_STRING(v));
            }
            TRI_V8_RETURN(result);
          }
        }
      }
    }
  }
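The coordinator-side listing above parses the DB server's JSON answer with VelocyPack before building the v8 array. A minimal sketch of just the parsing/extraction part, assuming the standalone velocypack library is available and linked; the JSON body is made up for illustration:

```cpp
#include <iostream>
#include <memory>
#include <string>

#include <velocypack/Builder.h>
#include <velocypack/Iterator.h>
#include <velocypack/Parser.h>
#include <velocypack/Slice.h>
#include <velocypack/velocypack-aliases.h>

int main() {
  // Hypothetical body of a /_api/database/user response.
  std::string body = R"({"error":false,"code":200,"result":["_system","mydb"]})";

  // Same parsing steps as in the hunk above, minus the v8 array building.
  std::shared_ptr<VPackBuilder> builder =
      VPackParser::fromJson(body.c_str(), body.length());
  VPackSlice resultSlice = builder->slice();

  if (resultSlice.isObject()) {
    VPackSlice r = resultSlice.get("result");
    if (r.isArray()) {
      for (auto const& it : VPackArrayIterator(r)) {
        std::cout << it.copyString() << "\n";
      }
    }
  }
  return 0;
}
```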