Merge branch 'devel' of https://github.com/arangodb/arangodb into devel
commit b6df44a348
@@ -1,6 +1,9 @@
 devel
 -----
 
+* change undocumented behaviour in case of invalid revision ids in
+  If-Match and If-None-Match headers from 400 (BAD) to 412 (PRECONDITION
+  FAILED).
 
 v3.2.alpha1 (2017-02-05)
 ------------------------

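For orientation only (not part of the commit): after this change a request whose If-Match or If-None-Match header carries a syntactically invalid revision id is answered with 412 instead of 400. A minimal sketch, assuming a local server plus a hypothetical collection products and document key mykey:

# Before this commit the server answered 400 (BAD); now it answers 412 (PRECONDITION FAILED)
# with errorNum 1200, matching the updated rspec expectations further down in this diff.
curl -X DELETE \
     --header 'If-Match: "*abcd"' \
     --dump-header - \
     http://localhost:8529/_db/_system/_api/document/products/mykey
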
@@ -12,6 +12,7 @@ By default, cloning the github repository will checkout **devel**. This version
 contains the development version of the ArangoDB. Use this branch if you want
 to make changes to the ArangoDB source.
 
+On windows you first [need to allow and enable symlinks for your user](https://github.com/git-for-windows/git/wiki/Symbolic-Links#allowing-non-administrators-to-create-symbolic-links).
 We now use [git submodules](https://git-scm.com/docs/git-submodule) for V8 and Rocksdb.
 Since the V8 git repository also requires external addons to be present, we end
 up with recursive submodules. Thus a clone command now has to look like:
@@ -21,5 +22,8 @@ up with recursive submodules. Thus a clone command now has to look like:
-git submodule update --recursive
+git submodule update --init --recursive
 
 
 
 
 Please checkout the [cookbook](https://docs.arangodb.com/cookbook) on how to
 compile ArangoDB.

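For context (not part of the commit), the full sequence the README describes would look roughly like the following sketch; the repository URL and the devel branch are taken from the commit metadata and the README text above, everything else follows from the submodule note:

git clone https://github.com/arangodb/arangodb.git
cd arangodb
git checkout devel
git submodule update --init --recursive
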
@@ -190,20 +190,20 @@ describe ArangoDB do
 hdr = { "if-match" => "\"*abcd\"" }
 doc = ArangoDB.log_delete("#{prefix}-rev-invalid", cmd, :headers => hdr )
 
-doc.code.should eq(400)
+doc.code.should eq(412)
 doc.parsed_response['error'].should eq(true)
-doc.parsed_response['errorNum'].should eq(400)
-doc.parsed_response['code'].should eq(400)
+doc.parsed_response['errorNum'].should eq(1200)
+doc.parsed_response['code'].should eq(412)
 
 # delete document, invalid revision
 cmd = "/_api/document/#{did}"
 hdr = { "if-match" => "'*abcd'" }
 doc = ArangoDB.log_delete("#{prefix}-rev-invalid", cmd, :headers => hdr)
 
-doc.code.should eq(400)
+doc.code.should eq(412)
 doc.parsed_response['error'].should eq(true)
-doc.parsed_response['errorNum'].should eq(400)
-doc.parsed_response['code'].should eq(400)
+doc.parsed_response['errorNum'].should eq(1200)
+doc.parsed_response['code'].should eq(412)
 
 # delete document, correct revision
 cmd = "/_api/document/#{did}"
@@ -567,7 +567,7 @@ describe ArangoDB do
 hdr = { "if-match" => "'*abcd'" }
 doc = ArangoDB.log_head("#{prefix}-head-rev-invalid", cmd, :headers => hdr)
 
-doc.code.should eq(400)
+doc.code.should eq(412)
 end
 
 end

@@ -266,20 +266,20 @@ describe ArangoDB do
 hdr = { "if-match" => "\"*abcd\"" }
 doc = ArangoDB.log_put("#{prefix}-rev-invalid", cmd, :headers => hdr, :body => body)
 
-doc.code.should eq(400)
+doc.code.should eq(412)
 doc.parsed_response['error'].should eq(true)
-doc.parsed_response['errorNum'].should eq(400)
-doc.parsed_response['code'].should eq(400)
+doc.parsed_response['errorNum'].should eq(1200)
+doc.parsed_response['code'].should eq(412)
 
 # update document, invalid revision
 cmd = "/_api/document/#{did}"
 hdr = { "if-match" => "'*abcd'" }
 doc = ArangoDB.log_put("#{prefix}-rev-invalid", cmd, :headers => hdr, :body => body)
 
-doc.code.should eq(400)
+doc.code.should eq(412)
 doc.parsed_response['error'].should eq(true)
-doc.parsed_response['errorNum'].should eq(400)
-doc.parsed_response['code'].should eq(400)
+doc.parsed_response['errorNum'].should eq(1200)
+doc.parsed_response['code'].should eq(412)
 
 # update document, correct revision
 cmd = "/_api/document/#{did}"

@@ -1529,7 +1529,15 @@ AgencyCommResult AgencyComm::send(
 << "': " << body;
 
 arangodb::httpclient::SimpleHttpClient client(connection, timeout, false);
-client.setJwt(ClusterComm::instance()->jwt());
+auto cc = ClusterComm::instance();
+if (cc == nullptr) {
+// nullptr only happens during controlled shutdown
+result._message = "could not send request to agency because of shutdown";
+LOG_TOPIC(TRACE, Logger::AGENCYCOMM) << "could not send request to agency";
+
+return result;
+}
+client.setJwt(cc->jwt());
 client.keepConnectionOnDestruction(true);
 
 // set up headers

@@ -313,6 +313,11 @@ bool Agent::recvAppendEntriesRPC(
 
 /// Leader's append entries
 void Agent::sendAppendEntriesRPC() {
+auto cc = ClusterComm::instance();
+if (cc == nullptr) {
+// nullptr only happens during controlled shutdown
+return;
+}
 
 // _lastSent, _lastHighest and _confirmed only accessed in main thread
 std::string const myid = id();
@@ -387,7 +392,7 @@ void Agent::sendAppendEntriesRPC() {
 // Send request
 auto headerFields =
 std::make_unique<std::unordered_map<std::string, std::string>>();
-arangodb::ClusterComm::instance()->asyncRequest(
+cc->asyncRequest(
 "1", 1, _config.poolAt(followerId),
 arangodb::rest::RequestType::POST, path.str(),
 std::make_shared<std::string>(builder.toJson()), headerFields,
@@ -1002,6 +1007,11 @@ TimePoint const& Agent::leaderSince() const {
 
 // Notify inactive pool members of configuration change()
 void Agent::notifyInactive() const {
+auto cc = ClusterComm::instance();
+if (cc == nullptr) {
+// nullptr only happens during controlled shutdown
+return;
+}
 
 std::map<std::string, std::string> pool = _config.pool();
 std::string path = "/_api/agency_priv/inform";
@@ -1023,7 +1033,7 @@ void Agent::notifyInactive() const {
 auto headerFields =
 std::make_unique<std::unordered_map<std::string, std::string>>();
 
-arangodb::ClusterComm::instance()->asyncRequest(
+cc->asyncRequest(
 "1", 1, p.second, arangodb::rest::RequestType::POST,
 path, std::make_shared<std::string>(out.toJson()), headerFields,
 nullptr, 1.0, true);

@@ -66,11 +66,15 @@ void AgentActivator::run() {
 
 auto headerFields =
 std::make_unique<std::unordered_map<std::string, std::string>>();
-arangodb::ClusterComm::instance()->asyncRequest(
+auto cc = arangodb::ClusterComm::instance();
+if (cc != nullptr) {
+// nullptr only happens on controlled shutdown
+cc->asyncRequest(
 "1", 1, endpoint, rest::RequestType::POST, path,
 std::make_shared<std::string>(allLogs->toJson()), headerFields,
 std::make_shared<ActivationCallback>(_agent, _failed, _replacement),
 5.0, true, 1.0);
+}
 
 _cv.wait(10000000); // 10 sec
 

@@ -389,11 +389,17 @@ void Constituent::callElection() {
 << "&prevLogTerm=" << _agent->lastLog().term;
 
 // Ask everyone for their vote
+auto cc = ClusterComm::instance();
+if (cc == nullptr) {
+// only happens on controlled shutdown
+follow(_term);
+return;
+}
 for (auto const& i : active) {
 if (i != _id) {
 auto headerFields =
 std::make_unique<std::unordered_map<std::string, std::string>>();
-ClusterComm::instance()->asyncRequest(
+cc->asyncRequest(
 "", coordinatorTransactionID, _agent->config().poolAt(i),
 rest::RequestType::GET, path.str(),
 std::make_shared<std::string>(body), headerFields,
@@ -419,8 +425,7 @@ void Constituent::callElection() {
 break;
 }
 
-auto res = ClusterComm::instance()->wait(
-"", coordinatorTransactionID, 0, "",
+auto res = cc->wait("", coordinatorTransactionID, 0, "",
 duration<double>(steady_clock::now()-timeout).count());
 
 if (res.status == CL_COMM_SENT) {
@@ -461,7 +466,7 @@ void Constituent::callElection() {
 << (yea >= majority ? "yeas" : "nays") << " have it.";
 
 // Clean up
-ClusterComm::instance()->drop("", coordinatorTransactionID, 0, "");
+cc->drop("", coordinatorTransactionID, 0, "");
 
 }
 

@@ -49,6 +49,11 @@ Inception::~Inception() { shutdown(); }
 /// - Create outgoing gossip.
 /// - Send to all peers
 void Inception::gossip() {
+auto cc = ClusterComm::instance();
+if (cc == nullptr) {
+// nullptr only happens during controlled shutdown
+return;
+}
 
 LOG_TOPIC(INFO, Logger::AGENCY) << "Entering gossip phase ...";
 using namespace std::chrono;
@@ -93,7 +98,7 @@ void Inception::gossip() {
 std::make_unique<std::unordered_map<std::string, std::string>>();
 LOG_TOPIC(DEBUG, Logger::AGENCY) << "Sending gossip message: "
 << out->toJson() << " to peer " << clientid;
-arangodb::ClusterComm::instance()->asyncRequest(
+cc->asyncRequest(
 clientid, 1, p, rest::RequestType::POST, path,
 std::make_shared<std::string>(out->toJson()), hf,
 std::make_shared<GossipCallback>(_agent, version), 1.0, true, 0.5);
@@ -116,7 +121,7 @@ void Inception::gossip() {
 std::make_unique<std::unordered_map<std::string, std::string>>();
 LOG_TOPIC(DEBUG, Logger::AGENCY) << "Sending gossip message: "
 << out->toJson() << " to pool member " << clientid;
-arangodb::ClusterComm::instance()->asyncRequest(
+cc->asyncRequest(
 clientid, 1, pair.second, rest::RequestType::POST, path,
 std::make_shared<std::string>(out->toJson()), hf,
 std::make_shared<GossipCallback>(_agent, version), 1.0, true, 0.5);
@@ -156,6 +161,11 @@ void Inception::gossip() {
 
 
 bool Inception::restartingActiveAgent() {
+auto cc = ClusterComm::instance();
+if (cc == nullptr) {
+// nullptr only happens during controlled shutdown
+return false;
+}
 
 LOG_TOPIC(INFO, Logger::AGENCY) << "Restarting agent from persistence ...";
 
@@ -200,7 +210,7 @@ bool Inception::restartingActiveAgent() {
 std::vector<std::string> informed;
 
 for (auto& p : gp) {
-auto comres = arangodb::ClusterComm::instance()->syncRequest(
+auto comres = cc->syncRequest(
 clientId, 1, p, rest::RequestType::POST, path, greetstr,
 std::unordered_map<std::string, std::string>(), 2.0);
 if (comres->status == CL_COMM_SENT) {
@@ -224,7 +234,7 @@ bool Inception::restartingActiveAgent() {
 
 if (p.first != myConfig.id() && p.first != "") {
 
-auto comres = arangodb::ClusterComm::instance()->syncRequest(
+auto comres = cc->syncRequest(
 clientId, 1, p.second, rest::RequestType::POST, path, greetstr,
 std::unordered_map<std::string, std::string>(), 2.0);
 
@@ -249,7 +259,7 @@ bool Inception::restartingActiveAgent() {
 
 // Contact leader to update endpoint
 if (theirLeaderId != theirId) {
-comres = arangodb::ClusterComm::instance()->syncRequest(
+comres = cc->syncRequest(
 clientId, 1, theirLeaderEp, rest::RequestType::POST, path,
 greetstr, std::unordered_map<std::string, std::string>(), 2.0);
 // Failed to contact leader move on until we do. This way at
@@ -365,6 +375,11 @@ void Inception::reportVersionForEp(std::string const& endpoint, size_t version)
 
 
 bool Inception::estimateRAFTInterval() {
+auto cc = arangodb::ClusterComm::instance();
+if (cc == nullptr) {
+// nullptr only happens during controlled shutdown
+return false;
+}
 
 using namespace std::chrono;
 LOG_TOPIC(INFO, Logger::AGENCY) << "Estimating RAFT timeouts ...";
@@ -382,7 +397,7 @@ bool Inception::estimateRAFTInterval() {
 std::string clientid = peer.first + std::to_string(i);
 auto hf =
 std::make_unique<std::unordered_map<std::string, std::string>>();
-arangodb::ClusterComm::instance()->asyncRequest(
+cc->asyncRequest(
 clientid, 1, peer.second, rest::RequestType::GET, path,
 std::make_shared<std::string>(), hf,
 std::make_shared<MeasureCallback>(this, peer.second, timeStamp()),
@@ -448,7 +463,7 @@ bool Inception::estimateRAFTInterval() {
 for (auto const& peer : config.pool()) {
 if (peer.first != myid) {
 auto clientId = "1";
-auto comres = arangodb::ClusterComm::instance()->syncRequest(
+auto comres = cc->syncRequest(
 clientId, 1, peer.second, rest::RequestType::POST, path,
 measjson, std::unordered_map<std::string, std::string>(), 5.0);
 }

@@ -352,11 +352,15 @@ std::vector<bool> Store::apply(
 auto headerFields =
 std::make_unique<std::unordered_map<std::string, std::string>>();
 
-arangodb::ClusterComm::instance()->asyncRequest(
+auto cc = ClusterComm::instance();
+if (cc != nullptr) {
+// nullptr only happens on controlled shutdown
+cc->asyncRequest(
 "1", 1, endpoint, rest::RequestType::POST, path,
 std::make_shared<std::string>(body.toString()), headerFields,
-std::make_shared<StoreCallback>(path, body.toJson()), 1.0, true, 0.01);
-
+std::make_shared<StoreCallback>(path, body.toJson()), 1.0, true,
+0.01);
+}
 } else {
 LOG_TOPIC(WARN, Logger::AGENCY) << "Malformed URL " << url;
 }

@@ -1233,6 +1233,8 @@ std::unique_ptr<ClusterCommResult> RemoteBlock::sendRequest(
 std::string const& body) const {
 DEBUG_BEGIN_BLOCK();
+auto cc = ClusterComm::instance();
+if (cc != nullptr) {
+// nullptr only happens on controlled shutdown
 
 // Later, we probably want to set these sensibly:
 ClientTransactionID const clientTransactionId = "AQL";
@@ -1257,6 +1259,8 @@ std::unique_ptr<ClusterCommResult> RemoteBlock::sendRequest(
 
 return result;
 }
+}
+return std::make_unique<ClusterCommResult>();
 
 // cppcheck-suppress style
 DEBUG_END_BLOCK();

@@ -582,6 +582,8 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
 // << "\n";
 
+auto cc = arangodb::ClusterComm::instance();
+if (cc != nullptr) {
+// nullptr only happens on controlled shutdown
 
 std::string const url("/_db/"
 + arangodb::basics::StringUtils::urlEncode(collection->vocbase->name()) +
@@ -593,6 +595,7 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
 arangodb::rest::RequestType::POST,
 url, body, headers, nullptr, 30.0);
+}
 }
 
 /// @brief aggregateQueryIds, get answers for all shards in a Scatter/Gather
 void aggregateQueryIds(EngineInfo* info,
@@ -670,8 +673,8 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
 // now send the plan to the remote servers
 arangodb::CoordTransactionID coordTransactionID = TRI_NewTickServer();
 auto cc = arangodb::ClusterComm::instance();
-TRI_ASSERT(cc != nullptr);
-
+if (cc != nullptr) {
+// nullptr only happens on controlled shutdown
 // iterate over all shards of the collection
 size_t nr = 0;
 auto shardIds = collection->shardIds();
@@ -693,6 +696,7 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
 
 aggregateQueryIds(info, cc, coordTransactionID, collection);
 }
+}
 
 /// @brief buildEngineCoordinator, for a single piece
 ExecutionEngine* buildEngineCoordinator(EngineInfo* info) {
@@ -931,6 +935,10 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
 query->vocbase()->name()) +
 "/_internal/traverser");
+auto cc = arangodb::ClusterComm::instance();
+if (cc == nullptr) {
+// nullptr only happens on controlled shutdown
+return;
+}
 bool hasVars = false;
 VPackBuilder varInfo;
 std::vector<aql::Variable const*> vars;
@@ -1229,6 +1237,11 @@ ExecutionEngine* ExecutionEngine::instantiateFromPlan(
 // Lock shard on DBserver:
 arangodb::CoordTransactionID coordTransactionID = TRI_NewTickServer();
+auto cc = arangodb::ClusterComm::instance();
+if (cc == nullptr) {
+// nullptr only happens on controlled shutdown
+THROW_ARANGO_EXCEPTION( TRI_ERROR_SHUTTING_DOWN);
+}
 
 TRI_vocbase_t* vocbase = query->vocbase();
 std::unique_ptr<ClusterCommResult> res;
 std::unordered_map<std::string, std::string> headers;
@@ -1263,6 +1276,8 @@ ExecutionEngine* ExecutionEngine::instantiateFromPlan(
 // the DBservers via HTTP:
 TRI_vocbase_t* vocbase = query->vocbase();
+auto cc = arangodb::ClusterComm::instance();
+if (cc != nullptr) {
+// nullptr only happens during controlled shutdown
 for (auto& q : inst.get()->queryIds) {
 std::string theId = q.first;
 std::string queryId = q.second;
@@ -1330,6 +1345,7 @@ ExecutionEngine* ExecutionEngine::instantiateFromPlan(
 }
 }
 }
+}
 throw;
 }
 } else {

|
|||
// We have to clean up the engines in Coordinator Case.
|
||||
if (arangodb::ServerState::instance()->isCoordinator()) {
|
||||
auto cc = arangodb::ClusterComm::instance();
|
||||
if (cc != nullptr) {
|
||||
// nullptr only happens on controlled server shutdown
|
||||
std::string const url(
|
||||
"/_db/" + arangodb::basics::StringUtils::urlEncode(_trx->vocbase()->name()) +
|
||||
"/_internal/traverser/");
|
||||
|
@ -212,6 +214,7 @@ int TraversalBlock::shutdown(int errorCode) {
|
|||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return ExecutionBlock::shutdown(errorCode);
|
||||
|
||||
|
|
|
@@ -562,6 +562,10 @@ int revisionOnCoordinator(std::string const& dbname,
 // Set a few variables needed for our work:
 ClusterInfo* ci = ClusterInfo::instance();
 auto cc = ClusterComm::instance();
+if (cc == nullptr) {
+// nullptr happens only during controlled shutdown
+return TRI_ERROR_SHUTTING_DOWN;
+}
 
 // First determine the collection ID from the name:
 std::shared_ptr<LogicalCollection> collinfo;
@@ -636,6 +640,10 @@ int figuresOnCoordinator(std::string const& dbname, std::string const& collname,
 // Set a few variables needed for our work:
 ClusterInfo* ci = ClusterInfo::instance();
 auto cc = ClusterComm::instance();
+if (cc == nullptr) {
+// nullptr happens only during controlled shutdown
+return TRI_ERROR_SHUTTING_DOWN;
+}
 
 // First determine the collection ID from the name:
 std::shared_ptr<LogicalCollection> collinfo;
@@ -701,6 +709,10 @@ int countOnCoordinator(std::string const& dbname, std::string const& collname,
 // Set a few variables needed for our work:
 ClusterInfo* ci = ClusterInfo::instance();
 auto cc = ClusterComm::instance();
+if (cc == nullptr) {
+// nullptr happens only during controlled shutdown
+return TRI_ERROR_SHUTTING_DOWN;
+}
 
 result.clear();
 
@@ -771,6 +783,10 @@ int createDocumentOnCoordinator(
 // Set a few variables needed for our work:
 ClusterInfo* ci = ClusterInfo::instance();
 auto cc = ClusterComm::instance();
+if (cc == nullptr) {
+// nullptr happens only during controlled shutdown
+return TRI_ERROR_SHUTTING_DOWN;
+}
 
 // First determine the collection ID from the name:
 std::shared_ptr<LogicalCollection> collinfo;
@@ -906,6 +922,10 @@ int deleteDocumentOnCoordinator(
 // Set a few variables needed for our work:
 ClusterInfo* ci = ClusterInfo::instance();
 auto cc = ClusterComm::instance();
+if (cc == nullptr) {
+// nullptr happens only during controlled shutdown
+return TRI_ERROR_SHUTTING_DOWN;
+}
 
 // First determine the collection ID from the name:
 std::shared_ptr<LogicalCollection> collinfo;
@@ -1135,6 +1155,10 @@ int truncateCollectionOnCoordinator(std::string const& dbname,
 // Set a few variables needed for our work:
 ClusterInfo* ci = ClusterInfo::instance();
 auto cc = ClusterComm::instance();
+if (cc == nullptr) {
+// nullptr happens only during controlled shutdown
+return TRI_ERROR_SHUTTING_DOWN;
+}
 
 // First determine the collection ID from the name:
 std::shared_ptr<LogicalCollection> collinfo;
@@ -1191,6 +1215,10 @@ int getDocumentOnCoordinator(
 // Set a few variables needed for our work:
 ClusterInfo* ci = ClusterInfo::instance();
 auto cc = ClusterComm::instance();
+if (cc == nullptr) {
+// nullptr happens only during controlled shutdown
+return TRI_ERROR_SHUTTING_DOWN;
+}
 
 // First determine the collection ID from the name:
 std::shared_ptr<LogicalCollection> collinfo;
@@ -1462,6 +1490,10 @@ int fetchEdgesFromEngines(
 size_t& filtered,
 size_t& read) {
 auto cc = ClusterComm::instance();
+if (cc == nullptr) {
+// nullptr happens only during controlled shutdown
+return TRI_ERROR_SHUTTING_DOWN;
+}
 // TODO map id => ServerID if possible
 // And go fast-path
 
@@ -1546,6 +1578,10 @@ void fetchVerticesFromEngines(
 result,
 VPackBuilder& builder) {
 auto cc = ClusterComm::instance();
+if (cc == nullptr) {
+// nullptr happens only during controlled shutdown
+return;
+}
 // TODO map id => ServerID if possible
 // And go fast-path
 
@@ -1636,6 +1672,10 @@ int getFilteredEdgesOnCoordinator(
 // Set a few variables needed for our work:
 ClusterInfo* ci = ClusterInfo::instance();
 auto cc = ClusterComm::instance();
+if (cc == nullptr) {
+// nullptr happens only during controlled shutdown
+return TRI_ERROR_SHUTTING_DOWN;
+}
 
 // First determine the collection ID from the name:
 std::shared_ptr<LogicalCollection> collinfo =
@@ -1755,6 +1795,10 @@ int modifyDocumentOnCoordinator(
 // Set a few variables needed for our work:
 ClusterInfo* ci = ClusterInfo::instance();
 auto cc = ClusterComm::instance();
+if (cc == nullptr) {
+// nullptr happens only during controlled shutdown
+return TRI_ERROR_SHUTTING_DOWN;
+}
 
 // First determine the collection ID from the name:
 std::shared_ptr<LogicalCollection> collinfo =
@@ -2005,6 +2049,10 @@ int modifyDocumentOnCoordinator(
 int flushWalOnAllDBServers(bool waitForSync, bool waitForCollector) {
 ClusterInfo* ci = ClusterInfo::instance();
 auto cc = ClusterComm::instance();
+if (cc == nullptr) {
+// nullptr happens only during controlled shutdown
+return TRI_ERROR_SHUTTING_DOWN;
+}
 std::vector<ServerID> DBservers = ci->getCurrentDBServers();
 CoordTransactionID coordTransactionID = TRI_NewTickServer();
 std::string url = std::string("/_admin/wal/flush?waitForSync=") +

|
|||
ClusterInfo::instance()->flush();
|
||||
|
||||
// turn on error logging now
|
||||
if (!ClusterComm::instance()->enableConnectionErrorLogging(true)) {
|
||||
auto cc = ClusterComm::instance();
|
||||
if (cc != nullptr && cc->enableConnectionErrorLogging(true)) {
|
||||
LOG_TOPIC(DEBUG, Logger::HEARTBEAT)
|
||||
<< "created coordinator databases for the first time";
|
||||
}
|
||||
|
|
|
@ -1811,8 +1811,8 @@ static void JS_AsyncRequest(v8::FunctionCallbackInfo<v8::Value> const& args) {
|
|||
auto cc = ClusterComm::instance();
|
||||
|
||||
if (cc == nullptr) {
|
||||
TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
|
||||
"clustercomm object not found");
|
||||
TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_SHUTTING_DOWN,
|
||||
"clustercomm object not found (JS_AsyncRequest)");
|
||||
}
|
||||
|
||||
arangodb::rest::RequestType reqType;
|
||||
|
@ -1878,7 +1878,7 @@ static void JS_SyncRequest(v8::FunctionCallbackInfo<v8::Value> const& args) {
|
|||
auto cc = ClusterComm::instance();
|
||||
|
||||
if (cc == nullptr) {
|
||||
TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
|
||||
TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_SHUTTING_DOWN,
|
||||
"clustercomm object not found");
|
||||
}
|
||||
|
||||
|
@ -1931,7 +1931,7 @@ static void JS_Enquire(v8::FunctionCallbackInfo<v8::Value> const& args) {
|
|||
|
||||
if (cc == nullptr) {
|
||||
TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
|
||||
"clustercomm object not found");
|
||||
"clustercomm object not found (JS_SyncRequest)");
|
||||
}
|
||||
|
||||
OperationID operationID = TRI_ObjectToUInt64(args[0], true);
|
||||
|
@ -1967,8 +1967,8 @@ static void JS_Wait(v8::FunctionCallbackInfo<v8::Value> const& args) {
|
|||
auto cc = ClusterComm::instance();
|
||||
|
||||
if (cc == nullptr) {
|
||||
TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
|
||||
"clustercomm object not found");
|
||||
TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_SHUTTING_DOWN,
|
||||
"clustercomm object not found (JS_Wait)");
|
||||
}
|
||||
|
||||
ClientTransactionID myclientTransactionID = "";
|
||||
|
@ -2038,7 +2038,7 @@ static void JS_Drop(v8::FunctionCallbackInfo<v8::Value> const& args) {
|
|||
|
||||
if (cc == nullptr) {
|
||||
TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
|
||||
"clustercomm object not found");
|
||||
"clustercomm object not found (JS_Drop)");
|
||||
}
|
||||
|
||||
ClientTransactionID myclientTransactionID = "";
|
||||
|
@ -2116,9 +2116,13 @@ static void JS_ClusterDownload(v8::FunctionCallbackInfo<v8::Value> const& args)
|
|||
}
|
||||
options->Set(TRI_V8_ASCII_STRING("headers"), headers);
|
||||
|
||||
std::string const authorization = "bearer " + ClusterComm::instance()->jwt();
|
||||
auto cc = ClusterComm::instance();
|
||||
if (cc != nullptr) {
|
||||
// nullptr happens only during controlled shutdown
|
||||
std::string authorization = "bearer " + ClusterComm::instance()->jwt();
|
||||
v8::Handle<v8::String> v8Authorization = TRI_V8_STD_STRING(authorization);
|
||||
headers->Set(TRI_V8_ASCII_STRING("Authorization"), v8Authorization);
|
||||
}
|
||||
args[2] = options;
|
||||
}
|
||||
TRI_V8_TRY_CATCH_END
|
||||
|
|
|
@@ -203,23 +203,19 @@ bool RestDocumentHandler::readSingleDocument(bool generateBody) {
 
 // check for an etag
 bool isValidRevision;
-TRI_voc_rid_t const ifNoneRid =
+TRI_voc_rid_t ifNoneRid =
 extractRevision("if-none-match", isValidRevision);
 if (!isValidRevision) {
-generateError(rest::ResponseCode::BAD,
-TRI_ERROR_HTTP_BAD_PARAMETER, "invalid revision number");
-return false;
+ifNoneRid = 1; // an impossible rev, so precondition failed will happen
 }
 
 OperationOptions options;
 options.ignoreRevs = true;
 
-TRI_voc_rid_t const ifRid =
+TRI_voc_rid_t ifRid =
 extractRevision("if-match", isValidRevision);
 if (!isValidRevision) {
-generateError(rest::ResponseCode::BAD,
-TRI_ERROR_HTTP_BAD_PARAMETER, "invalid revision number");
-return false;
+ifRid = 1; // an impossible rev, so precondition failed will happen
 }
 
 VPackBuilder builder;
@@ -396,9 +392,7 @@ bool RestDocumentHandler::modifyDocument(bool isPatch) {
 bool isValidRevision;
 revision = extractRevision("if-match", isValidRevision);
 if (!isValidRevision) {
-generateError(rest::ResponseCode::BAD,
-TRI_ERROR_HTTP_BAD_PARAMETER, "invalid revision number");
-return false;
+revision = 1; // an impossible revision, so precondition failed
 }
 VPackSlice keyInBody = body.get(StaticStrings::KeyString);
 if ((revision != 0 && TRI_ExtractRevisionId(body) != revision) ||
@@ -502,9 +496,7 @@ bool RestDocumentHandler::deleteDocument() {
 bool isValidRevision = false;
 revision = extractRevision("if-match", isValidRevision);
 if (!isValidRevision) {
-generateError(rest::ResponseCode::BAD,
-TRI_ERROR_HTTP_BAD_PARAMETER, "invalid revision number");
-return false;
+revision = 1; // an impossible revision, so precondition failed
 }
 }
 

@@ -775,6 +775,12 @@ void RestReplicationHandler::handleTrampolineCoordinator() {
 
 // Set a few variables needed for our work:
 auto cc = ClusterComm::instance();
+if (cc == nullptr) {
+// nullptr happens only during controlled shutdown
+generateError(rest::ResponseCode::BAD, TRI_ERROR_SHUTTING_DOWN,
+"shutting down server");
+return;
+}
 
 std::unique_ptr<ClusterCommResult> res;
 if (!useVpp) {

@@ -1917,6 +1917,8 @@ OperationResult Transaction::insertLocal(std::string const& collectionName,
 path, body);
 }
+auto cc = arangodb::ClusterComm::instance();
+if (cc != nullptr) {
+// nullptr only happens on controlled shutdown
 size_t nrDone = 0;
 size_t nrGood = cc->performRequests(requests, chooseTimeout(count),
 nrDone, Logger::REPLICATION);
@@ -1946,6 +1948,7 @@ OperationResult Transaction::insertLocal(std::string const& collectionName,
 }
 }
 }
+}
 
 if (options.silent) {
 // We needed the results, but do not want to report:
@@ -2175,7 +2178,8 @@ OperationResult Transaction::modifyLocal(
 
 // Now replicate the good operations on all followers:
 auto cc = arangodb::ClusterComm::instance();
-
+if (cc != nullptr) {
+// nullptr only happens on controlled shutdown
 std::string path
 = "/_db/" +
 arangodb::basics::StringUtils::urlEncode(_vocbase->name()) +
@@ -2256,6 +2260,7 @@ OperationResult Transaction::modifyLocal(
 }
 }
 }
+}
 
 if (options.silent) {
 // We needed the results, but do not want to report:
@@ -2417,6 +2422,8 @@ OperationResult Transaction::removeLocal(std::string const& collectionName,
 
 // Now replicate the good operations on all followers:
 auto cc = arangodb::ClusterComm::instance();
+if (cc != nullptr) {
+// nullptr only happens on controled shutdown
 
 std::string path
 = "/_db/" +
@@ -2496,6 +2503,7 @@ OperationResult Transaction::removeLocal(std::string const& collectionName,
 }
 }
 }
+}
 
 if (options.silent) {
 // We needed the results, but do not want to report:
@@ -2667,7 +2675,8 @@ OperationResult Transaction::truncateLocal(std::string const& collectionName,
 
 // Now replicate the good operations on all followers:
 auto cc = arangodb::ClusterComm::instance();
-
+if (cc != nullptr) {
+// nullptr only happens on controlled shutdown
 std::string path
 = "/_db/" +
 arangodb::basics::StringUtils::urlEncode(_vocbase->name()) +
@@ -2704,7 +2713,7 @@ OperationResult Transaction::truncateLocal(std::string const& collectionName,
 }
 }
 }
-
+}
 }
 }
 

@@ -1348,6 +1348,9 @@ static bool clusterSendToAllServers(
 arangodb::rest::RequestType const& method, std::string const& body) {
 ClusterInfo* ci = ClusterInfo::instance();
+auto cc = ClusterComm::instance();
+if (cc == nullptr) {
+return TRI_ERROR_SHUTTING_DOWN;
+}
 std::string url = "/_db/" + StringUtils::urlEncode(dbname) + "/" + path;
 
 // Have to propagate to DB Servers

@@ -209,10 +209,14 @@ void addReplicationAuthentication(v8::Isolate* isolate,
 if (!hasUsernamePassword) {
 auto cluster = application_features::ApplicationServer::getFeature<ClusterFeature>("Cluster");
 if (cluster->isEnabled()) {
+auto cc = ClusterComm::instance();
+if (cc != nullptr) {
+// nullptr happens only during controlled shutdown
 config._jwt = ClusterComm::instance()->jwt();
+}
 }
 }
 }
 
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief sync data from a remote master

@@ -2138,7 +2138,8 @@ static void ListDatabasesCoordinator(
 if (!DBServers.empty()) {
 ServerID sid = DBServers[0];
 auto cc = ClusterComm::instance();
-
+if (cc != nullptr) {
+// nullptr happens only during controlled shutdown
 std::unordered_map<std::string, std::string> headers;
 headers["Authentication"] = TRI_ObjectToString(args[2]);
 auto res = cc->syncRequest(
@@ -2167,6 +2168,7 @@ static void ListDatabasesCoordinator(
 }
 }
 }
+}
 if (++tries >= 2) {
 break;
 }