mirror of https://gitee.com/bigwinds/arangodb
Convert many uses of ClusterComm to Fuerte (#10154)
parent 4f70d15dc9
commit 13e24b2db9
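The recurring change in this commit: blocking ClusterComm::syncRequest / asyncRequest calls are replaced by future-returning network::sendRequest calls on a fuerte ConnectionPool, with errors mapped back through network::fuerteToArangoErrorCode / fuerteToArangoErrorMessage. The following is a minimal, hedged sketch of the new call shape, assuming the ArangoDB headers used in the diff below ("Network/Methods.h", "Network/NetworkFeature.h", "Network/Utils.h"), the usual velocypack aliases, and `using namespace arangodb;` in scope; the helper name `sendSetupRequest` is illustrative only, not part of the commit.

// Sketch only: shows the converted call shape, not a drop-in implementation.
Result sendSetupRequest(network::ConnectionPool* pool, std::string const& server,
                        std::string const& url, VPackBuffer<uint8_t> body,
                        double timeout) {
  if (pool == nullptr) {  // nullptr only happens during controlled shutdown
    return Result(TRI_ERROR_SHUTTING_DOWN);
  }
  network::RequestOptions options;
  options.timeout = network::Timeout(timeout);
  network::Headers headers;
  // The returned future replaces ClusterComm::syncRequest; .get() blocks
  // exactly where the old code was synchronous.
  auto res = network::sendRequest(pool, "server:" + server, fuerte::RestVerb::Post,
                                  url, std::move(body), headers, options)
                 .get();
  if (res.fail()) {
    return Result(network::fuerteToArangoErrorCode(res),
                  network::fuerteToArangoErrorMessage(res));
  }
  return Result();
}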
@@ -91,6 +91,7 @@ struct RequestHeader final : public MessageHeader {
// accept header accessors
ContentType acceptType() const { return _acceptType; }
void acceptType(ContentType type) { _acceptType = type; }
void acceptType(std::string const& type) { _acceptType = to_ContentType(type); }

// query parameter helpers
void addParameter(std::string const& key, std::string const& value);

@@ -207,7 +208,7 @@ class Request final : public Message {
};

// Response contains the message resulting from a request to a server.
class Response final : public Message {
class Response : public Message {
public:
Response(ResponseHeader reqHeader = ResponseHeader())
: header(std::move(reqHeader)), _payloadOffset(0) {}

@@ -47,8 +47,6 @@ namespace transaction {
class Methods;
}

struct ClusterCommResult;

namespace aql {
class AqlItemBlock;
struct Collection;

@@ -21,6 +21,11 @@
/// @author Max Neunhoeffer
////////////////////////////////////////////////////////////////////////////////

#include <type_traits>

#include <velocypack/Iterator.h>
#include <velocypack/velocypack-aliases.h>

#include "ClusterNodes.h"

#include "Aql/AqlValue.h"

@@ -45,11 +50,9 @@
#include "Aql/types.h"
#include "Basics/VelocyPackHelper.h"
#include "Cluster/ServerState.h"

#include "Logger/LogMacros.h"
#include "Transaction/Methods.h"

#include <type_traits>

using namespace arangodb;
using namespace arangodb::basics;
using namespace arangodb::aql;

@@ -24,7 +24,7 @@
#ifndef ARANGOD_AQL_COLLECTION_H
#define ARANGOD_AQL_COLLECTION_H 1

#include "Cluster/ClusterComm.h"
#include "Cluster/ClusterTypes.h"
#include "VocBase/AccessMode.h"
#include "VocBase/vocbase.h"

@@ -29,10 +29,14 @@
#include "Aql/GraphNode.h"
#include "Aql/Query.h"
#include "Aql/QuerySnippet.h"
#include "Cluster/ClusterComm.h"
#include "Basics/StringUtils.h"
#include "Cluster/ClusterInfo.h"
#include "Cluster/ClusterTrxMethods.h"
#include "Graph/BaseOptions.h"
#include "Logger/LogMacros.h"
#include "Network/Methods.h"
#include "Network/NetworkFeature.h"
#include "Network/Utils.h"
#include "StorageEngine/TransactionState.h"
#include "Utils/CollectionNameResolver.h"

@@ -66,10 +70,6 @@ Result ExtractRemoteAndShard(VPackSlice keySlice, size_t& remoteId, std::string&
return {TRI_ERROR_NO_ERROR};
}

struct NoopCb final : public arangodb::ClusterCommCallback {
bool operator()(ClusterCommResult*) override { return true; }
};

} // namespace

EngineInfoContainerDBServerServerBased::TraverserEngineShardLists::TraverserEngineShardLists(

@@ -278,8 +278,9 @@ Result EngineInfoContainerDBServerServerBased::buildEngines(
// Otherwise the locking needs to be empty.
TRI_ASSERT(!_closedSnippets.empty() || !_graphNodes.empty());

auto cc = ClusterComm::instance();
if (cc == nullptr) {
NetworkFeature const& nf = _query.vocbase().server().getFeature<NetworkFeature>();
network::ConnectionPool* pool = nf.pool();
if (pool == nullptr) {
// nullptr only happens on controlled shutdown
return {TRI_ERROR_SHUTTING_DOWN};
}

@@ -290,13 +291,15 @@ Result EngineInfoContainerDBServerServerBased::buildEngines(
"/_db/" + arangodb::basics::StringUtils::urlEncode(_query.vocbase().name()) +
"/_api/aql/setup?ttl=" + std::to_string(ttl));

auto cleanupGuard = scopeGuard([this, &cc, &queryIds]() {
cleanupEngines(cc, TRI_ERROR_INTERNAL, _query.vocbase().name(), queryIds);
auto cleanupGuard = scopeGuard([this, pool, &queryIds]() {
cleanupEngines(pool, TRI_ERROR_INTERNAL, _query.vocbase().name(), queryIds);
});

// Build Lookup Infos
VPackBuilder infoBuilder;
transaction::Methods* trx = _query.trx();
network::RequestOptions options;
options.timeout = network::Timeout(SETUP_TIMEOUT);

for (auto const& server : dbServers) {
std::string const serverDest = "server:" + server;

@@ -344,22 +347,29 @@ Result EngineInfoContainerDBServerServerBased::buildEngines(
!infoSlice.get("snippets").isEmptyObject()) ||
infoSlice.hasKey("traverserEngines"));

VPackBuffer<uint8_t> buffer(infoSlice.byteSize());
buffer.append(infoSlice.begin(), infoSlice.byteSize());

// add the transaction ID header
std::unordered_map<std::string, std::string> headers;
network::Headers headers;
ClusterTrxMethods::addAQLTransactionHeader(*trx, server, headers);
CoordTransactionID coordTransactionID = TRI_NewTickServer();
auto res = cc->syncRequest(coordTransactionID, serverDest, RequestType::POST,
url, infoSlice.toJson(), headers, SETUP_TIMEOUT);
auto res = network::sendRequest(pool, serverDest, fuerte::RestVerb::Post,
url, std::move(buffer), headers, options)
.get();
_query.incHttpRequests(1);
if (res->getErrorCode() != TRI_ERROR_NO_ERROR) {
if (res.fail()) {
int code = network::fuerteToArangoErrorCode(res);
std::string message = network::fuerteToArangoErrorMessage(res);
LOG_TOPIC("f9a77", DEBUG, Logger::AQL)
<< server << " responded with " << res->getErrorCode() << " -> "
<< res->stringifyErrorMessage();
<< server << " responded with " << code << " -> " << message;
LOG_TOPIC("41082", TRACE, Logger::AQL) << infoSlice.toJson();
return {res->getErrorCode(), res->stringifyErrorMessage()};
return {code, message};
}
std::shared_ptr<VPackBuilder> builder = res->result->getBodyVelocyPack();
VPackSlice response = builder->slice();
auto slices = res.response->slices();
if (slices.empty()) {
return {TRI_ERROR_INTERNAL, "malformed response while building engines"};
}
VPackSlice response = slices[0];
auto result = parseResponse(response, queryIds, server, serverDest, didCreateEngine);
if (!result.ok()) {
return result;

@@ -447,48 +457,53 @@ Result EngineInfoContainerDBServerServerBased::parseResponse(
* they may be leftovers from Coordinator.
* Will also clear the list of queryIds after return.
*
* @param cc The ClusterComm
* @param pool The ConnectionPool
* @param errorCode error Code to be send to DBServers for logging.
* @param dbname Name of the database this query is executed in.
* @param queryIds A map of QueryIds of the format: (remoteNodeId:shardId)
* -> queryid.
*/
void EngineInfoContainerDBServerServerBased::cleanupEngines(
std::shared_ptr<ClusterComm> cc, int errorCode, std::string const& dbname,
network::ConnectionPool* pool, int errorCode, std::string const& dbname,
MapRemoteToSnippet& queryIds) const {
network::RequestOptions options;
options.timeout = network::Timeout(10.0); // Picked arbitrarily
network::Headers headers;

// Shutdown query snippets
std::string url("/_db/" + arangodb::basics::StringUtils::urlEncode(dbname) +
"/_api/aql/shutdown/");
std::vector<ClusterCommRequest> requests;
auto body = std::make_shared<std::string>(
"{\"code\":" + std::to_string(errorCode) + "}");
VPackBuffer<uint8_t> body;
VPackBuilder builder(body);
builder.openObject();
builder.add("code", VPackValue(std::to_string(errorCode)));
builder.close();
for (auto const& it : queryIds) {
// it.first == RemoteNodeId, we don't need this
// it.second server -> [snippets]
for (auto const& serToSnippets : it.second) {
auto server = serToSnippets.first;
for (auto const& shardId : serToSnippets.second) {
requests.emplace_back(server, rest::RequestType::PUT, url + shardId, body);
// fire and forget
network::sendRequest(pool, server, fuerte::RestVerb::Put, url + shardId,
body, headers, options);
}
_query.incHttpRequests(serToSnippets.second.size());
}
}

// Shutdown traverser engines
url = "/_db/" + arangodb::basics::StringUtils::urlEncode(dbname) +
"/_internal/traverser/";
std::unordered_map<std::string, std::string> headers;
std::shared_ptr<std::string> noBody;
VPackBuffer<uint8_t> noBody;

CoordTransactionID coordinatorTransactionID = TRI_NewTickServer();
auto cb = std::make_shared<::NoopCb>();

constexpr double shortTimeout = 10.0; // Picked arbitrarily
for (auto const& gn : _graphNodes) {
auto allEngines = gn->engines();
for (auto const& engine : *allEngines) {
cc->asyncRequest(coordinatorTransactionID, engine.first, rest::RequestType::DELETE_REQ,
url + basics::StringUtils::itoa(engine.second), noBody,
headers, cb, shortTimeout, false, 2.0);
// fire and forget
network::sendRequestRetry(pool, engine.first, fuerte::RestVerb::Delete,
url + basics::StringUtils::itoa(engine.second),
noBody, headers, options);
}
_query.incHttpRequests(allEngines->size());
}
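The cleanupEngines hunk above drops the ClusterCommRequest queue and the NoopCb callback in favour of plain fire-and-forget calls: the futures returned by network::sendRequest / network::sendRequestRetry are intentionally discarded. A minimal sketch of that shape, assuming `pool`, `url`, `shardId`, `body`, `headers` and `options` are set up as in the surrounding code:

// Fire-and-forget shutdown of one snippet; the returned future is dropped on
// purpose, so nobody waits for or inspects the response (sketch only).
network::sendRequest(pool, server, fuerte::RestVerb::Put, url + shardId,
                     body, headers, options);
_query.incHttpRequests(1);  // still account for the outgoing request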
@@ -36,7 +36,9 @@
#include <stack>

namespace arangodb {
class ClusterComm;
namespace network {
class ConnectionPool;
}

namespace velocypack {
class Builder;

@@ -133,13 +135,13 @@ class EngineInfoContainerDBServerServerBased {
* they may be leftovers from Coordinator.
* Will also clear the list of queryIds after return.
*
* @param cc The ClusterComm
* @param pool The ConnectionPool
* @param errorCode error Code to be send to DBServers for logging.
* @param dbname Name of the database this query is executed in.
* @param queryIds A map of QueryIds of the format: (remoteNodeId:shardId)
* -> queryid.
*/
void cleanupEngines(std::shared_ptr<ClusterComm> cc, int errorCode,
void cleanupEngines(network::ConnectionPool* pool, int errorCode,
std::string const& dbname, MapRemoteToSnippet& queryIds) const;

// Insert a GraphNode that needs to generate TraverserEngines on

@@ -34,8 +34,6 @@
#include <vector>

namespace arangodb {
struct ClusterCommResult;

namespace transaction {
class Methods;
}

@@ -39,9 +39,9 @@
#include "Aql/ReturnExecutor.h"
#include "Aql/WalkerWorker.h"
#include "Basics/ScopeGuard.h"
#include "Cluster/ClusterComm.h"
#include "Cluster/ServerState.h"
#include "Logger/Logger.h"
#include "Network/NetworkFeature.h"

using namespace arangodb;
using namespace arangodb::aql;

@@ -467,8 +467,10 @@ struct DistributedQueryInstanciator final : public WalkerWorker<ExecutionNode> {
// QueryIds are filled by responses of DBServer parts.
MapRemoteToSnippet queryIds{};

auto cleanupGuard = scopeGuard([this, &queryIds]() {
_dbserverParts.cleanupEngines(ClusterComm::instance(), TRI_ERROR_INTERNAL,
NetworkFeature const& nf = _query.vocbase().server().getFeature<NetworkFeature>();
network::ConnectionPool* pool = nf.pool();
auto cleanupGuard = scopeGuard([this, pool, &queryIds]() {
_dbserverParts.cleanupEngines(pool, TRI_ERROR_INTERNAL,
_query.vocbase().name(), queryIds);
});
std::unordered_map<size_t, size_t> nodeAliases;

@@ -61,6 +61,7 @@
#include "Aql/TraversalNode.h"
#include "Aql/WalkerWorker.h"
#include "Aql/MaterializeExecutor.h"
#include "Basics/VelocyPackHelper.h"
#include "Basics/system-compiler.h"
#include "Cluster/ServerState.h"
#include "Meta/static_assert_size.h"

@@ -21,6 +21,8 @@
/// @author Michael Hackstein
////////////////////////////////////////////////////////////////////////////////

#include <algorithm>

#include "ShardLocking.h"

#include "Aql/Collection.h"

@@ -30,8 +32,7 @@
#include "Aql/ModificationNodes.h"
#include "Aql/Query.h"
#include "Cluster/ClusterFeature.h"

#include <algorithm>
#include "Logger/LogMacros.h"

using namespace arangodb;
using namespace arangodb::aql;

@@ -28,7 +28,6 @@
#include "Aql/SingleRowFetcher.h"
#include "Basics/Common.h"
#include "Basics/StaticStrings.h"
#include "Cluster/ClusterComm.h"
#include "Cluster/ClusterInfo.h"
#include "Cluster/ServerState.h"
#include "ModificationExecutorTraits.h"

@@ -791,11 +791,11 @@ int handleGeneralCommErrors(arangodb::ClusterCommResult const* res) {
/// @brief creates a copy of all HTTP headers to forward
////////////////////////////////////////////////////////////////////////////////

std::unordered_map<std::string, std::string> getForwardableRequestHeaders(arangodb::GeneralRequest* request) {
network::Headers getForwardableRequestHeaders(arangodb::GeneralRequest* request) {
std::unordered_map<std::string, std::string> const& headers = request->headers();
std::unordered_map<std::string, std::string>::const_iterator it = headers.begin();

std::unordered_map<std::string, std::string> result;
network::Headers result;

while (it != headers.end()) {
std::string const& key = (*it).first;

@@ -30,6 +30,7 @@
#include "Cluster/ClusterFeature.h"
#include "Cluster/TraverserEngineRegistry.h"
#include "Futures/Future.h"
#include "Network/types.h"
#include "Rest/CommonDefines.h"
#include "Rest/GeneralResponse.h"
#include "Utils/OperationResult.h"

@@ -56,7 +57,7 @@ int handleGeneralCommErrors(arangodb::ClusterCommResult const* res);
/// @brief creates a copy of all HTTP headers to forward
////////////////////////////////////////////////////////////////////////////////

std::unordered_map<std::string, std::string> getForwardableRequestHeaders(GeneralRequest*);
network::Headers getForwardableRequestHeaders(GeneralRequest*);

////////////////////////////////////////////////////////////////////////////////
/// @brief check if a list of attributes have the same values in two vpack

@@ -22,18 +22,22 @@
////////////////////////////////////////////////////////////////////////////////

#include "ClusterTraverser.h"

#include <velocypack/Iterator.h>
#include <velocypack/velocypack-aliases.h>

#include "Basics/StaticStrings.h"
#include "Basics/VelocyPackHelper.h"
#include "Cluster/ClusterComm.h"
#include "Cluster/ClusterMethods.h"
#include "Graph/BreadthFirstEnumerator.h"
#include "Graph/ClusterTraverserCache.h"
#include "Graph/TraverserCache.h"
#include "Logger/LogMacros.h"
#include "Network/Methods.h"
#include "Network/NetworkFeature.h"
#include "Network/Utils.h"
#include "Transaction/Helpers.h"

#include <velocypack/Iterator.h>
#include <velocypack/velocypack-aliases.h>

using namespace arangodb;
using namespace arangodb::graph;

@@ -152,9 +156,9 @@ void ClusterTraverser::addVertexToVelocyPack(arangodb::velocypack::StringRef vid

void ClusterTraverser::destroyEngines() {
// We have to clean up the engines in Coordinator Case.
auto cc = ClusterComm::instance();

if (cc != nullptr) {
NetworkFeature const& nf = _trx->vocbase().server().getFeature<NetworkFeature>();
network::ConnectionPool* pool = nf.pool();
if (pool != nullptr) {
// nullptr only happens on controlled server shutdown
std::string const url(
"/_db/" + arangodb::basics::StringUtils::urlEncode(_trx->vocbase().name()) +

@@ -162,24 +166,26 @@ void ClusterTraverser::destroyEngines() {

if (_enumerator != nullptr) {
_enumerator->incHttpRequests(_engines->size());
}
}

VPackBuffer<uint8_t> body;
network::Headers headers;
network::RequestOptions options;
options.timeout = network::Timeout(30.0);

for (auto const& it : *_engines) {
arangodb::CoordTransactionID coordTransactionID = TRI_NewTickServer();
std::unordered_map<std::string, std::string> headers;
auto res = cc->syncRequest(coordTransactionID, "server:" + it.first,
RequestType::DELETE_REQ,
url + arangodb::basics::StringUtils::itoa(it.second),
"", headers, 30.0);
auto res =
network::sendRequest(pool, "server:" + it.first, fuerte::RestVerb::Delete,
url + arangodb::basics::StringUtils::itoa(it.second),
body, headers, options);
res.wait();

if (res->status != CL_COMM_SENT) {
// Note If there was an error on server side we do not have
// CL_COMM_SENT
if (!res.hasValue() || res.get().fail()) {
// Note If there was an error on server side we do not have ok()
std::string message("Could not destroy all traversal engines");

if (!res->errorMessage.empty()) {
message += std::string(": ") + res->errorMessage;
if (res.hasValue()) {
message += ": " + network::fuerteToArangoErrorMessage(res.get());
}

LOG_TOPIC("8a7a0", ERR, arangodb::Logger::FIXME) << message;
}
}

@@ -377,8 +377,9 @@ template void addTransactionHeader<std::unordered_map<std::string, std::string>>
std::unordered_map<std::string, std::string>&);

/// @brief add transaction ID header for setting up AQL snippets
void addAQLTransactionHeader(transaction::Methods const& trx, ServerID const& server,
std::unordered_map<std::string, std::string>& headers) {
template <typename MapT>
void addAQLTransactionHeader(transaction::Methods const& trx,
ServerID const& server, MapT& headers) {
TransactionState& state = *trx.state();
TRI_ASSERT(state.isCoordinator());
if (!ClusterTrxMethods::isElCheapo(trx)) {

@@ -402,6 +403,11 @@ void addAQLTransactionHeader(transaction::Methods const& trx, ServerID const& se
}
headers.emplace(arangodb::StaticStrings::TransactionId, std::move(value));
}
template void addAQLTransactionHeader<std::map<std::string, std::string>>(
transaction::Methods const&, ServerID const&, std::map<std::string, std::string>&);
template void addAQLTransactionHeader<std::unordered_map<std::string, std::string>>(
transaction::Methods const&, ServerID const&,
std::unordered_map<std::string, std::string>&);

bool isElCheapo(transaction::Methods const& trx) {
return isElCheapo(*trx.state());

@@ -54,8 +54,9 @@ void addTransactionHeader(transaction::Methods const& trx,
ServerID const& server, MapT& headers);

/// @brief add transaction ID header for setting up AQL snippets
void addAQLTransactionHeader(transaction::Methods const& trx, ServerID const& server,
std::unordered_map<std::string, std::string>& headers);
template <typename MapT>
void addAQLTransactionHeader(transaction::Methods const& trx,
ServerID const& server, MapT& headers);

/// @brief check whether this is a kind el cheapo transaction
bool isElCheapo(transaction::Methods const& trx);
@@ -27,13 +27,16 @@
#include "Agency/AgencyStrings.h"
#include "Agency/TimeString.h"
#include "ApplicationFeatures/ApplicationServer.h"
#include "Basics/StringUtils.h"
#include "Basics/VelocyPackHelper.h"
#include "Cluster/ActionDescription.h"
#include "Cluster/ClusterComm.h"
#include "Cluster/ClusterFeature.h"
#include "Cluster/FollowerInfo.h"
#include "Cluster/MaintenanceFeature.h"
#include "Cluster/ServerState.h"
#include "Network/Methods.h"
#include "Network/NetworkFeature.h"
#include "Network/Utils.h"
#include "Replication/DatabaseInitialSyncer.h"
#include "Replication/DatabaseReplicationApplier.h"
#include "Replication/DatabaseTailingSyncer.h"

@@ -121,14 +124,6 @@ SynchronizeShard::SynchronizeShard(MaintenanceFeature& feature, ActionDescriptio

SynchronizeShard::~SynchronizeShard() = default;

class SynchronizeShardCallback : public arangodb::ClusterCommCallback {
public:
explicit SynchronizeShardCallback(SynchronizeShard* callee) {}
virtual bool operator()(arangodb::ClusterCommResult*) override final {
return true;
}
};

static std::stringstream& AppendShardInformationToMessage(
std::string const& database, std::string const& shard, std::string const& planId,
std::chrono::system_clock::time_point const& startTime, std::stringstream& msg) {

@@ -140,27 +135,26 @@ static std::stringstream& AppendShardInformationToMessage(
return msg;
}

static arangodb::Result getReadLockId(std::string const& endpoint,
static arangodb::Result getReadLockId(network::ConnectionPool* pool,
std::string const& endpoint,
std::string const& database, std::string const& clientId,
double timeout, uint64_t& id) {
std::string error("startReadLockOnLeader: Failed to get read lock - ");

auto cc = arangodb::ClusterComm::instance();
if (cc == nullptr) { // nullptr only happens during controlled shutdown
if (pool == nullptr) { // nullptr only happens during controlled shutdown
return arangodb::Result(TRI_ERROR_SHUTTING_DOWN,
"startReadLockOnLeader: Shutting down");
}

auto comres =
cc->syncRequest(TRI_NewTickServer(), endpoint, rest::RequestType::GET,
DB + database + REPL_HOLD_READ_LOCK, std::string(),
std::unordered_map<std::string, std::string>(), timeout);
network::RequestOptions options;
options.timeout = network::Timeout(timeout);
auto res = network::sendRequest(pool, endpoint, fuerte::RestVerb::Get,
DB + database + REPL_HOLD_READ_LOCK,
VPackBuffer<uint8_t>(), network::Headers(), options)
.get();

auto result = comres->result;

if (result != nullptr && result->getHttpReturnCode() == 200) {
auto const idv = result->getBodyVelocyPack();
auto const& idSlice = idv->slice();
if (res.ok() && res.response->statusCode() == fuerte::StatusOK) {
auto const idSlice = res.response->slice();
TRI_ASSERT(idSlice.isObject());
TRI_ASSERT(idSlice.hasKey(ID));
try {

@@ -171,11 +165,7 @@ static arangodb::Result getReadLockId(std::string const& endpoint,
return arangodb::Result(TRI_ERROR_INTERNAL, error);
}
} else {
if (result) {
error.append(result->getHttpReturnMessage());
} else {
error.append(comres->stringifyErrorMessage());
}
error.append(network::fuerteToArangoErrorMessage(res));
return arangodb::Result(TRI_ERROR_INTERNAL, error);
}

@@ -211,19 +201,16 @@ static arangodb::Result collectionCount(std::shared_ptr<arangodb::LogicalCollect
return opResult.result;
}

static arangodb::Result addShardFollower(std::string const& endpoint,
std::string const& database,
std::string const& shard, uint64_t lockJobId,
std::string const& clientId,
SyncerId const syncerId,
std::string const& clientInfoString,
double timeout = 120.0) {
static arangodb::Result addShardFollower(
network::ConnectionPool* pool, std::string const& endpoint,
std::string const& database, std::string const& shard, uint64_t lockJobId,
std::string const& clientId, SyncerId const syncerId,
std::string const& clientInfoString, double timeout = 120.0) {
LOG_TOPIC("b982e", DEBUG, Logger::MAINTENANCE)
<< "addShardFollower: tell the leader to put us into the follower "
"list...";

auto cc = arangodb::ClusterComm::instance();
if (cc == nullptr) { // nullptr only happens during controlled shutdown
if (pool == nullptr) { // nullptr only happens during controlled shutdown
return arangodb::Result(TRI_ERROR_SHUTTING_DOWN,
"startReadLockOnLeader: Shutting down");
}

@@ -242,9 +229,11 @@ static arangodb::Result addShardFollower(std::string const& endpoint,
}

uint64_t docCount;
Result res = collectionCount(collection, docCount);
if (res.fail()) {
return res;
{
Result res = collectionCount(collection, docCount);
if (res.fail()) {
return res;
}
}
VPackBuilder body;
{

@@ -278,17 +267,18 @@ static arangodb::Result addShardFollower(std::string const& endpoint,
}
}

auto comres =
cc->syncRequest(TRI_NewTickServer(), endpoint, rest::RequestType::PUT,
DB + database + REPL_ADD_FOLLOWER, body.toJson(),
std::unordered_map<std::string, std::string>(), timeout);
network::RequestOptions options;
options.timeout = network::Timeout(timeout);
auto res = network::sendRequest(pool, endpoint, fuerte::RestVerb::Put,
DB + database + REPL_ADD_FOLLOWER,
std::move(*body.steal()), network::Headers(), options)
.get();

auto result = comres->result;
std::string errorMessage(
"addShardFollower: could not add us to the leader's follower list. ");
if (result == nullptr || result->getHttpReturnCode() != 200) {
if (res.fail() || res.response->statusCode() != fuerte::StatusOK) {
if (lockJobId != 0) {
errorMessage += comres->stringifyErrorMessage();
errorMessage += network::fuerteToArangoErrorMessage(res);
LOG_TOPIC("22e0a", ERR, Logger::MAINTENANCE) << errorMessage;
} else {
errorMessage += "With shortcut (can happen, no problem).";

@@ -310,12 +300,12 @@ static arangodb::Result addShardFollower(std::string const& endpoint,
}
}

static arangodb::Result cancelReadLockOnLeader(std::string const& endpoint,
static arangodb::Result cancelReadLockOnLeader(network::ConnectionPool* pool,
std::string const& endpoint,
std::string const& database, uint64_t lockJobId,
std::string const& clientId,
double timeout = 10.0) {
auto cc = arangodb::ClusterComm::instance();
if (cc == nullptr) { // nullptr only happens during controlled shutdown
if (pool == nullptr) { // nullptr only happens during controlled shutdown
return arangodb::Result(TRI_ERROR_SHUTTING_DOWN,
"cancelReadLockOnLeader: Shutting down");
}

@@ -326,16 +316,15 @@ static arangodb::Result cancelReadLockOnLeader(std::string const& endpoint,
body.add(ID, VPackValue(std::to_string(lockJobId)));
}

auto comres =
cc->syncRequest(TRI_NewTickServer(), endpoint, rest::RequestType::DELETE_REQ,
DB + database + REPL_HOLD_READ_LOCK, body.toJson(),
std::unordered_map<std::string, std::string>(), timeout);
network::RequestOptions options;
options.timeout = network::Timeout(timeout);
auto res = network::sendRequest(pool, endpoint, fuerte::RestVerb::Delete,
DB + database + REPL_HOLD_READ_LOCK,
std::move(*body.steal()), network::Headers(), options)
.get();

auto result = comres->result;

if (result != nullptr && result->getHttpReturnCode() == 404) {
auto const vp = result->getBodyVelocyPack();
auto const& slice = vp->slice();
if (res.ok() && res.response && res.response->statusCode() == fuerte::StatusNotFound) {
auto const slice = res.response->slice();
if (slice.isObject()) {
VPackSlice s = slice.get(StaticStrings::ErrorNum);
if (s.isNumber()) {

@@ -348,8 +337,14 @@ static arangodb::Result cancelReadLockOnLeader(std::string const& endpoint,
}
}

if (result == nullptr || result->getHttpReturnCode() != 200) {
auto errorMessage = comres->stringifyErrorMessage();
if (res.fail() || res.response->statusCode() != fuerte::StatusOK) {
auto errorMessage = network::fuerteToArangoErrorMessage(res);
// rebuild body since we stole it earlier
VPackBuilder body;
{
VPackObjectBuilder b(&body);
body.add(ID, VPackValue(std::to_string(lockJobId)));
}
LOG_TOPIC("52924", ERR, Logger::MAINTENANCE)
<< "cancelReadLockOnLeader: exception caught for " << body.toJson()
<< ": " << errorMessage;

@@ -360,46 +355,40 @@ static arangodb::Result cancelReadLockOnLeader(std::string const& endpoint,
return arangodb::Result();
}

static arangodb::Result cancelBarrier(std::string const& endpoint,
static arangodb::Result cancelBarrier(network::ConnectionPool* pool,
std::string const& endpoint,
std::string const& database, int64_t barrierId,
std::string const& clientId, double timeout = 120.0) {
if (barrierId <= 0) {
return Result();
}

auto cc = arangodb::ClusterComm::instance();
if (cc == nullptr) { // nullptr only happens during controlled shutdown
if (pool == nullptr) { // nullptr only happens during controlled shutdown
return arangodb::Result(TRI_ERROR_SHUTTING_DOWN,
"startReadLockOnLeader: Shutting down");
}

auto comres =
cc->syncRequest(TRI_NewTickServer(), endpoint, rest::RequestType::DELETE_REQ,
DB + database + REPL_BARRIER_API + std::to_string(barrierId),
std::string(),
std::unordered_map<std::string, std::string>(), timeout);
network::RequestOptions options;
options.timeout = network::Timeout(timeout);
auto res =
network::sendRequest(pool, endpoint, fuerte::RestVerb::Delete,
DB + database + REPL_BARRIER_API + std::to_string(barrierId),
VPackBuffer<uint8_t>(), network::Headers(), options)
.get();

// I'm sure that syncRequest cannot return null. But the check doesn't hurt
// and is preferable over a segfault.
TRI_ASSERT(comres != nullptr);
if (comres == nullptr) {
LOG_TOPIC("00aa2", ERR, Logger::MAINTENANCE)
<< "CancelBarrier: error: syncRequest returned null";
return arangodb::Result{TRI_ERROR_INTERNAL};
}

if (comres->status == CL_COMM_SENT) {
auto result = comres->result;
if (result == nullptr ||
(result->getHttpReturnCode() != 200 && result->getHttpReturnCode() != 204)) {
std::string errorMessage = comres->stringifyErrorMessage();
LOG_TOPIC("f5733", ERR, Logger::MAINTENANCE) << "CancelBarrier: error" << errorMessage;
if (res.ok()) {
auto* response = res.response.get();
if (response->statusCode() != fuerte::StatusOK &&
response->statusCode() != fuerte::StatusNoContent) {
std::string errorMessage = "got status " + std::to_string(response->statusCode());
LOG_TOPIC("f5733", ERR, Logger::MAINTENANCE)
<< "CancelBarrier: error '" << errorMessage << "'";
return arangodb::Result(TRI_ERROR_INTERNAL, errorMessage);
}
} else {
std::string error(
"CancelBarrier: failed to send message to leader : status ");
error += ClusterCommResult::stringifyStatus(comres->status);
error += network::fuerteToArangoErrorMessage(res);
LOG_TOPIC("1c48a", ERR, Logger::MAINTENANCE) << error;
return arangodb::Result(TRI_ERROR_INTERNAL, error);
}

@@ -413,32 +402,33 @@ static inline bool isStopping() {
return server.isStopping();
}

arangodb::Result SynchronizeShard::getReadLock(std::string const& endpoint,
arangodb::Result SynchronizeShard::getReadLock(network::ConnectionPool* pool,
std::string const& endpoint,
std::string const& database,
std::string const& collection,
std::string const& clientId, uint64_t rlid,
bool soft, double timeout) {
auto cc = arangodb::ClusterComm::instance();
if (cc == nullptr) { // nullptr only happens during controlled shutdown
if (pool == nullptr) { // nullptr only happens during controlled shutdown
return arangodb::Result(TRI_ERROR_SHUTTING_DOWN,
"startReadLockOnLeader: Shutting down");
}

VPackBuilder body;
VPackBuilder bodyBuilder;
{
VPackObjectBuilder o(&body);
body.add(ID, VPackValue(std::to_string(rlid)));
body.add(COLLECTION, VPackValue(collection));
body.add(TTL, VPackValue(timeout));
body.add(StaticStrings::ReplicationSoftLockOnly, VPackValue(soft));
VPackObjectBuilder o(&bodyBuilder);
bodyBuilder.add(ID, VPackValue(std::to_string(rlid)));
bodyBuilder.add(COLLECTION, VPackValue(collection));
bodyBuilder.add(TTL, VPackValue(timeout));
bodyBuilder.add(StaticStrings::ReplicationSoftLockOnly, VPackValue(soft));
}
auto body = bodyBuilder.steal();

auto url = DB + database + REPL_HOLD_READ_LOCK;

cc->asyncRequest(TRI_NewTickServer(), endpoint, rest::RequestType::POST, url,
std::make_shared<std::string>(body.toJson()),
std::unordered_map<std::string, std::string>(),
std::make_shared<SynchronizeShardCallback>(this), timeout, true, timeout);
network::RequestOptions options;
options.timeout = network::Timeout(timeout);
auto dummy = network::sendRequest(pool, endpoint, fuerte::RestVerb::Post, url,
*body, network::Headers(), options);

// Intentionally do not look at the outcome, even in case of an error
// we must make sure that the read lock on the leader is not active!

@@ -454,14 +444,12 @@ arangodb::Result SynchronizeShard::getReadLock(std::string const& endpoint,
}

// Now check that we hold the read lock:
auto putres = cc->syncRequest(TRI_NewTickServer(), endpoint,
rest::RequestType::PUT, url, body.toJson(),
std::unordered_map<std::string, std::string>(), timeout);
auto res = network::sendRequest(pool, endpoint, fuerte::RestVerb::Put, url,
*body, network::Headers(), options)
.get();

auto result = putres->result;
if (result != nullptr && result->getHttpReturnCode() == 200) {
auto const vp = putres->result->getBodyVelocyPack();
auto const& slice = vp->slice();
if (res.ok() && res.response->statusCode() == fuerte::StatusOK) {
auto const slice = res.response->slice();
TRI_ASSERT(slice.isObject());
VPackSlice lockHeld = slice.get("lockHeld");
if (lockHeld.isBoolean() && lockHeld.getBool()) {

@@ -470,9 +458,8 @@ arangodb::Result SynchronizeShard::getReadLock(std::string const& endpoint,
LOG_TOPIC("b681f", DEBUG, Logger::MAINTENANCE)
<< "startReadLockOnLeader: Lock not yet acquired...";
} else {
if (result != nullptr && result->getHttpReturnCode() == 404) {
auto const vp = result->getBodyVelocyPack();
auto const& slice = vp->slice();
if (res.ok() && res.response->statusCode() == fuerte::StatusNotFound) {
auto const slice = res.response->slice();
if (slice.isObject()) {
VPackSlice s = slice.get(StaticStrings::ErrorNum);
if (s.isNumber()) {

@@ -486,9 +473,9 @@ arangodb::Result SynchronizeShard::getReadLock(std::string const& endpoint,
// fall-through to other cases intentional here
}

std::string message = network::fuerteToArangoErrorMessage(res);
LOG_TOPIC("a82bc", DEBUG, Logger::MAINTENANCE)
<< "startReadLockOnLeader: Do not see read lock yet:"
<< putres->stringifyErrorMessage();
<< "startReadLockOnLeader: Do not see read lock yet:" << message;
}

std::this_thread::sleep_for(duration<double>(sleepTime));

@@ -497,13 +484,14 @@ arangodb::Result SynchronizeShard::getReadLock(std::string const& endpoint,
LOG_TOPIC("75e2b", ERR, Logger::MAINTENANCE) << "startReadLockOnLeader: giving up";

try {
auto r = cc->syncRequest(TRI_NewTickServer(), endpoint,
rest::RequestType::DELETE_REQ, url, body.toJson(),
std::unordered_map<std::string, std::string>(), timeout);
if (r->result == nullptr || r->result->getHttpReturnCode() != 200) {
auto r = network::sendRequest(pool, endpoint, fuerte::RestVerb::Delete, url,
*body, network::Headers(), options)
.get();
if (r.fail() || r.response->statusCode() != fuerte::StatusOK) {
std::string addendum = network::fuerteToArangoErrorMessage(r);
LOG_TOPIC("4f34d", ERR, Logger::MAINTENANCE)
<< "startReadLockOnLeader: cancelation error for shard - " << collection
<< " " << r->getErrorCode() << ": " << r->stringifyErrorMessage();
<< "startReadLockOnLeader: cancelation error for shard - "
<< collection << ": " << addendum;
}
} catch (std::exception const& e) {
LOG_TOPIC("7fcc9", ERR, Logger::MAINTENANCE)

@@ -519,14 +507,17 @@ arangodb::Result SynchronizeShard::startReadLockOnLeader(
std::string const& clientId, uint64_t& rlid, bool soft, double timeout) {
// Read lock id
rlid = 0;
arangodb::Result result = getReadLockId(endpoint, database, clientId, timeout, rlid);
NetworkFeature& nf = _feature.server().getFeature<NetworkFeature>();
network::ConnectionPool* pool = nf.pool();
arangodb::Result result =
getReadLockId(pool, endpoint, database, clientId, timeout, rlid);
if (!result.ok()) {
LOG_TOPIC("2e5ae", ERR, Logger::MAINTENANCE) << result.errorMessage();
return result;
}
LOG_TOPIC("c8d18", DEBUG, Logger::MAINTENANCE) << "Got read lock id: " << rlid;

result = getReadLock(endpoint, database, collection, clientId, rlid, soft, timeout);
result = getReadLock(pool, endpoint, database, collection, clientId, rlid, soft, timeout);

return result;
}

@@ -820,8 +811,10 @@ bool SynchronizeShard::first() {
<< database << "/" << shard << "' for central '" << database << "/"
<< planId << "'";
try {
auto asResult =
addShardFollower(ep, database, shard, 0, clientId, SyncerId{}, _clientInfoString, 60.0);
NetworkFeature& nf = _feature.server().getFeature<NetworkFeature>();
network::ConnectionPool* pool = nf.pool();
auto asResult = addShardFollower(pool, ep, database, shard, 0, clientId,
SyncerId{}, _clientInfoString, 60.0);

if (asResult.ok()) {
if (Logger::isEnabled(LogLevel::DEBUG, Logger::MAINTENANCE)) {

@@ -918,7 +911,9 @@ bool SynchronizeShard::first() {
// From here on, we have to call `cancelBarrier` in case of errors
// as well as in the success case!
auto barrierId = sy.get(BARRIER_ID).getNumber<int64_t>();
TRI_DEFER(cancelBarrier(ep, database, barrierId, clientId));
NetworkFeature& nf = _feature.server().getFeature<NetworkFeature>();
network::ConnectionPool* pool = nf.pool();
TRI_DEFER(cancelBarrier(pool, ep, database, barrierId, clientId));

VPackSlice collections = sy.get(COLLECTIONS);

@@ -1018,10 +1013,12 @@ ResultT<TRI_voc_tick_t> SynchronizeShard::catchupWithReadLock(
return ResultT<TRI_voc_tick_t>::error(TRI_ERROR_INTERNAL, errorMessage);
}

auto readLockGuard = arangodb::scopeGuard([&]() {
auto readLockGuard = arangodb::scopeGuard([&, this]() {
// Always cancel the read lock.
// Reported seperately
auto res = cancelReadLockOnLeader(ep, database, lockJobId, clientId, 60.0);
NetworkFeature& nf = _feature.server().getFeature<NetworkFeature>();
network::ConnectionPool* pool = nf.pool();
auto res = cancelReadLockOnLeader(pool, ep, database, lockJobId, clientId, 60.0);
if (!res.ok()) {
LOG_TOPIC("b15ee", INFO, Logger::MAINTENANCE)
<< "Could not cancel soft read lock on leader: " << res.errorMessage();

@@ -1061,7 +1058,9 @@ ResultT<TRI_voc_tick_t> SynchronizeShard::catchupWithReadLock(
}

// Stop the read lock again:
res = cancelReadLockOnLeader(ep, database, lockJobId, clientId, 60.0);
NetworkFeature& nf = _feature.server().getFeature<NetworkFeature>();
network::ConnectionPool* pool = nf.pool();
res = cancelReadLockOnLeader(pool, ep, database, lockJobId, clientId, 60.0);
// We removed the readlock
readLockGuard.cancel();
if (!res.ok()) {

@@ -1100,10 +1099,12 @@ Result SynchronizeShard::catchupWithExclusiveLock(
"synchronizeOneShard: error in startReadLockOnLeader (hard):" + res.errorMessage();
return {TRI_ERROR_INTERNAL, errorMessage};
}
auto readLockGuard = arangodb::scopeGuard([&]() {
auto readLockGuard = arangodb::scopeGuard([&, this]() {
// Always cancel the read lock.
// Reported seperately
auto res = cancelReadLockOnLeader(ep, database, lockJobId, clientId, 60.0);
NetworkFeature& nf = _feature.server().getFeature<NetworkFeature>();
network::ConnectionPool* pool = nf.pool();
auto res = cancelReadLockOnLeader(pool, ep, database, lockJobId, clientId, 60.0);
if (!res.ok()) {
LOG_TOPIC("067a8", INFO, Logger::MAINTENANCE)
<< "Could not cancel hard read lock on leader: " << res.errorMessage();

@@ -1133,7 +1134,10 @@ Result SynchronizeShard::catchupWithExclusiveLock(
return {TRI_ERROR_INTERNAL, errorMessage};
}

res = addShardFollower(ep, database, shard, lockJobId, clientId, syncerId, _clientInfoString, 60.0);
NetworkFeature& nf = _feature.server().getFeature<NetworkFeature>();
network::ConnectionPool* pool = nf.pool();
res = addShardFollower(pool, ep, database, shard, lockJobId, clientId,
syncerId, _clientInfoString, 60.0);

if (!res.ok()) {
std::string errorMessage(
@@ -33,6 +33,9 @@
#include <chrono>

namespace arangodb {
namespace network {
class ConnectionPool;
}

class LogicalCollection;
struct SyncerId;

@@ -50,7 +53,8 @@ class SynchronizeShard : public ActionBase {
void setState(ActionState state) override final;

private:
arangodb::Result getReadLock(std::string const& endpoint, std::string const& database,
arangodb::Result getReadLock(network::ConnectionPool* pool,
std::string const& endpoint, std::string const& database,
std::string const& collection, std::string const& clientId,
uint64_t rlid, bool soft, double timeout = 300.0);

@@ -27,14 +27,17 @@
#include "MaintenanceFeature.h"

#include "ApplicationFeatures/ApplicationServer.h"
#include "Basics/StringUtils.h"
#include "Basics/VelocyPackHelper.h"
#include "Cluster/ClusterComm.h"
#include "Cluster/ClusterFeature.h"
#include "Cluster/ClusterInfo.h"
#include "Cluster/FollowerInfo.h"
#include "Futures/Utilities.h"
#include "Logger/LogMacros.h"
#include "Logger/Logger.h"
#include "Logger/LoggerStream.h"
#include "Network/Methods.h"
#include "Network/NetworkFeature.h"
#include "Transaction/ClusterUtils.h"
#include "Transaction/Methods.h"
#include "Transaction/StandaloneContext.h"

@@ -54,6 +57,16 @@ using namespace arangodb::application_features;
using namespace arangodb::maintenance;
using namespace arangodb::methods;

namespace {
static std::string serverPrefix("server:");

std::string stripServerPrefix(std::string const& destination) {
TRI_ASSERT(destination.size() >= serverPrefix.size() &&
destination.substr(0, serverPrefix.size()) == serverPrefix);
return destination.substr(serverPrefix.size());
}
} // namespace

TakeoverShardLeadership::TakeoverShardLeadership(MaintenanceFeature& feature,
ActionDescription const& desc)
: ActionBase(feature, desc) {

@@ -97,13 +110,12 @@ TakeoverShardLeadership::TakeoverShardLeadership(MaintenanceFeature& feature,

TakeoverShardLeadership::~TakeoverShardLeadership() = default;

static void sendLeaderChangeRequests(std::vector<ServerID> const& currentServers,
std::shared_ptr<std::vector<ServerID>>& realInsyncFollowers,
std::string const& databaseName, ShardID const& shardID,
std::string const& oldLeader) {

auto cc = ClusterComm::instance();
if (cc == nullptr) {
static void sendLeaderChangeRequests(network::ConnectionPool* pool,
std::vector<ServerID> const& currentServers,
std::shared_ptr<std::vector<ServerID>>& realInsyncFollowers,
std::string const& databaseName,
ShardID const& shardID, std::string const& oldLeader) {
if (pool == nullptr) {
// nullptr happens only during controlled shutdown
return;
}

@@ -121,26 +133,31 @@ static void sendLeaderChangeRequests(std::vector<ServerID> const& currentServers

std::string const url = "/_db/" + databaseName + "/_api/replication/set-the-leader";

std::vector<ClusterCommRequest> requests;
auto body = std::make_shared<std::string>(bodyBuilder.toJson());
std::vector<network::FutureRes> futures;
auto body = bodyBuilder.steal();
network::Headers headers;
network::RequestOptions options;
options.timeout = network::Timeout(3.0);
for (auto const& srv : currentServers) {
if (srv == sid) {
continue; // ignore ourself
}
LOG_TOPIC("42516", DEBUG, Logger::MAINTENANCE)
<< "Sending " << bodyBuilder.toJson() << " to " << srv;
requests.emplace_back("server:" + srv, RequestType::PUT, url, body);
auto f = network::sendRequest(pool, "server:" + srv, fuerte::RestVerb::Put,
url, *body, headers, options);
futures.emplace_back(std::move(f));
}

cc->performRequests(requests, 3.0, Logger::COMMUNICATION, false);
auto responses = futures::collectAll(futures).get();

// This code intentionally ignores all errors
realInsyncFollowers = std::make_shared<std::vector<ServerID>>();
for (auto const& req : requests) {
ClusterCommResult const& result = req.result;
if (result.status == CL_COMM_RECEIVED && result.errorCode == TRI_ERROR_NO_ERROR) {
if (result.result && result.result->getHttpReturnCode() == 200) {
realInsyncFollowers->push_back(result.serverID);
for (auto const& res : responses) {
if (res.hasValue() && res.get().ok()) {
auto& result = res.get();
if (result.response && result.response->statusCode() == fuerte::StatusOK) {
realInsyncFollowers->push_back(::stripServerPrefix(result.destination));
}
}
}

@@ -178,7 +195,11 @@ static void handleLeadership(LogicalCollection& collection,
oldLeader = oldLeader.substr(1);

// Update all follower and tell them that we are the leader now
sendLeaderChangeRequests(currentServers, realInsyncFollowers, databaseName, collection.name(), oldLeader);
NetworkFeature& nf =
collection.vocbase().server().getFeature<NetworkFeature>();
network::ConnectionPool* pool = nf.pool();
sendLeaderChangeRequests(pool, currentServers, realInsyncFollowers,
databaseName, collection.name(), oldLeader);
}
}

@@ -22,10 +22,13 @@

#include "MMFilesMethods.h"

#include "Cluster/ClusterComm.h"
#include "Basics/StringUtils.h"
#include "Cluster/ClusterFeature.h"
#include "Cluster/ClusterInfo.h"
#include "ClusterEngine/ClusterEngine.h"
#include "Futures/Utilities.h"
#include "Network/Methods.h"
#include "Network/NetworkFeature.h"
#include "StorageEngine/EngineSelectorFeature.h"
#include "VocBase/LogicalCollection.h"

@@ -44,8 +47,9 @@ int rotateActiveJournalOnAllDBServers(std::string const& dbname, std::string con

auto& server = ce->server();
// Set a few variables needed for our work:
auto cc = ClusterComm::instance();
if (cc == nullptr) {
NetworkFeature const& nf = server.getFeature<NetworkFeature>();
network::ConnectionPool* pool = nf.pool();
if (pool == nullptr) {
// nullptr happens only during controlled shutdown
return TRI_ERROR_SHUTTING_DOWN;
}

@@ -59,25 +63,32 @@ int rotateActiveJournalOnAllDBServers(std::string const& dbname, std::string con

std::string const baseUrl =
"/_db/" + basics::StringUtils::urlEncode(dbname) + "/_api/collection/";
std::shared_ptr<std::string> body;

VPackBuffer<uint8_t> body;
network::Headers headers;
network::RequestOptions options;
options.timeout = network::Timeout(600.0);

// now we notify all leader and follower shards
std::shared_ptr<ShardMap> shardList = collinfo->shardIds();
std::vector<ClusterCommRequest> requests;
std::vector<network::FutureRes> futures;
for (auto const& shard : *shardList) {
for (ServerID const& server : shard.second) {
std::string uri =
baseUrl + basics::StringUtils::urlEncode(shard.first) + "/rotate";
requests.emplace_back("server:" + server, arangodb::rest::RequestType::PUT,
std::move(uri), body);
auto f = network::sendRequest(pool, "server:" + server, fuerte::RestVerb::Put,
std::move(uri), body, headers, options);
futures.emplace_back(std::move(f));
}
}

size_t nrGood = cc->performRequests(requests, 600.0, Logger::ENGINES, false);

if (nrGood < requests.size()) {
return TRI_ERROR_FAILED;
auto responses = futures::collectAll(futures).get();
for (auto const& r : responses) {
if (!r.hasValue() || r.get().fail()) {
return TRI_ERROR_FAILED;
}
}

return TRI_ERROR_NO_ERROR;
}
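The rotate and recalculateCount conversions replace ClusterComm::performRequests with one network::sendRequest future per shard replica, collected via futures::collectAll. A minimal sketch of that fan-out, using only the calls visible in the hunks above and assuming `pool`, `shardList`, `baseUrl`, `body`, `headers` and `options` are prepared as in the surrounding code:

// One asynchronous PUT per shard replica; nothing blocks inside the loop.
std::vector<network::FutureRes> futures;
for (auto const& shard : *shardList) {
  for (ServerID const& server : shard.second) {
    futures.emplace_back(network::sendRequest(
        pool, "server:" + server, fuerte::RestVerb::Put,
        baseUrl + basics::StringUtils::urlEncode(shard.first) + "/rotate",
        body, headers, options));
  }
}
// Block once for all responses; a missing or failed response fails the call.
auto responses = futures::collectAll(futures).get();
for (auto const& r : responses) {
  if (!r.hasValue() || r.get().fail()) {
    return TRI_ERROR_FAILED;
  }
}
return TRI_ERROR_NO_ERROR;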
@ -23,10 +23,12 @@
|
|||
#include "RocksDBMethods.h"
|
||||
|
||||
#include "Basics/StringUtils.h"
|
||||
#include "Cluster/ClusterComm.h"
|
||||
#include "Cluster/ClusterFeature.h"
|
||||
#include "Cluster/ClusterInfo.h"
|
||||
#include "ClusterEngine/ClusterEngine.h"
|
||||
#include "Futures/Utilities.h"
|
||||
#include "Network/Methods.h"
|
||||
#include "Network/NetworkFeature.h"
|
||||
#include "StorageEngine/EngineSelectorFeature.h"
|
||||
#include "VocBase/LogicalCollection.h"
|
||||
|
||||
|
@ -45,8 +47,9 @@ Result recalculateCountsOnAllDBServers(std::string const& dbname, std::string co
|
|||
|
||||
auto& server = ce->server();
|
||||
// Set a few variables needed for our work:
|
||||
auto cc = ClusterComm::instance();
|
||||
if (cc == nullptr) {
|
||||
NetworkFeature const& nf = server.getFeature<NetworkFeature>();
|
||||
network::ConnectionPool* pool = nf.pool();
|
||||
if (pool == nullptr) {
|
||||
// nullptr happens only during controlled shutdown
|
||||
return TRI_ERROR_SHUTTING_DOWN;
|
||||
}
|
||||
|
@ -60,25 +63,32 @@ Result recalculateCountsOnAllDBServers(std::string const& dbname, std::string co
|
|||
|
||||
std::string const baseUrl =
|
||||
"/_db/" + basics::StringUtils::urlEncode(dbname) + "/_api/collection/";
|
||||
std::shared_ptr<std::string> body;
|
||||
|
||||
VPackBuffer<uint8_t> body;
|
||||
network::Headers headers;
|
||||
network::RequestOptions options;
|
||||
options.timeout = network::Timeout(600.0);
|
||||
|
||||
// now we notify all leader and follower shards
|
||||
std::shared_ptr<ShardMap> shardList = collinfo->shardIds();
|
||||
std::vector<ClusterCommRequest> requests;
|
||||
std::vector<network::FutureRes> futures;
|
||||
for (auto const& shard : *shardList) {
|
||||
for (ServerID const& server : shard.second) {
|
||||
std::string uri = baseUrl + basics::StringUtils::urlEncode(shard.first) +
|
||||
"/recalculateCount";
|
||||
requests.emplace_back("server:" + server, arangodb::rest::RequestType::PUT,
|
||||
std::move(uri), body);
|
||||
auto f = network::sendRequest(pool, "server:" + server, fuerte::RestVerb::Put,
|
||||
std::move(uri), body, headers, options);
|
||||
futures.emplace_back(std::move(f));
|
||||
}
|
||||
}
|
||||
|
||||
size_t nrGood = cc->performRequests(requests, 600.0, Logger::ENGINES, false);
|
||||
|
||||
if (nrGood < requests.size()) {
|
||||
return TRI_ERROR_FAILED;
|
||||
auto responses = futures::collectAll(futures).get();
|
||||
for (auto const& r : responses) {
|
||||
if (!r.hasValue() || r.get().fail()) {
|
||||
return TRI_ERROR_FAILED;
|
||||
}
|
||||
}
|
||||
|
||||
return TRI_ERROR_NO_ERROR;
|
||||
}
|
||||
|
||||
|
|
|
@@ -107,30 +107,20 @@ futures::Future<Result> RestHandler::forwardRequest(bool& forwarded) {
return futures::makeFuture(Result());
}

// TODO refactor into a more general/customizable method
//
// The below is mostly copied and only lightly modified from
// RestReplicationHandler::handleTrampolineCoordinator; however, that method
// needs some more specific checks regarding headers and param values, so we
// can't just reuse this method there. Maybe we just need to implement some
// virtual methods to handle param/header filtering?

// TODO verify that vst -> http -> vst conversion works correctly

uint32_t shortId = forwardingTarget();
if (shortId == 0) {
std::string serverId = forwardingTarget();
if (serverId.empty()) {
// no need to actually forward
return futures::makeFuture(Result());
}

std::string serverId =
server().getFeature<ClusterFeature>().clusterInfo().getCoordinatorByShortID(shortId);

if (serverId.empty()) {
// no mapping in agency, try to handle the request here
return futures::makeFuture(Result());
NetworkFeature const& nf = server().getFeature<NetworkFeature>();
network::ConnectionPool* pool = nf.pool();
if (pool == nullptr) {
// nullptr happens only during controlled shutdown
generateError(rest::ResponseCode::SERVICE_UNAVAILABLE,
TRI_ERROR_SHUTTING_DOWN, "shutting down server");
return futures::makeFuture(Result(TRI_ERROR_SHUTTING_DOWN));
}

LOG_TOPIC("38d99", DEBUG, Logger::REQUESTS)
<< "forwarding request " << _request->messageId() << " to " << serverId;

@@ -158,14 +148,22 @@ futures::Future<Result> RestHandler::forwardRequest(bool& forwarded) {
params.append(StringUtils::urlEncode(i.second));
}

network::RequestOptions options;
options.timeout = network::Timeout(300);
options.contentType = rest::contentTypeToString(_request->contentType());
options.acceptType = rest::contentTypeToString(_request->contentTypeResponse());

auto requestType =
fuerte::from_string(GeneralRequest::translateMethod(_request->requestType()));
auto payload = _request->toVelocyPackBuilderPtr()->steal();
auto* pool = server().getFeature<NetworkFeature>().pool();

VPackStringRef resPayload = _request->rawPayload();
VPackBuffer<uint8_t> payload(resPayload.size());
payload.append(resPayload.data(), resPayload.size());

auto future = network::sendRequest(pool, "server:" + serverId, requestType,
"/_db/" + StringUtils::urlEncode(dbname) +
_request->requestPath() + params,
std::move(*payload), network::Timeout(300), headers);
std::move(payload), std::move(headers), options);
auto cb = [this, serverId, useVst,
self = shared_from_this()](network::Response&& response) -> Result {
int res = network::fuerteToArangoErrorCode(response);

@@ -187,6 +185,7 @@ futures::Future<Result> RestHandler::forwardRequest(bool& forwarded) {
} else {
_response->setPayload(std::move(*response.response->stealPayload()), true);
}

auto const& resultHeaders = response.response->messageHeader().meta();
for (auto const& it : resultHeaders) {
@@ -139,10 +139,10 @@ class RestHandler : public std::enable_shared_from_this<RestHandler> {
///
/// This method will be called to determine if the request should be
/// forwarded to another server, and if so, which server. If it should be
/// handled by this server, the method should return 0. Otherwise, this
/// method should return a valid (non-zero) short ID (TransactionID) for the
/// handled by this server, the method should return an empty string.
/// Otherwise, this method should return a valid short name for the
/// target server.
virtual uint32_t forwardingTarget() { return 0; }
virtual std::string forwardingTarget() { return ""; }

void resetResponse(rest::ResponseCode);
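The new contract is a server ID string instead of a numeric short ID: an empty string means "handle locally", anything else names the coordinator to forward to. Purely as an illustration of that contract (the real overrides appear in the handler files below), a minimal override could look like this; MyHandler is a placeholder:

// Hypothetical handler override, shown only to illustrate the new contract.
std::string MyHandler::forwardingTarget() {
  std::vector<std::string> const& suffixes = _request->suffixes();
  if (suffixes.empty()) {
    return "";                                   // handle locally
  }
  uint64_t tick = basics::StringUtils::uint64(suffixes[0]);
  uint32_t sourceServer = TRI_ExtractServerIdFromTick(tick);
  if (sourceServer == ServerState::instance()->getShortId()) {
    return "";                                   // we own this object, handle it here
  }
  auto& ci = server().getFeature<ClusterFeature>().clusterInfo();
  return ci.getCoordinatorByShortID(sourceServer);  // forward to owning coordinator
}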
@@ -22,10 +22,16 @@
////////////////////////////////////////////////////////////////////////////////

#include "ShortestPathFinder.h"
#include "Cluster/ClusterComm.h"

#include "Aql/Query.h"
#include "Basics/StringUtils.h"
#include "Cluster/ServerState.h"
#include "Graph/ClusterTraverserCache.h"
#include "Graph/ShortestPathOptions.h"
#include "Logger/LogMacros.h"
#include "Network/Methods.h"
#include "Network/NetworkFeature.h"
#include "Network/Utils.h"
#include "Transaction/Methods.h"

using namespace arangodb;

@@ -37,10 +43,11 @@ ShortestPathFinder::ShortestPathFinder(ShortestPathOptions& options)

void ShortestPathFinder::destroyEngines() {
if (ServerState::instance()->isCoordinator()) {
NetworkFeature const& nf =
_options.query()->vocbase().server().getFeature<NetworkFeature>();
network::ConnectionPool* pool = nf.pool();
// We have to clean up the engines in Coordinator Case.
auto cc = ClusterComm::instance();

if (cc != nullptr) {
if (pool != nullptr) {
auto ch = reinterpret_cast<ClusterTraverserCache*>(_options.cache());
// nullptr only happens on controlled server shutdown
std::string const url(

@@ -50,22 +57,19 @@ void ShortestPathFinder::destroyEngines() {

for (auto const& it : *ch->engines()) {
incHttpRequests(1);
arangodb::CoordTransactionID coordTransactionID = TRI_NewTickServer();
std::unordered_map<std::string, std::string> headers;
auto res = cc->syncRequest(coordTransactionID, "server:" + it.first,
RequestType::DELETE_REQ,
url + arangodb::basics::StringUtils::itoa(it.second),
"", headers, 30.0);
network::Headers headers;
auto res =
network::sendRequest(pool, "server:" + it.first, fuerte::RestVerb::Delete,
url + arangodb::basics::StringUtils::itoa(it.second),
VPackBuffer<uint8_t>(), network::Timeout(30.0), headers)
.get();

if (res->status != CL_COMM_SENT) {
if (res.error != fuerte::Error::NoError) {
// Note If there was an error on server side we do not have
// CL_COMM_SENT
std::string message("Could not destroy all traversal engines");

if (!res->errorMessage.empty()) {
message += std::string(": ") + res->errorMessage;
}

message += std::string(": ") +
TRI_errno_string(network::fuerteToArangoErrorCode(res));
LOG_TOPIC("d31a4", ERR, arangodb::Logger::FIXME) << message;
}
}
@@ -71,15 +71,18 @@
#include "Basics/error.h"
#include "Basics/system-compiler.h"
#include "Basics/voc-errors.h"
#include "Cluster/ClusterComm.h"
#include "Cluster/ClusterFeature.h"
#include "Cluster/ClusterInfo.h"
#include "Cluster/ServerState.h"
#include "FeaturePhases/V8FeaturePhase.h"
#include "Futures/Utilities.h"
#include "IResearchAnalyzerFeature.h"
#include "IResearchCommon.h"
#include "Logger/LogMacros.h"
#include "Logger/LoggerStream.h"
#include "Network/Methods.h"
#include "Network/NetworkFeature.h"
#include "Network/Utils.h"
#include "Rest/CommonDefines.h"
#include "Rest/GeneralRequest.h"
#include "RestHandler/RestVocbaseBaseHandler.h"

@@ -747,13 +750,17 @@ arangodb::Result visitAnalyzers( // visit analyzers
// FIXME TODO find a better way to query a cluster collection
// workaround for aql::Query failing to execute on a cluster collection
if (arangodb::ServerState::instance()->isDBServer()) {
auto cc = arangodb::ClusterComm::instance();
arangodb::NetworkFeature const& feature =
vocbase.server().getFeature<arangodb::NetworkFeature>();
arangodb::network::ConnectionPool* pool = feature.pool();

if (!cc) {
return arangodb::Result( // result
TRI_ERROR_INTERNAL, // code
std::string("failure to find 'ClusterComm' instance while visiting Analyzer collection '") + ANALYZER_COLLECTION_NAME + "' in vocbase '" + vocbase.name() + "'"
);
if (!pool) {
return arangodb::Result( // result
TRI_ERROR_SHUTTING_DOWN, // code
std::string("failure to find connection pool while visiting Analyzer "
"collection '") +
ANALYZER_COLLECTION_NAME + "' in vocbase '" + vocbase.name() +
"', server is likely shutting down");
}

auto collection = getAnalyzerCollection(vocbase);

@@ -762,8 +769,15 @@ arangodb::Result visitAnalyzers( // visit analyzers
return arangodb::Result(); // nothing to load
}

static const std::string body("{}"); // RestSimpleQueryHandler::allDocuments() expects an object (calls get() on slice)
std::vector<arangodb::ClusterCommRequest> requests;
// RestSimpleQueryHandler::allDocuments() expects an object (calls get() on slice)
VPackBuffer<uint8_t> buffer;
{
VPackBuilder builder(buffer);
builder.openObject();
builder.close();
}
arangodb::network::Headers headers;
std::vector<arangodb::network::FutureRes> futures;

// create a request for every shard
//for (auto& entry: collection->errorNum()) {

@@ -774,50 +788,53 @@ arangodb::Result visitAnalyzers( // visit analyzers
+ arangodb::RestVocbaseBaseHandler::SIMPLE_QUERY_ALL_PATH
+ "?collection=" + shardId;

requests.emplace_back( // add shard request
"shard:" + shardId, // shard
arangodb::rest::RequestType::PUT, // request type as per SimpleQueryHandler
url, // request url
std::shared_ptr<std::string const>(&body, [](std::string const*)->void {}) // body
);
auto f = arangodb::network::sendRequest(pool,
"shard:" + shardId, // shard
arangodb::fuerte::RestVerb::Put, // request type as per SimpleQueryHandler
url, // request url
buffer, // body
arangodb::network::Timeout(120.0), headers);
futures.emplace_back(std::move(f));
}

// same timeout as in ClusterMethods::getDocumentOnCoordinator()
cc->performRequests( // execute requests
requests, 120.0, arangodb::iresearch::TOPIC, false, false // args
);
auto results = arangodb::futures::collectAll(futures).get();
for (auto& r : results) {
auto& res = r.get();
if (res.error != arangodb::fuerte::Error::NoError) {
return arangodb::Result(arangodb::network::fuerteToArangoErrorCode(res));
}

for (auto& request: requests) {
if (TRI_ERROR_ARANGO_DATA_SOURCE_NOT_FOUND == request.result.errorCode) {
if (res.response->statusCode() == arangodb::fuerte::StatusNotFound) {
continue; // treat missing collection as if there are no analyzers
}

if (TRI_ERROR_NO_ERROR != request.result.errorCode) {
return arangodb::Result( // result
request.result.errorCode, request.result.errorMessage // args
);
std::vector<VPackSlice> slices = res.response->slices();
if (slices.empty() || !slices[0].isObject()) {
return arangodb::Result(
TRI_ERROR_INTERNAL,
"got misformed result while visiting Analyzer collection'" + ANALYZER_COLLECTION_NAME +
"' in vocbase '" + vocbase.name() + "'");
}

if (!request.result.answer) {
return arangodb::Result( // result
TRI_ERROR_INTERNAL, // code
std::string("failed to get answer from 'ClusterComm' instance while visiting Analyzer collection '") + ANALYZER_COLLECTION_NAME + "' in vocbase '" + vocbase.name() + "'"
);
VPackSlice answer = slices[0];
arangodb::Result result =
arangodb::network::resultFromBody(answer, TRI_ERROR_NO_ERROR);
if (result.fail()) {
return result;
}

auto slice = request.result.answer->payload();

if (!slice.hasKey("result")) {
return arangodb::Result( // result
TRI_ERROR_INTERNAL, // code
std::string("failed to parse result from 'ClusterComm' instance while visiting Analyzer collection '") + ANALYZER_COLLECTION_NAME + "' in vocbase '" + vocbase.name() + "'"
);
if (!answer.hasKey("result")) {
return arangodb::Result( // result
TRI_ERROR_INTERNAL, // code
std::string(
"failed to parse result while visiting Analyzer collection '") +
ANALYZER_COLLECTION_NAME + "' in vocbase '" + vocbase.name() +
"'");
}

auto res = resultVisitor(visitor, vocbase, slice.get("result"));

if (!res.ok()) {
return res;
result = resultVisitor(visitor, vocbase, answer.get("result"));
if (!result.ok()) {
return result;
}
}
@@ -65,7 +65,7 @@ std::string Response::serverId() const {

template <typename T>
auto prepareRequest(RestVerb type, std::string const& path, T&& payload,
Timeout timeout, Headers headers) {
Headers headers, RequestOptions options) {
fuerte::StringMap params; // intentionally empty
auto req = fuerte::createRequest(type, path, params, std::forward<T>(payload));
req->header.parseArangoPath(path); // strips /_db/<name>/

@@ -74,11 +74,18 @@ auto prepareRequest(RestVerb type, std::string const& path, T&& payload,
}
req->header.setMeta(std::move(headers));

if (!options.contentType.empty()) {
req->header.contentType(options.contentType);
}
if (!options.acceptType.empty()) {
req->header.acceptType(options.acceptType);
}

TRI_voc_tick_t timeStamp = TRI_HybridLogicalClock();
req->header.addMeta(StaticStrings::HLCHeader,
arangodb::basics::HybridLogicalClock::encodeTimeStamp(timeStamp));

req->timeout(std::chrono::duration_cast<std::chrono::milliseconds>(timeout));
req->timeout(std::chrono::duration_cast<std::chrono::milliseconds>(options.timeout));

auto state = ServerState::instance();
if (state->isCoordinator() || state->isDBServer()) {

@@ -97,6 +104,16 @@ auto prepareRequest(RestVerb type, std::string const& path, T&& payload,
FutureRes sendRequest(ConnectionPool* pool, DestinationId const& destination, RestVerb type,
std::string const& path, velocypack::Buffer<uint8_t> payload,
Timeout timeout, Headers headers) {
RequestOptions options;
options.timeout = timeout;
return sendRequest(pool, std::move(destination), type, std::move(path),
std::move(payload), std::move(headers), options);
}

/// @brief send a request to a given destination
FutureRes sendRequest(ConnectionPool* pool, DestinationId const& destination, RestVerb type,
std::string const& path, velocypack::Buffer<uint8_t> payload,
Headers headers, RequestOptions options) {
// FIXME build future.reset(..)

if (!pool || !pool->config().clusterInfo) {

@@ -113,7 +130,7 @@ FutureRes sendRequest(ConnectionPool* pool, DestinationId const& destination, Re
}
TRI_ASSERT(!spec.endpoint.empty());

auto req = prepareRequest(type, path, std::move(payload), timeout, std::move(headers));
auto req = prepareRequest(type, path, std::move(payload), std::move(headers), options);

struct Pack {
DestinationId destination;

@@ -158,7 +175,7 @@ class RequestsState final : public std::enable_shared_from_this<RequestsState> {
public:
RequestsState(ConnectionPool* pool, DestinationId destination, RestVerb type,
std::string path, velocypack::Buffer<uint8_t> payload,
Timeout timeout, Headers headers, bool retryNotFound)
Headers headers, RequestOptions options)
: _pool(pool),
_destination(std::move(destination)),
_type(type),

@@ -168,9 +185,9 @@ class RequestsState final : public std::enable_shared_from_this<RequestsState> {
_workItem(nullptr),
_promise(),
_startTime(std::chrono::steady_clock::now()),
_endTime(_startTime +
std::chrono::duration_cast<std::chrono::steady_clock::duration>(timeout)),
_retryOnCollNotFound(retryNotFound) {}
_endTime(_startTime + std::chrono::duration_cast<std::chrono::steady_clock::duration>(
options.timeout)),
_options(options) {}

~RequestsState() = default;

@@ -187,7 +204,7 @@ class RequestsState final : public std::enable_shared_from_this<RequestsState> {

std::chrono::steady_clock::time_point const _startTime;
std::chrono::steady_clock::time_point const _endTime;
const bool _retryOnCollNotFound;
RequestOptions const _options;

public:

@@ -215,12 +232,14 @@ class RequestsState final : public std::enable_shared_from_this<RequestsState> {
callResponse(Error::Canceled, nullptr);
return;
}

auto localTO = std::chrono::duration_cast<std::chrono::milliseconds>(_endTime - now);
TRI_ASSERT(localTO.count() > 0);

auto localOptions = _options;
localOptions.timeout =
std::chrono::duration_cast<std::chrono::milliseconds>(_endTime - now);
TRI_ASSERT(localOptions.timeout.count() > 0);

auto ref = _pool->leaseConnection(spec.endpoint);
auto req = prepareRequest(_type, _path, _payload, localTO, _headers);
auto req = prepareRequest(_type, _path, _payload, _headers, localOptions);
auto self = RequestsState::shared_from_this();
auto cb = [self, ref](fuerte::Error err,
std::unique_ptr<fuerte::Request> req,

@@ -242,7 +261,7 @@ class RequestsState final : public std::enable_shared_from_this<RequestsState> {
res->statusCode() == fuerte::StatusNoContent) {
callResponse(Error::NoError, std::move(res));
break;
} else if (res->statusCode() == fuerte::StatusNotFound && _retryOnCollNotFound &&
} else if (res->statusCode() == fuerte::StatusNotFound && _options.retryNotFound &&
TRI_ERROR_ARANGO_DATA_SOURCE_NOT_FOUND ==
network::errorCodeFromBody(res->slice())) {
LOG_TOPIC("5a8e9", DEBUG, Logger::COMMUNICATION) << "retrying request";

@@ -328,6 +347,18 @@ FutureRes sendRequestRetry(ConnectionPool* pool, DestinationId const& destinatio
arangodb::fuerte::RestVerb type, std::string const& path,
velocypack::Buffer<uint8_t> payload, Timeout timeout,
Headers headers, bool retryNotFound) {
RequestOptions options;
options.timeout = timeout;
options.retryNotFound = retryNotFound;
return sendRequestRetry(pool, std::move(destination), type, std::move(path),
std::move(payload), std::move(headers), options);
}

/// @brief send a request to a given destination, retry until timeout is exceeded
FutureRes sendRequestRetry(ConnectionPool* pool, DestinationId const& destination,
arangodb::fuerte::RestVerb type, std::string const& path,
velocypack::Buffer<uint8_t> payload, Headers headers,
RequestOptions options) {
if (!pool || !pool->config().clusterInfo) {
LOG_TOPIC("59b96", ERR, Logger::COMMUNICATION)
<< "connection pool unavailable";

@@ -336,8 +367,8 @@ FutureRes sendRequestRetry(ConnectionPool* pool, DestinationId const& destinatio

// auto req = prepareRequest(type, path, std::move(payload), timeout, headers);
auto rs = std::make_shared<RequestsState>(pool, destination, type, path,
std::move(payload), timeout,
std::move(headers), retryNotFound);
std::move(payload),
std::move(headers), options);
rs->startRequest(); // will auto reference itself
return rs->future();
}
@@ -24,9 +24,10 @@
#define ARANGOD_NETWORK_METHODS_H 1

#include "Basics/Result.h"
#include "Basics/StaticStrings.h"
#include "Futures/Future.h"
#include "Network/types.h"
#include "Network/ConnectionPool.h"
#include "Network/types.h"

#include <fuerte/message.h>
#include <velocypack/Buffer.h>

@@ -43,7 +44,7 @@ struct Response {
DestinationId destination;
fuerte::Error error; /// connectivity error
std::unique_ptr<arangodb::fuerte::Response> response;


bool ok() const {
return fuerte::Error::NoError == this->error;
}

@@ -65,19 +66,49 @@ struct Response {
static_assert(std::is_nothrow_move_constructible<Response>::value, "");
using FutureRes = arangodb::futures::Future<Response>;

// Container for optional (often defaulted) parameters
struct RequestOptions {
Timeout timeout = Timeout(120.0);
std::string contentType = StaticStrings::MimeTypeVPack;
std::string acceptType = StaticStrings::MimeTypeVPack;
bool retryNotFound = false;
};

/// @brief send a request to a given destination
///
/// deprecated, use alternative signature
FutureRes sendRequest(ConnectionPool* pool, DestinationId const& destination,
arangodb::fuerte::RestVerb type, std::string const& path,
velocypack::Buffer<uint8_t> payload, Timeout timeout,
Headers headers = {});

/// @brief send a request to a given destination
FutureRes sendRequest(ConnectionPool* pool, DestinationId const& destination,
arangodb::fuerte::RestVerb type, std::string const& path,
velocypack::Buffer<uint8_t> payload = {},
Headers headers = {}, RequestOptions options = {});

/// @brief send a request to a given destination, retry under certain conditions
/// a retry will be triggered if the connection was lost or could not be established
/// optionally a retry will be performed if the data source was not found, until the timeout is exceeded
///
/// deprecated, use alternative signature
FutureRes sendRequestRetry(ConnectionPool* pool, DestinationId const& destination,
arangodb::fuerte::RestVerb type, std::string const& path,
velocypack::Buffer<uint8_t> payload, Timeout timeout,
Headers headers = {}, bool retryNotFound = false);

/// @brief send a request to a given destination, retry under certain conditions
/// a retry will be triggered if the connection was lost or could not be established
/// optionally a retry will be performed if the data source was not found, until the timeout is exceeded
FutureRes sendRequestRetry(ConnectionPool* pool, DestinationId const& destination,
arangodb::fuerte::RestVerb type, std::string const& path,
velocypack::Buffer<uint8_t> payload, Timeout timeout,
Headers headers = {}, bool retryNotFound = false);
velocypack::Buffer<uint8_t> payload = {},
Headers headers = {}, RequestOptions options = {});

using Sender =
std::function<FutureRes(DestinationId const&, arangodb::fuerte::RestVerb, std::string const&,
velocypack::Buffer<uint8_t>, Timeout, Headers)>;

} // namespace network
} // namespace arangodb
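For orientation, a hedged usage sketch of the new signatures declared above: options replace the bare timeout parameter, and content/accept types default to StaticStrings::MimeTypeVPack. pool and serverId are placeholders, and /_api/version is just an example endpoint:

// Sketch: GET with a 30s timeout; retryNotFound is only honoured by the
// retrying variant (it re-sends while the data source is reported missing).
arangodb::network::RequestOptions opts;
opts.timeout = arangodb::network::Timeout(30.0);
opts.retryNotFound = true;

auto future = arangodb::network::sendRequestRetry(
    pool, "server:" + serverId, arangodb::fuerte::RestVerb::Get,
    "/_api/version", arangodb::velocypack::Buffer<uint8_t>(), {}, opts);

auto& res = future.get();  // or attach a continuation instead of blocking
if (res.ok() && res.response != nullptr) {
  // res.response->slices() yields the VelocyPack payload(s)
}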
@@ -215,6 +215,52 @@ int toArangoErrorCodeInternal(fuerte::Error err) {
}
} // namespace

fuerte::RestVerb arangoRestVerbToFuerte(rest::RequestType verb) {
switch (verb) {
case rest::RequestType::DELETE_REQ:
return fuerte::RestVerb::Delete;
case rest::RequestType::GET:
return fuerte::RestVerb::Get;
case rest::RequestType::POST:
return fuerte::RestVerb::Post;
case rest::RequestType::PUT:
return fuerte::RestVerb::Put;
case rest::RequestType::HEAD:
return fuerte::RestVerb::Head;
case rest::RequestType::PATCH:
return fuerte::RestVerb::Patch;
case rest::RequestType::OPTIONS:
return fuerte::RestVerb::Options;
case rest::RequestType::ILLEGAL:
return fuerte::RestVerb::Illegal;
}

return fuerte::RestVerb::Illegal;
}

rest::RequestType fuerteRestVerbToArango(fuerte::RestVerb verb) {
switch (verb) {
case fuerte::RestVerb::Illegal:
return rest::RequestType::ILLEGAL;
case fuerte::RestVerb::Delete:
return rest::RequestType::DELETE_REQ;
case fuerte::RestVerb::Get:
return rest::RequestType::GET;
case fuerte::RestVerb::Post:
return rest::RequestType::POST;
case fuerte::RestVerb::Put:
return rest::RequestType::PUT;
case fuerte::RestVerb::Head:
return rest::RequestType::HEAD;
case fuerte::RestVerb::Patch:
return rest::RequestType::PATCH;
case fuerte::RestVerb::Options:
return rest::RequestType::OPTIONS;
}

return rest::RequestType::ILLEGAL;
}

int fuerteToArangoErrorCode(network::Response const& res) {
LOG_TOPIC_IF("abcde", ERR, Logger::CLUSTER, res.error != fuerte::Error::NoError)
<< "cluster error: '" << fuerte::to_string(res.error)

@@ -227,5 +273,14 @@ int fuerteToArangoErrorCode(fuerte::Error err) {
<< "cluster error: '" << fuerte::to_string(err) << "'";
return toArangoErrorCodeInternal(err);
}

std::string fuerteToArangoErrorMessage(network::Response const& res) {
return TRI_errno_string(fuerteToArangoErrorCode(res));
}

std::string fuerteToArangoErrorMessage(fuerte::Error err) {
return TRI_errno_string(fuerteToArangoErrorCode(err));
}

} // namespace network
} // namespace arangodb
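A small usage note for the converters and error helpers added above; req and res stand for a GeneralRequest and a network::Response available in the caller:

// Translate an incoming request's method into a fuerte verb before forwarding,
// and map it back when needed; the two switches above make this a lossless
// round trip for every enumerator.
arangodb::fuerte::RestVerb verb =
    arangodb::network::arangoRestVerbToFuerte(req->requestType());
arangodb::rest::RequestType back = arangodb::network::fuerteRestVerbToArango(verb);
TRI_ASSERT(back == req->requestType());

// On the response side, collapse a fuerte transport error into the usual
// ArangoDB error code / message pair.
int code = arangodb::network::fuerteToArangoErrorCode(res);
std::string msg = arangodb::network::fuerteToArangoErrorMessage(res);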
@@ -26,6 +26,7 @@

#include "Basics/Result.h"
#include "Network/types.h"
#include "Rest/CommonDefines.h"
#include "Utils/OperationResult.h"

#include <fuerte/types.h>

@@ -67,6 +68,12 @@ void errorCodesFromHeaders(network::Headers headers,
/// @brief transform response into arango error code
int fuerteToArangoErrorCode(network::Response const& res);
int fuerteToArangoErrorCode(fuerte::Error err);
std::string fuerteToArangoErrorMessage(network::Response const& res);
std::string fuerteToArangoErrorMessage(fuerte::Error err);

/// @brief convert between arango and fuerte rest methods
fuerte::RestVerb arangoRestVerbToFuerte(rest::RequestType);
rest::RequestType fuerteRestVerbToArango(fuerte::RestVerb);

} // namespace network
} // namespace arangodb
@@ -25,6 +25,8 @@

#include "ApplicationFeatures/ApplicationServer.h"
#include "Basics/VelocyPackHelper.h"
#include "Cluster/ClusterFeature.h"
#include "Cluster/ClusterInfo.h"
#include "Cluster/ServerState.h"
#include "Graph/Graph.h"
#include "Graph/GraphManager.h"

@@ -73,22 +75,26 @@ RestStatus RestControlPregelHandler::execute() {
}

/// @brief returns the short id of the server which should handle this request
uint32_t RestControlPregelHandler::forwardingTarget() {
std::string RestControlPregelHandler::forwardingTarget() {
rest::RequestType const type = _request->requestType();
if (type != rest::RequestType::POST && type != rest::RequestType::GET &&
type != rest::RequestType::DELETE_REQ) {
return 0;
return "";
}

std::vector<std::string> const& suffixes = _request->suffixes();
if (suffixes.size() < 1) {
return 0;
return "";
}

uint64_t tick = arangodb::basics::StringUtils::uint64(suffixes[0]);
uint32_t sourceServer = TRI_ExtractServerIdFromTick(tick);

return (sourceServer == ServerState::instance()->getShortId()) ? 0 : sourceServer;
if (sourceServer == ServerState::instance()->getShortId()) {
return "";
}
auto& ci = server().getFeature<ClusterFeature>().clusterInfo();
return ci.getCoordinatorByShortID(sourceServer);
}

void RestControlPregelHandler::startExecution() {
@@ -38,7 +38,7 @@ class RestControlPregelHandler : public arangodb::RestVocbaseBaseHandler {
RestStatus execute() override;

protected:
virtual uint32_t forwardingTarget() override;
virtual std::string forwardingTarget() override;

private:
void startExecution();
@@ -26,9 +26,11 @@
#include "Aql/QueryRegistry.h"
#include "Basics/Exceptions.h"
#include "Basics/MutexLocker.h"
#include "Basics/ScopeGuard.h"
#include "Basics/StaticStrings.h"
#include "Basics/VelocyPackHelper.h"
#include "Basics/ScopeGuard.h"
#include "Cluster/ClusterFeature.h"
#include "Cluster/ClusterInfo.h"
#include "Cluster/ServerState.h"
#include "Transaction/Context.h"
#include "Utils/Cursor.h"

@@ -368,21 +370,25 @@ RestStatus RestCursorHandler::handleQueryResult() {
}

/// @brief returns the short id of the server which should handle this request
uint32_t RestCursorHandler::forwardingTarget() {
std::string RestCursorHandler::forwardingTarget() {
rest::RequestType const type = _request->requestType();
if (type != rest::RequestType::PUT && type != rest::RequestType::DELETE_REQ) {
return 0;
return "";
}

std::vector<std::string> const& suffixes = _request->suffixes();
if (suffixes.size() < 1) {
return 0;
return "";
}

uint64_t tick = arangodb::basics::StringUtils::uint64(suffixes[0]);
uint32_t sourceServer = TRI_ExtractServerIdFromTick(tick);

return (sourceServer == ServerState::instance()->getShortId()) ? 0 : sourceServer;
if (sourceServer == ServerState::instance()->getShortId()) {
return "";
}
auto& ci = server().getFeature<ClusterFeature>().clusterInfo();
return ci.getCoordinatorByShortID(sourceServer);
}

////////////////////////////////////////////////////////////////////////////////
@@ -88,7 +88,7 @@ class RestCursorHandler : public RestVocbaseBaseHandler {
RestStatus processQuery();

/// @brief returns the short id of the server which should handle this request
virtual uint32_t forwardingTarget() override;
virtual std::string forwardingTarget() override;

//////////////////////////////////////////////////////////////////////////////
/// @brief unregister the currently running query
@@ -25,6 +25,8 @@
#include "Basics/StaticStrings.h"
#include "Basics/StringUtils.h"
#include "Basics/VelocyPackHelper.h"
#include "Cluster/ClusterFeature.h"
#include "Cluster/ClusterInfo.h"
#include "Cluster/ServerState.h"
#include "Transaction/Helpers.h"
#include "Transaction/Hints.h"

@@ -102,9 +104,9 @@ void RestDocumentHandler::shutdownExecute(bool isFinalized) noexcept {
}

/// @brief returns the short id of the server which should handle this request
uint32_t RestDocumentHandler::forwardingTarget() {
std::string RestDocumentHandler::forwardingTarget() {
if (!ServerState::instance()->isCoordinator()) {
return 0;
return "";
}

bool found = false;

@@ -113,13 +115,17 @@ uint32_t RestDocumentHandler::forwardingTarget() {
uint64_t tid = basics::StringUtils::uint64(value);
if (!transaction::isCoordinatorTransactionId(tid)) {
TRI_ASSERT(transaction::isLegacyTransactionId(tid));
return 0;
return "";
}
uint32_t sourceServer = TRI_ExtractServerIdFromTick(tid);
return (sourceServer == ServerState::instance()->getShortId()) ? 0 : sourceServer;
if (sourceServer == ServerState::instance()->getShortId()) {
return "";
}
auto& ci = server().getFeature<ClusterFeature>().clusterInfo();
return ci.getCoordinatorByShortID(sourceServer);
}

return 0;
return "";
}

////////////////////////////////////////////////////////////////////////////////
@@ -56,7 +56,7 @@ class RestDocumentHandler : public RestVocbaseBaseHandler {
void shutdownExecute(bool isFinalized) noexcept override final;

protected:
uint32_t forwardingTarget() override final;
std::string forwardingTarget() override final;

private:
// inserts a document
@@ -28,6 +28,8 @@

#include "Basics/StringUtils.h"
#include "Basics/conversions.h"
#include "Cluster/ClusterFeature.h"
#include "Cluster/ClusterInfo.h"
#include "Cluster/ServerState.h"
#include "GeneralServer/AsyncJobManager.h"
#include "VocBase/ticks.h"

@@ -241,20 +243,24 @@ void RestJobHandler::deleteJob() {
}

/// @brief returns the short id of the server which should handle this request
uint32_t RestJobHandler::forwardingTarget() {
std::string RestJobHandler::forwardingTarget() {
rest::RequestType const type = _request->requestType();
if (type != rest::RequestType::GET && type != rest::RequestType::PUT &&
type != rest::RequestType::DELETE_REQ) {
return 0;
return "";
}

std::vector<std::string> const& suffixes = _request->suffixes();
if (suffixes.size() < 1) {
return 0;
return "";
}

uint64_t tick = arangodb::basics::StringUtils::uint64(suffixes[0]);
uint32_t sourceServer = TRI_ExtractServerIdFromTick(tick);

return (sourceServer == ServerState::instance()->getShortId()) ? 0 : sourceServer;
if (sourceServer == ServerState::instance()->getShortId()) {
return "";
}
auto& ci = server().getFeature<ClusterFeature>().clusterInfo();
return ci.getCoordinatorByShortID(sourceServer);
}
@@ -85,7 +85,7 @@ class RestJobHandler : public RestBaseHandler {
void deleteJob();

protected:
virtual uint32_t forwardingTarget() override;
virtual std::string forwardingTarget() override;

private:
//////////////////////////////////////////////////////////////////////////////
@@ -350,9 +350,9 @@ bool RestQueryHandler::parseQuery() {
}

/// @brief returns the short id of the server which should handle this request
uint32_t RestQueryHandler::forwardingTarget() {
std::string RestQueryHandler::forwardingTarget() {
if (!ServerState::instance()->isCoordinator()) {
return 0;
return "";
}

bool found = false;

@@ -361,11 +361,15 @@ uint32_t RestQueryHandler::forwardingTarget() {
uint64_t tid = basics::StringUtils::uint64(value);
if (!transaction::isCoordinatorTransactionId(tid)) {
TRI_ASSERT(transaction::isLegacyTransactionId(tid));
return 0;
return "";
}
uint32_t sourceServer = TRI_ExtractServerIdFromTick(tid);
return (sourceServer == ServerState::instance()->getShortId()) ? 0 : sourceServer;
if (sourceServer == ServerState::instance()->getShortId()) {
return "";
}
auto& ci = server().getFeature<ClusterFeature>().clusterInfo();
return ci.getCoordinatorByShortID(sourceServer);
}

return 0;
return "";
}
@@ -96,7 +96,7 @@ class RestQueryHandler : public RestVocbaseBaseHandler {

bool parseQuery();

virtual uint32_t forwardingTarget() override;
virtual std::string forwardingTarget() override;
};
} // namespace arangodb
@@ -31,7 +31,6 @@
#include "Basics/RocksDBUtils.h"
#include "Basics/VelocyPackHelper.h"
#include "Basics/WriteLocker.h"
#include "Cluster/ClusterComm.h"
#include "Cluster/ClusterFeature.h"
#include "Cluster/ClusterHelpers.h"
#include "Cluster/ClusterMethods.h"

@@ -39,6 +38,8 @@
#include "Cluster/ResignShardLeadership.h"
#include "GeneralServer/AuthenticationFeature.h"
#include "Indexes/Index.h"
#include "Network/NetworkFeature.h"
#include "Network/Utils.h"
#include "Replication/DatabaseInitialSyncer.h"
#include "Replication/DatabaseReplicationApplier.h"
#include "Replication/GlobalInitialSyncer.h"

@@ -46,6 +47,7 @@
#include "Replication/ReplicationApplierConfiguration.h"
#include "Replication/ReplicationClients.h"
#include "Replication/ReplicationFeature.h"
#include "Rest/HttpResponse.h"
#include "RestServer/DatabaseFeature.h"
#include "RestServer/QueryRegistryFeature.h"
#include "RestServer/ServerIdFeature.h"

@@ -336,7 +338,7 @@ RestStatus RestReplicationHandler::execute() {
// DEL - delete batchid

if (ServerState::instance()->isCoordinator()) {
handleTrampolineCoordinator();
handleUnforwardedTrampolineCoordinator();
} else {
handleCommandBatch();
}

@@ -366,7 +368,7 @@ RestStatus RestReplicationHandler::execute() {
goto BAD_CALL;
}
if (ServerState::instance()->isCoordinator()) {
handleTrampolineCoordinator();
handleUnforwardedTrampolineCoordinator();
} else {
handleCommandInventory();
}

@@ -418,7 +420,7 @@ RestStatus RestReplicationHandler::execute() {
}

if (ServerState::instance()->isCoordinator()) {
handleTrampolineCoordinator();
handleUnforwardedTrampolineCoordinator();
} else {
handleCommandDump();
}

@@ -584,6 +586,29 @@ BAD_CALL:
return RestStatus::DONE;
}

/// @brief returns the short id of the server which should handle this request
std::string RestReplicationHandler::forwardingTarget() {
if (!ServerState::instance()->isCoordinator()) {
return "";
}

auto const& suffixes = _request->suffixes();
size_t const len = suffixes.size();
if (len >= 1) {
auto const type = _request->requestType();
std::string const& command = suffixes[0];
if ((command == Batch) || (command == Inventory && type == rest::RequestType::GET) ||
(command == Dump && type == rest::RequestType::GET)) {
ServerID const& DBserver = _request->value("DBserver");
if (!DBserver.empty()) {
return DBserver;
}
}
}

return "";
}

////////////////////////////////////////////////////////////////////////////////
/// @brief was docuBlock JSF_put_api_replication_makeSlave
////////////////////////////////////////////////////////////////////////////////

@@ -640,14 +665,14 @@ void RestReplicationHandler::handleCommandMakeSlave() {
}

////////////////////////////////////////////////////////////////////////////////
/// @brief forward a command in the coordinator case
/// @brief handle an unforwarded command in the coordinator case
/// If the request is well-formed and has the DBserver set, then the request
/// should already be forwarded by other means. We should only get here if
/// the request is null or the DBserver parameter is missing. This method
/// now just does a bit of error handling.
////////////////////////////////////////////////////////////////////////////////

void RestReplicationHandler::handleTrampolineCoordinator() {
bool useVst = false;
if (_request->transportType() == Endpoint::TransportType::VST) {
useVst = true;
}
void RestReplicationHandler::handleUnforwardedTrampolineCoordinator() {
if (_request == nullptr) {
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid request");
}

@@ -661,100 +686,7 @@ void RestReplicationHandler::handleTrampolineCoordinator() {
return;
}

std::string const& dbname = _request->databaseName();

auto headers = std::make_shared<std::unordered_map<std::string, std::string>>(
arangodb::getForwardableRequestHeaders(_request.get()));
std::unordered_map<std::string, std::string> values = _request->values();
std::string params;

for (auto const& i : values) {
if (i.first != "DBserver") {
if (params.empty()) {
params.push_back('?');
} else {
params.push_back('&');
}
params.append(StringUtils::urlEncode(i.first));
params.push_back('=');
params.append(StringUtils::urlEncode(i.second));
}
}

// Set a few variables needed for our work:
auto cc = ClusterComm::instance();
if (cc == nullptr) {
// nullptr happens only during controlled shutdown
generateError(rest::ResponseCode::SERVICE_UNAVAILABLE,
TRI_ERROR_SHUTTING_DOWN, "shutting down server");
return;
}

std::unique_ptr<ClusterCommResult> res;
if (!useVst) {
TRI_ASSERT(_request->transportType() == Endpoint::TransportType::HTTP);

VPackStringRef body = _request->rawPayload();
// Send a synchronous request to that shard using ClusterComm:
res = cc->syncRequest(TRI_NewTickServer(), "server:" + DBserver,
_request->requestType(),
"/_db/" + StringUtils::urlEncode(dbname) +
_request->requestPath() + params,
body.toString(), *headers, 300.0);
} else {
// do we need to handle multiple payloads here - TODO
// here we switch from vst to http?!
res = cc->syncRequest(TRI_NewTickServer(), "server:" + DBserver,
_request->requestType(),
"/_db/" + StringUtils::urlEncode(dbname) +
_request->requestPath() + params,
_request->payload().toJson(), *headers, 300.0);
}

if (res->status == CL_COMM_TIMEOUT) {
// No reply, we give up:
generateError(rest::ResponseCode::BAD, TRI_ERROR_CLUSTER_TIMEOUT,
"timeout within cluster");
return;
}
if (res->status == CL_COMM_BACKEND_UNAVAILABLE) {
// there is no result
generateError(rest::ResponseCode::BAD, TRI_ERROR_CLUSTER_CONNECTION_LOST,
"lost connection within cluster");
return;
}
if (res->status == CL_COMM_ERROR) {
// This could be a broken connection or an Http error:
TRI_ASSERT(nullptr != res->result && res->result->isComplete());
// In this case a proper HTTP error was reported by the DBserver,
// we simply forward the result.
// We intentionally fall through here.
}

bool dummy;
resetResponse(static_cast<rest::ResponseCode>(res->result->getHttpReturnCode()));

_response->setContentType(
res->result->getHeaderField(StaticStrings::ContentTypeHeader, dummy));

if (!useVst) {
HttpResponse* httpResponse = dynamic_cast<HttpResponse*>(_response.get());
if (_response == nullptr) {
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
"invalid response type");
}
httpResponse->body().swap(&(res->result->getBody()));
} else {
std::shared_ptr<VPackBuilder> builder = res->result->getBodyVelocyPack();
std::shared_ptr<VPackBuffer<uint8_t>> buf = builder->steal();
_response->setPayload(std::move(*buf),
true); // do we need to generate the body?!
}

auto const& resultHeaders = res->result->getHeaderFields();
for (auto const& it : resultHeaders) {
_response->setHeader(it.first, it.second);
}
TRI_ASSERT(false); // should only get here if request is not well-formed
}

////////////////////////////////////////////////////////////////////////////////
@@ -106,6 +106,8 @@ class RestReplicationHandler : public RestVocbaseBaseHandler {
static std::string const HoldReadLockCollection;

protected:
std::string forwardingTarget() override final;

//////////////////////////////////////////////////////////////////////////////
/// @brief creates an error if called on a coordinator server
//////////////////////////////////////////////////////////////////////////////

@@ -122,7 +124,7 @@ class RestReplicationHandler : public RestVocbaseBaseHandler {
/// @brief forward a command in the coordinator case
//////////////////////////////////////////////////////////////////////////////

void handleTrampolineCoordinator();
void handleUnforwardedTrampolineCoordinator();

//////////////////////////////////////////////////////////////////////////////
/// @brief returns the cluster inventory, only on coordinator
@@ -25,6 +25,8 @@

#include "ApplicationFeatures/ApplicationServer.h"
#include "Basics/VelocyPackHelper.h"
#include "Cluster/ClusterFeature.h"
#include "Cluster/ClusterInfo.h"
#include "Cluster/ServerState.h"
#include "V8/JavaScriptSecurityContext.h"
#include "V8/v8-globals.h"

@@ -72,22 +74,26 @@ RestStatus RestTasksHandler::execute() {
}

/// @brief returns the short id of the server which should handle this request
uint32_t RestTasksHandler::forwardingTarget() {
std::string RestTasksHandler::forwardingTarget() {
rest::RequestType const type = _request->requestType();
if (type != rest::RequestType::POST && type != rest::RequestType::PUT &&
type != rest::RequestType::GET && type != rest::RequestType::DELETE_REQ) {
return 0;
return "";
}

std::vector<std::string> const& suffixes = _request->suffixes();
if (suffixes.size() < 1) {
return 0;
return "";
}

uint64_t tick = arangodb::basics::StringUtils::uint64(suffixes[0]);
uint32_t sourceServer = TRI_ExtractServerIdFromTick(tick);

return (sourceServer == ServerState::instance()->getShortId()) ? 0 : sourceServer;
if (sourceServer == ServerState::instance()->getShortId()) {
return "";
}
auto& ci = server().getFeature<ClusterFeature>().clusterInfo();
return ci.getCoordinatorByShortID(sourceServer);
}

void RestTasksHandler::getTasks() {
@@ -38,7 +38,7 @@ class RestTasksHandler : public arangodb::RestVocbaseBaseHandler {
RestStatus execute() override;

protected:
virtual uint32_t forwardingTarget() override;
virtual std::string forwardingTarget() override;

private:
void getTasks();
@@ -27,11 +27,13 @@
#include "ApplicationFeatures/ApplicationServer.h"
#include "Basics/ReadLocker.h"
#include "Basics/WriteLocker.h"
#include "Cluster/ClusterFeature.h"
#include "Cluster/ClusterInfo.h"
#include "Cluster/ServerState.h"
#include "StorageEngine/EngineSelectorFeature.h"
#include "Transaction/Helpers.h"
#include "Transaction/Manager.h"
#include "Transaction/ManagerFeature.h"
#include "Transaction/Helpers.h"
#include "Transaction/Status.h"
#include "V8/JavaScriptSecurityContext.h"
#include "V8Server/V8Context.h"

@@ -325,20 +327,24 @@ bool RestTransactionHandler::cancel() {
}

/// @brief returns the short id of the server which should handle this request
uint32_t RestTransactionHandler::forwardingTarget() {
std::string RestTransactionHandler::forwardingTarget() {
rest::RequestType const type = _request->requestType();
if (type != rest::RequestType::GET && type != rest::RequestType::PUT &&
type != rest::RequestType::DELETE_REQ) {
return 0;
return "";
}

std::vector<std::string> const& suffixes = _request->suffixes();
if (suffixes.size() < 1) {
return 0;
return "";
}

uint64_t tick = arangodb::basics::StringUtils::uint64(suffixes[0]);
uint32_t sourceServer = TRI_ExtractServerIdFromTick(tick);

return (sourceServer == ServerState::instance()->getShortId()) ? 0 : sourceServer;
if (sourceServer == ServerState::instance()->getShortId()) {
return "";
}
auto& ci = server().getFeature<ClusterFeature>().clusterInfo();
return ci.getCoordinatorByShortID(sourceServer);
}
@@ -48,7 +48,7 @@ class RestTransactionHandler : public arangodb::RestVocbaseBaseHandler {
bool cancel() override final;

protected:
virtual uint32_t forwardingTarget() override;
virtual std::string forwardingTarget() override;

private:
void executeGetState();
@@ -27,9 +27,13 @@

#include "Basics/StringUtils.h"
#include "Basics/system-functions.h"
#include "Cluster/ClusterComm.h"
#include "Cluster/ClusterFeature.h"
#include "Cluster/ClusterInfo.h"
#include "Futures/Utilities.h"
#include "Logger/LogMacros.h"
#include "Network/Methods.h"
#include "Network/NetworkFeature.h"
#include "Network/Utils.h"
#include "ShardDistributionReporter.h"
#include "VocBase/LogicalCollection.h"
#include "VocBase/ticks.h"

@@ -260,10 +264,8 @@ static void ReportOffSync(LogicalCollection const* col, ShardMap const* shardIds
result.close();
}

ShardDistributionReporter::ShardDistributionReporter(std::shared_ptr<ClusterComm> cc,
ClusterInfo* ci)
: _cc(cc), _ci(ci) {
TRI_ASSERT(_cc != nullptr);
ShardDistributionReporter::ShardDistributionReporter(ClusterInfo* ci, network::Sender sender)
: _ci(ci), _send(sender) {
TRI_ASSERT(_ci != nullptr);
}

@@ -273,8 +275,15 @@ std::shared_ptr<ShardDistributionReporter> ShardDistributionReporter::instance(
application_features::ApplicationServer& server) {
if (_theInstance == nullptr) {
auto& ci = server.getFeature<ClusterFeature>().clusterInfo();
_theInstance =
std::make_shared<ShardDistributionReporter>(ClusterComm::instance(), &ci);
auto& nf = server.getFeature<NetworkFeature>();
auto* pool = nf.pool();
_theInstance = std::make_shared<ShardDistributionReporter>(
&ci,
[pool](network::DestinationId const& d, arangodb::fuerte::RestVerb v,
std::string const& u, velocypack::Buffer<uint8_t> b,
network::Timeout t, network::Headers h) -> network::FutureRes {
return sendRequest(pool, d, v, u, std::move(b), t, std::move(h));
});
}
return _theInstance;
}

@@ -284,7 +293,6 @@ void ShardDistributionReporter::helperDistributionForDatabase(
std::queue<std::shared_ptr<LogicalCollection>>& todoSyncStateCheck, double endtime,
std::unordered_map<std::string, std::string>& aliases, bool progress) {
if (!todoSyncStateCheck.empty()) {
CoordTransactionID coordId = TRI_NewTickServer();
std::unordered_map<ShardID, SyncCountInfo> counters;
std::vector<ServerID> serversToAsk;
while (!todoSyncStateCheck.empty()) {

@@ -297,9 +305,8 @@ void ShardDistributionReporter::helperDistributionForDatabase(
// Send requests
for (auto const& s : *(allShards.get())) {
double timeleft = endtime - TRI_microtime();
network::Timeout timeout(timeleft);
serversToAsk.clear();
uint64_t requestsInFlight = 0;
OperationID leaderOpId = 0;
auto curServers = cic->servers(s.first);
auto& entry = counters[s.first]; // Emplaces a new SyncCountInfo
if (curServers.empty() || s.second.empty()) {

@@ -314,15 +321,12 @@ void ShardDistributionReporter::helperDistributionForDatabase(
"/_api/collection/" +
basics::StringUtils::urlEncode(s.first) +
"/count";
auto body = std::make_shared<std::string const>();
VPackBuffer<uint8_t> body;

{
// First Ask the leader
std::unordered_map<std::string, std::string> headers;
leaderOpId = _cc->asyncRequest(coordId, "server:" + s.second.at(0),
rest::RequestType::GET, path, body,
headers, nullptr, timeleft);
}
// First Ask the leader
network::Headers headers;
auto leaderF = _send("server:" + s.second.at(0), fuerte::RestVerb::Get,
path, body, timeout, headers);

// Now figure out which servers need to be asked
for (auto const& planned : s.second) {

@@ -339,71 +343,74 @@ void ShardDistributionReporter::helperDistributionForDatabase(
}

// Ask them
std::unordered_map<std::string, std::string> headers;
std::vector<network::FutureRes> futures;
futures.reserve(serversToAsk.size());
for (auto const& server : serversToAsk) {
_cc->asyncRequest(coordId, "server:" + server, rest::RequestType::GET,
path, body, headers, nullptr, timeleft);
requestsInFlight++;
network::Headers headers;
auto f = _send("server:" + server, fuerte::RestVerb::Get, path,
body, timeout, headers);
futures.emplace_back(std::move(f));
}

// Wait for responses
// First wait for Leader
{
auto result = _cc->wait(coordId, leaderOpId, "");
if (result.status != CL_COMM_RECEIVED) {
auto& res = leaderF.get();
if (fuerteToArangoErrorCode(res) != TRI_ERROR_NO_ERROR || !res.response) {
// We did not even get count for leader, use defaults
_cc->drop(coordId, 0, "");
// Just in case, to get a new state
coordId = TRI_NewTickServer();
continue;
}
auto body = result.result->getBodyVelocyPack();
VPackSlice response = body->slice();
if (!response.isObject()) {

std::vector<VPackSlice> const& slices = res.response->slices();
if (slices.empty() || !slices[0].isObject()) {
LOG_TOPIC("c02b2", WARN, arangodb::Logger::CLUSTER)
<< "Received invalid response for count. Shard "
"distribution "
"inaccurate";
<< "distribution inaccurate";
continue;
}
response = response.get("count");

VPackSlice response = slices[0].get("count");
if (!response.isNumber()) {
LOG_TOPIC("fe868", WARN, arangodb::Logger::CLUSTER)
<< "Received invalid response for count. Shard "
"distribution "
"inaccurate";
<< "distribution inaccurate";
continue;
}

entry.total = response.getNumber<uint64_t>();
entry.current = entry.total; // << We use this to flip around min/max test
}

// Now wait for others
while (requestsInFlight > 0) {
auto result = _cc->wait(coordId, 0, "");
requestsInFlight--;
if (result.status != CL_COMM_RECEIVED) {
// We do not care for errors of any kind.
// We can continue here because all other requests will be
// handled by the accumulated timeout
continue;
} else {
auto body = result.result->getBodyVelocyPack();
VPackSlice response = body->slice();
if (!response.isObject()) {
{
auto responses = futures::collectAll(futures).get();
for (futures::Try<network::Response> const& response : responses) {
if (!response.hasValue() ||
fuerteToArangoErrorCode(response.get()) != TRI_ERROR_NO_ERROR ||
!response.get().response) {
// We do not care for errors of any kind.
// We can continue here because all other requests will be
// handled by the accumulated timeout
continue;
}

auto& res = response.get();
std::vector<VPackSlice> const& slices = res.response->slices();
if (slices.empty() || !slices[0].isObject()) {
LOG_TOPIC("fcbb3", WARN, arangodb::Logger::CLUSTER)
<< "Received invalid response for count. Shard "
"distribution inaccurate";
<< "distribution inaccurate";
continue;
}
response = response.get("count");
if (!response.isNumber()) {

VPackSlice answer = slices[0].get("count");
if (!answer.isNumber()) {
LOG_TOPIC("8d7b0", WARN, arangodb::Logger::CLUSTER)
<< "Received invalid response for count. Shard "
"distribution inaccurate";
<< "distribution inaccurate";
continue;
}
uint64_t other = response.getNumber<uint64_t>();

uint64_t other = answer.getNumber<uint64_t>();
if (other < entry.total) {
// If we have more in total we need the minimum of other
// counts
@@ -24,12 +24,12 @@
#define ARANGOD_CLUSTER_SHARD_DISTRIBUTED_REPORTER_H 1

#include "Basics/Common.h"
#include "Network/Methods.h"

#include <queue>

namespace arangodb {

class ClusterComm;
class ClusterInfo;

namespace velocypack {

@@ -54,7 +54,7 @@ class ShardDistributionReporter {
static std::shared_ptr<ShardDistributionReporter> _theInstance;

public:
ShardDistributionReporter(std::shared_ptr<ClusterComm> cc, ClusterInfo* ci);
ShardDistributionReporter(ClusterInfo* ci, network::Sender sender);

~ShardDistributionReporter();

@@ -75,8 +75,8 @@ class ShardDistributionReporter {
std::unordered_map<std::string, std::string>& aliases, bool progress);

private:
std::shared_ptr<ClusterComm> _cc;
ClusterInfo* _ci;
network::Sender _send;
};
} // namespace cluster
} // namespace arangodb
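Since the reporter now receives a network::Sender callback rather than a ClusterComm pointer, the transport can be substituted, for example stubbed out in a test. A non-authoritative sketch of such wiring, assuming network::Response is default-constructible and futures::makeFuture accepts it; clusterInfo stands for an available ClusterInfo instance:

// Illustrative only: a Sender that answers every request locally with an
// empty, error-free response instead of going over the network.
arangodb::network::Sender fakeSender =
    [](arangodb::network::DestinationId const&, arangodb::fuerte::RestVerb,
       std::string const&, arangodb::velocypack::Buffer<uint8_t>,
       arangodb::network::Timeout, arangodb::network::Headers)
        -> arangodb::network::FutureRes {
  arangodb::network::Response res{};   // assumption: default-constructible
  res.error = arangodb::fuerte::Error::NoError;
  return arangodb::futures::makeFuture(std::move(res));
};

// Inject it instead of the production lambda built around network::sendRequest.
arangodb::cluster::ShardDistributionReporter reporter(&clusterInfo, fakeSender);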
@@ -26,14 +26,16 @@
#include "Basics/ReadLocker.h"
#include "Basics/WriteLocker.h"
#include "Basics/system-functions.h"
#include "Cluster/ClusterComm.h"
#include "Cluster/ClusterFeature.h"
#include "Cluster/ClusterInfo.h"
#include "Cluster/ServerState.h"
#include "Futures/Utilities.h"
#include "GeneralServer/AuthenticationFeature.h"
#include "Logger/LogMacros.h"
#include "Logger/Logger.h"
#include "Logger/LoggerStream.h"
#include "Network/Methods.h"
#include "Network/NetworkFeature.h"
#include "StorageEngine/EngineSelectorFeature.h"
#include "StorageEngine/StorageEngine.h"
#include "StorageEngine/TransactionState.h"

@@ -872,24 +874,28 @@ void Manager::toVelocyPack(VPackBuilder& builder, std::string const& database,
TRI_ASSERT(ServerState::instance()->isCoordinator());
auto& ci = _feature.server().getFeature<ClusterFeature>().clusterInfo();

std::shared_ptr<ClusterComm> cc = ClusterComm::instance();
if (cc == nullptr) {
NetworkFeature const& nf = _feature.server().getFeature<NetworkFeature>();
network::ConnectionPool* pool = nf.pool();
if (pool == nullptr) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_SHUTTING_DOWN);
}

std::vector<ClusterCommRequest> requests;
std::vector<network::FutureRes> futures;
auto auth = AuthenticationFeature::instance();

network::RequestOptions options;
options.timeout = network::Timeout(30.0);

VPackBuffer<uint8_t> body;

for (auto const& coordinator : ci.getCurrentCoordinators()) {
if (coordinator == ServerState::instance()->getId()) {
// ourselves!
continue;
}

auto headers = std::make_unique<std::unordered_map<std::string, std::string>>();
network::Headers headers;
if (auth != nullptr && auth->isActive()) {
// when in superuser mode, username is empty
// in this case ClusterComm will add the default superuser token
if (!username.empty()) {
VPackBuilder builder;
{

@@ -897,31 +903,38 @@ void Manager::toVelocyPack(VPackBuilder& builder, std::string const& database,
payload->add("preferred_username", VPackValue(username));
}
VPackSlice slice = builder.slice();
headers->emplace(StaticStrings::Authorization,
"bearer " + auth->tokenCache().generateJwt(slice));
headers.emplace(StaticStrings::Authorization,
"bearer " + auth->tokenCache().generateJwt(slice));
} else {
headers.emplace(StaticStrings::Authorization,
"bearer " + auth->tokenCache().jwtToken());
}
}

requests.emplace_back("server:" + coordinator, rest::RequestType::GET,
"/_db/" + database + "/_api/transaction?local=true",
std::make_shared<std::string>(), std::move(headers));
auto f = network::sendRequest(pool, "server:" + coordinator, fuerte::RestVerb::Get,
"/_db/" + database +
"/_api/transaction?local=true",
body, std::move(headers), options);
futures.emplace_back(std::move(f));
}

if (!requests.empty()) {
size_t nrGood = cc->performRequests(requests, 30.0, Logger::COMMUNICATION, false);

if (nrGood != requests.size()) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_CLUSTER_BACKEND_UNAVAILABLE);
}
for (auto const& it : requests) {
if (it.result.result && it.result.result->getHttpReturnCode() == 200) {
auto const body = it.result.result->getBodyVelocyPack();
VPackSlice slice = body->slice();
if (slice.isObject()) {
slice = slice.get("transactions");
if (slice.isArray()) {
for (auto const& it : VPackArrayIterator(slice)) {
builder.add(it);
if (!futures.empty()) {
auto responses = futures::collectAll(futures).get();
for (auto const& it : responses) {
if (!it.hasValue()) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_CLUSTER_BACKEND_UNAVAILABLE);
}
auto& res = it.get();
if (res.response && res.response->statusCode() == fuerte::StatusOK) {
auto slices = res.response->slices();
if (!slices.empty()) {
VPackSlice slice = slices[0];
if (slice.isObject()) {
slice = slice.get("transactions");
if (slice.isArray()) {
for (auto const& it : VPackArrayIterator(slice)) {
builder.add(it);
}
}
}
}
}
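The Manager::toVelocyPack hunk above is the full fan-out pattern this commit standardizes on: look up the connection pool, send one GET per remote coordinator, then block once on collectAll and merge the reported "transactions" arrays. Condensed into a hypothetical free function (name and parameter list invented; JWT header construction and exception throwing are reduced to comments), under the assumption that the signatures are exactly those shown in the diff:

// Hypothetical helper, illustration only. Assumes the headers used above
// (Cluster/ClusterInfo.h, Cluster/ServerState.h, Futures/Utilities.h,
// Network/Methods.h) and the usual velocypack aliases.
static void collectRemoteTransactions(arangodb::network::ConnectionPool* pool,
                                      arangodb::ClusterInfo& ci,
                                      std::string const& database,
                                      VPackBuilder& builder) {
  using namespace arangodb;
  if (pool == nullptr) {
    return;  // the real code throws TRI_ERROR_SHUTTING_DOWN here
  }

  network::RequestOptions options;
  options.timeout = network::Timeout(30.0);  // same budget as the old performRequests() call

  VPackBuffer<uint8_t> body;  // empty body for the GET
  std::vector<network::FutureRes> futures;

  for (auto const& coordinator : ci.getCurrentCoordinators()) {
    if (coordinator == ServerState::instance()->getId()) {
      continue;  // skip ourselves
    }
    network::Headers headers;  // the real code adds a bearer-JWT Authorization header
    futures.emplace_back(network::sendRequest(
        pool, "server:" + coordinator, fuerte::RestVerb::Get,
        "/_db/" + database + "/_api/transaction?local=true", body,
        std::move(headers), options));
  }

  for (auto const& tryRes : futures::collectAll(futures).get()) {
    if (!tryRes.hasValue()) {
      continue;  // the real code throws TRI_ERROR_CLUSTER_BACKEND_UNAVAILABLE
    }
    auto& res = tryRes.get();
    if (res.response && res.response->statusCode() == fuerte::StatusOK) {
      auto slices = res.response->slices();
      if (!slices.empty() && slices[0].isObject()) {
        VPackSlice txs = slices[0].get("transactions");
        if (txs.isArray()) {
          for (auto const& tx : VPackArrayIterator(txs)) {
            builder.add(tx);  // merge into the caller's builder
          }
        }
      }
    }
  }
}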
@@ -37,7 +37,6 @@
#include "Basics/VelocyPackHelper.h"
#include "Basics/encoding.h"
#include "Basics/system-compiler.h"
#include "Cluster/ClusterComm.h"
#include "Cluster/ClusterFeature.h"
#include "Cluster/ClusterMethods.h"
#include "Cluster/ClusterTrxMethods.h"

@@ -709,10 +708,10 @@ std::pair<bool, bool> transaction::Methods::findIndexHandleForAndNode(
}

/// @brief Find out if any of the given requests has ended in a refusal
static bool findRefusal(std::vector<ClusterCommRequest> const& requests) {
for (auto const& it : requests) {
if (it.done && it.result.status == CL_COMM_RECEIVED &&
it.result.answer_code == rest::ResponseCode::NOT_ACCEPTABLE) {
static bool findRefusal(std::vector<futures::Try<network::Response>> const& responses) {
for (auto const& it : responses) {
if (it.hasValue() && it.get().ok() &&
it.get().response->statusCode() == fuerte::StatusNotAcceptable) {
return true;
}
}
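The new findRefusal above also doubles as a pattern for reading futures::Try safely: hasValue() separates a delivered response from a stored exception, and only then are ok(), response and statusCode() touched. A small sketch that extends the same checks into a three-way classification (the enum and function name are invented for illustration; the status constants and accessors are the ones used in this hunk):

// Sketch only -- assumes Futures/Utilities.h and Network/Methods.h.
enum class FollowerOutcome { Refused, Succeeded, Failed };

static FollowerOutcome classify(arangodb::futures::Try<arangodb::network::Response> const& t) {
  if (!t.hasValue() || !t.get().ok() || !t.get().response) {
    return FollowerOutcome::Failed;  // stored exception or communication error
  }
  auto code = t.get().response->statusCode();
  if (code == arangodb::fuerte::StatusNotAcceptable) {
    return FollowerOutcome::Refused;  // follower refused: leadership was lost
  }
  if (code == arangodb::fuerte::StatusAccepted || code == arangodb::fuerte::StatusOK) {
    return FollowerOutcome::Succeeded;
  }
  return FollowerOutcome::Failed;
}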
@@ -2457,42 +2456,43 @@ Future<OperationResult> transaction::Methods::truncateLocal(std::string const& c
TRI_ASSERT(!_state->hasHint(Hints::Hint::FROM_TOPLEVEL_AQL));

// Now replicate the good operations on all followers:
auto cc = arangodb::ClusterComm::instance();

if (cc != nullptr) {
NetworkFeature const& nf = vocbase().server().getFeature<NetworkFeature>();
network::ConnectionPool* pool = nf.pool();
if (pool != nullptr) {
// nullptr only happens on controlled shutdown
std::string path =
"/_db/" + arangodb::basics::StringUtils::urlEncode(vocbase().name()) +
"/_api/collection/" + arangodb::basics::StringUtils::urlEncode(collectionName) +
"/truncate?isSynchronousReplication=" + ServerState::instance()->getId();
auto body = std::make_shared<std::string>();
VPackBuffer<uint8_t> body;

// Now prepare the requests:
std::vector<ClusterCommRequest> requests;
requests.reserve(followers->size());
std::vector<network::FutureRes> futures;
futures.reserve(followers->size());

for (auto const& f : *followers) {
auto headers = std::make_unique<std::unordered_map<std::string, std::string>>();
ClusterTrxMethods::addTransactionHeader(*this, f, *headers);
requests.emplace_back("server:" + f, arangodb::rest::RequestType::PUT,
path, body, std::move(headers));
network::Headers headers;
ClusterTrxMethods::addTransactionHeader(*this, f, headers);
auto future = network::sendRequest(pool, "server:" + f, fuerte::RestVerb::Put,
path, body, std::move(headers));
futures.emplace_back(std::move(future));
}

cc->performRequests(requests, 120.0, Logger::REPLICATION, false);
auto responses = futures::collectAll(futures).get();
// If any would-be-follower refused to follow there must be a
// new leader in the meantime, in this case we must not allow
// this operation to succeed, we simply return with a refusal
// error (note that we use the follower version, since we have
// lost leadership):
if (findRefusal(requests)) {
if (findRefusal(responses)) {
return futures::makeFuture(OperationResult(TRI_ERROR_CLUSTER_SHARD_LEADER_RESIGNED));
}
// we drop all followers that were not successful:
for (size_t i = 0; i < followers->size(); ++i) {
bool replicationWorked =
requests[i].done && requests[i].result.status == CL_COMM_RECEIVED &&
(requests[i].result.answer_code == rest::ResponseCode::ACCEPTED ||
requests[i].result.answer_code == rest::ResponseCode::OK);
responses[i].hasValue() && responses[i].get().ok() &&
(responses[i].get().response->statusCode() == fuerte::StatusAccepted ||
responses[i].get().response->statusCode() == fuerte::StatusOK);
if (!replicationWorked) {
auto const& followerInfo = collection->followers();
Result res = followerInfo->remove((*followers)[i]);

@@ -2542,11 +2542,6 @@ futures::Future<OperationResult> transaction::Methods::countAsync(std::string co
futures::Future<OperationResult> transaction::Methods::countCoordinator(
std::string const& collectionName, transaction::CountType type) {
auto& feature = vocbase().server().getFeature<ClusterFeature>();
auto cc = ClusterComm::instance();
if (cc == nullptr) {
// nullptr happens only during controlled shutdown
return futures::makeFuture(OperationResult(TRI_ERROR_SHUTTING_DOWN));
}
ClusterInfo& ci = feature.clusterInfo();

// First determine the collection ID from the name:
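One detail worth calling out in the truncate hunk above: it indexes responses[i] against (*followers)[i], which relies on futures::collectAll returning results in the same order in which the futures were appended (the hunk's indexing only makes sense under that assumption). Restated in isolation as a sketch:

// Sketch: per-follower success check after collectAll(); index i in
// `responses` corresponds to (*followers)[i] because the futures were
// appended in follower order.
auto responses = arangodb::futures::collectAll(futures).get();
for (size_t i = 0; i < followers->size(); ++i) {
  bool replicationWorked =
      responses[i].hasValue() && responses[i].get().ok() &&
      (responses[i].get().response->statusCode() == arangodb::fuerte::StatusAccepted ||
       responses[i].get().response->statusCode() == arangodb::fuerte::StatusOK);
  if (!replicationWorked) {
    // the real code drops the follower here via collection->followers()->remove(...)
  }
}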
@@ -33,12 +33,16 @@
#include "Basics/conversions.h"
#include "Basics/files.h"
#include "Basics/tri-strings.h"
#include "Cluster/ClusterComm.h"
#include "Cluster/ClusterFeature.h"
#include "Cluster/ClusterInfo.h"
#include "Cluster/ServerState.h"
#include "Futures/Utilities.h"
#include "GeneralServer/GeneralServer.h"
#include "GeneralServer/ServerSecurityFeature.h"
#include "Logger/LogMacros.h"
#include "Logger/Logger.h"
#include "Network/Methods.h"
#include "Network/NetworkFeature.h"
#include "Network/Utils.h"
#include "Rest/GeneralRequest.h"
#include "Rest/HttpRequest.h"
#include "Rest/HttpResponse.h"

@@ -1415,41 +1419,42 @@ static int clusterSendToAllServers(std::string const& dbname,
arangodb::rest::RequestType const& method,
std::string const& body) {
auto& server = application_features::ApplicationServer::server();
ClusterInfo& ci = server.getFeature<ClusterFeature>().clusterInfo();
auto cc = ClusterComm::instance();
if (cc == nullptr) {
network::ConnectionPool* pool = server.getFeature<NetworkFeature>().pool();
if (!pool || !pool->config().clusterInfo) {
LOG_TOPIC("98fc7", ERR, Logger::COMMUNICATION) << "Network pool unavailable.";
return TRI_ERROR_SHUTTING_DOWN;
}
ClusterInfo& ci = *pool->config().clusterInfo;

network::Headers headers;
fuerte::RestVerb verb = network::arangoRestVerbToFuerte(method);
std::string url = "/_db/" + StringUtils::urlEncode(dbname) + "/" + path;
auto timeout = std::chrono::seconds(3600);

std::vector<futures::Future<network::Response>> futures;

// Have to propagate to DB Servers
std::vector<ServerID> DBServers;
CoordTransactionID coordTransactionID = TRI_NewTickServer();
auto reqBodyString = std::make_shared<std::string>(body);

DBServers = ci.getCurrentDBServers();
std::unordered_map<std::string, std::string> headers;
std::vector<ServerID> DBServers = ci.getCurrentDBServers();
for (auto const& sid : DBServers) {
cc->asyncRequest(coordTransactionID, "server:" + sid, method, url,
reqBodyString, headers, nullptr, 3600.0);
VPackBuffer<uint8_t> buffer(body.size());
buffer.append(body);
auto f = network::sendRequest(pool, "server:" + sid, verb, url,
std::move(buffer), timeout, headers);
futures.emplace_back(std::move(f));
}

// Now listen to the results:
size_t count = DBServers.size();

for (; count > 0; count--) {
auto res = cc->wait(coordTransactionID, 0, "", 0.0);
if (res.status == CL_COMM_TIMEOUT) {
cc->drop(coordTransactionID, 0, "");
return TRI_ERROR_CLUSTER_TIMEOUT;
}
if (res.status == CL_COMM_ERROR || res.status == CL_COMM_DROPPED ||
res.status == CL_COMM_BACKEND_UNAVAILABLE) {
cc->drop(coordTransactionID, 0, "");
return TRI_ERROR_INTERNAL;
}
}
return TRI_ERROR_NO_ERROR;
return futures::collectAll(futures)
.thenValue([](std::vector<futures::Try<network::Response>>&& responses) -> int {
for (futures::Try<network::Response> const& tryRes : responses) {
network::Response const& res = tryRes.get(); // throws exceptions upwards
int commError = network::fuerteToArangoErrorCode(res);
if (commError != TRI_ERROR_NO_ERROR) {
return commError;
}
}
return TRI_ERROR_NO_ERROR;
})
.get();
}
#endif
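The converted clusterSendToAllServers shows the composition style rather than the collect-then-loop used elsewhere in this commit: collectAll wraps all responses, thenValue folds them into a single error code, and the one .get() at the end is the only blocking point. Isolated as a hypothetical helper (name invented; types and helpers are those visible in the hunk), the fold looks like this:

// Hypothetical helper, illustration only: reduce a set of responses to the
// first communication error, as the lambda in the hunk above does.
// Assumes Futures/Utilities.h, Network/Methods.h and Network/Utils.h.
static int firstCommError(std::vector<arangodb::futures::Future<arangodb::network::Response>>& futures) {
  using namespace arangodb;
  return futures::collectAll(futures)
      .thenValue([](std::vector<futures::Try<network::Response>>&& responses) -> int {
        for (auto const& tryRes : responses) {
          // get() rethrows if the request failed with an exception
          int commError = network::fuerteToArangoErrorCode(tryRes.get());
          if (commError != TRI_ERROR_NO_ERROR) {
            return commError;  // first error wins
          }
        }
        return TRI_ERROR_NO_ERROR;
      })
      .get();  // block exactly once, at the very end
}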
@@ -210,6 +210,9 @@ std::string const StaticStrings::XContentTypeOptions("x-content-type-options");
std::string const StaticStrings::XArangoFrontend("x-arango-frontend");

// mime types
std::string const StaticStrings::MimeTypeDump(
"application/x-arango-dump; charset=utf-8");
std::string const StaticStrings::MimeTypeHtml("text/html; charset=utf-8");
std::string const StaticStrings::MimeTypeJson(
"application/json; charset=utf-8");
std::string const StaticStrings::MimeTypeText("text/plain; charset=utf-8");
@@ -196,6 +196,8 @@ class StaticStrings {
static std::string const XArangoFrontend;

// mime types
static std::string const MimeTypeDump;
static std::string const MimeTypeHtml;
static std::string const MimeTypeJson;
static std::string const MimeTypeText;
static std::string const MimeTypeVPack;
@@ -223,6 +223,7 @@ add_library(arango STATIC
Random/RandomFeature.cpp
Random/RandomGenerator.cpp
Random/UniformCharacter.cpp
Rest/CommonDefines.cpp
Rest/GeneralRequest.cpp
Rest/GeneralResponse.cpp
Rest/VstRequest.cpp
@@ -25,6 +25,7 @@

#include <chrono>
#include <condition_variable>
#include <mutex>
#include <thread>

#include "Basics/debugging.h"

@@ -213,6 +214,9 @@ class Future {
// True when the result (or exception) is ready
bool isReady() const { return getState().hasResult(); }

/// True if the future already has a callback set
bool hasCallback() const { return getState().hasCallback(); }

/// True if the result is a value (not an exception)
bool hasValue() const {
TRI_ASSERT(isReady());
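Small API addition above: hasCallback() joins isReady() and hasValue() as introspection helpers on futures::Future. A tiny usage sketch; the makeFuture call and its header location are assumptions based on how futures are created elsewhere in this commit:

#include "Futures/Future.h"
#include "Futures/Utilities.h"  // assumed location of futures::makeFuture

void inspectFuture() {  // hypothetical function, illustration only
  auto fut = arangodb::futures::makeFuture(42);
  if (fut.isReady()) {                 // a ready-made future is immediately ready
    bool hasVal = fut.hasValue();      // asserts isReady() internally, see the hunk above
    bool chained = fut.hasCallback();  // the accessor added by this commit
    (void)hasVal;
    (void)chained;
  }
}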
@@ -23,6 +23,10 @@
#ifndef ARANGOD_FUTURES_UTILITIES_H
#define ARANGOD_FUTURES_UTILITIES_H 1

#include <algorithm>
#include <chrono>
#include <condition_variable>
#include <mutex>
#include <vector>

#include "Futures/Future.h"
@@ -0,0 +1,54 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Dan Larkin-York
////////////////////////////////////////////////////////////////////////////////

#include <ostream>
#include <string>

#include "CommonDefines.h"

#include "Basics/StaticStrings.h"

namespace arangodb {
namespace rest {

std::string contentTypeToString(ContentType type) {
switch (type) {
case ContentType::VPACK:
return StaticStrings::MimeTypeVPack;
case ContentType::TEXT:
return StaticStrings::MimeTypeText;
case ContentType::HTML:
return StaticStrings::MimeTypeHtml;
case ContentType::DUMP:
return StaticStrings::MimeTypeDump;
case ContentType::CUSTOM:
return ""; // use value from headers
case ContentType::UNSET:
case ContentType::JSON:
default:
return StaticStrings::MimeTypeJson;
}
}

} // namespace rest
} // namespace arangodb
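The new contentTypeToString() gives fuerte-based call sites one place to map the ContentType enum back to a MIME string, backed by the StaticStrings constants added above. A hedged usage sketch; headerValueFor and the JSON fallback are inventions of this sketch, and the include path is inferred from the CMake hunk:

#include <string>

#include "Basics/StaticStrings.h"
#include "Rest/CommonDefines.h"  // include path assumed from the CMake hunk above

namespace arangodb {
namespace rest {

// Sketch: pick a Content-Type header value for an outgoing message.
// "headerValueFor" is a hypothetical helper, not part of this commit.
std::string headerValueFor(ContentType type) {
  std::string value = contentTypeToString(type);
  if (value.empty()) {
    // ContentType::CUSTOM returns "" above, meaning: keep whatever is already
    // present in the headers; this fallback exists only for the sketch.
    value = StaticStrings::MimeTypeJson;
  }
  return value;
}

}  // namespace rest
}  // namespace arangodb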
@@ -78,6 +78,8 @@ enum class ContentType {
UNSET
};

std::string contentTypeToString(ContentType type);

enum class EncodingType {
DEFLATE,
UNSET
File diff suppressed because it is too large