mirror of https://gitee.com/bigwinds/arangodb
Add methods in ClusterInfo to create and drop views.
This commit is contained in:
parent 33ce6acc2c
commit ce8db24975
@@ -1773,6 +1773,146 @@ Result ClusterInfo::setCollectionPropertiesCoordinator(

  return Result(TRI_ERROR_CLUSTER_AGENCY_COMMUNICATION_FAILED, res.errorMessage());
}

////////////////////////////////////////////////////////////////////////////////
/// @brief create view in coordinator, the return value is an ArangoDB
/// error code and the errorMsg is set accordingly
////////////////////////////////////////////////////////////////////////////////

int ClusterInfo::createViewCoordinator(std::string const& databaseName,
                                       std::string const& viewID,
                                       VPackSlice const& json,
                                       std::string& errorMsg) {
  using arangodb::velocypack::Slice;

  AgencyComm ac;

  std::string const name =
      arangodb::basics::VelocyPackHelper::getStringValue(json, "name", "");

  {
    // check if a view with the same name is already planned
    loadPlan();

    READ_LOCKER(readLocker, _planProt.lock);
    AllViews::const_iterator it = _plannedViews.find(databaseName);
    if (it != _plannedViews.end()) {
      DatabaseViews::const_iterator it2 = (*it).second.find(name);

      if (it2 != (*it).second.end()) {
        // view already exists!
        events::CreateView(name, TRI_ERROR_ARANGO_DUPLICATE_NAME);
        return TRI_ERROR_ARANGO_DUPLICATE_NAME;
      }
    }
  }

  // mop: why do these ask the agency instead of checking cluster info?
  if (!ac.exists("Plan/Databases/" + databaseName)) {
    events::CreateView(name, TRI_ERROR_ARANGO_DATABASE_NOT_FOUND);
    return setErrormsg(TRI_ERROR_ARANGO_DATABASE_NOT_FOUND, errorMsg);
  }

  if (ac.exists("Plan/Views/" + databaseName + "/" + viewID)) {
    events::CreateView(name, TRI_ERROR_CLUSTER_COLLECTION_ID_EXISTS);
    return setErrormsg(TRI_ERROR_CLUSTER_COLLECTION_ID_EXISTS, errorMsg);
  }

  std::vector<AgencyOperation> opers(
      {AgencyOperation("Plan/Views/" + databaseName + "/" + viewID,
                       AgencyValueOperationType::SET, json),
       AgencyOperation("Plan/Version",
                       AgencySimpleOperationType::INCREMENT_OP)});

  std::vector<AgencyPrecondition> precs;
  precs.emplace_back(
      AgencyPrecondition("Plan/Views/" + databaseName + "/" + viewID,
                         AgencyPrecondition::Type::EMPTY, true));

  AgencyWriteTransaction transaction(opers, precs);

  auto res = ac.sendTransactionWithFailover(transaction);

  if (!res.successful()) {
    if (res.httpCode() ==
        (int)arangodb::rest::ResponseCode::PRECONDITION_FAILED) {
      errorMsg += std::string("Precondition that view ") + name + " with ID " +
                  viewID + " does not yet exist failed. Cannot create view.";

      // Dump agency plan:
      AgencyCommResult ag = ac.getValues("/");
      if (ag.successful()) {
        LOG_TOPIC(ERR, Logger::CLUSTER) << "Agency dump:\n"
                                        << ag.slice().toJson();
      } else {
        LOG_TOPIC(ERR, Logger::CLUSTER) << "Could not get agency dump!";
      }

      return TRI_ERROR_CLUSTER_COULD_NOT_CREATE_VIEW_IN_PLAN;
    } else {
      errorMsg += std::string("file: ") + __FILE__ +
                  " line: " + std::to_string(__LINE__);
      errorMsg += " HTTP code: " + std::to_string(res.httpCode());
      errorMsg += " error message: " + res.errorMessage();
      errorMsg += " error details: " + res.errorDetails();
      errorMsg += " body: " + res.body();
      events::CreateView(name, TRI_ERROR_CLUSTER_COULD_NOT_CREATE_VIEW_IN_PLAN);
      return TRI_ERROR_CLUSTER_COULD_NOT_CREATE_VIEW_IN_PLAN;
    }
  }

  // Update our cache:
  loadPlan();

  return TRI_ERROR_NO_ERROR;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief drop view in coordinator, the return value is an ArangoDB
/// error code and the errorMsg is set accordingly.
////////////////////////////////////////////////////////////////////////////////

int ClusterInfo::dropViewCoordinator(
    std::string const& databaseName, std::string const& viewID,
    std::string& errorMsg) {
  AgencyComm ac;
  AgencyCommResult res;

  // Transact to agency
  AgencyOperation delPlanView(
      "Plan/Views/" + databaseName + "/" + viewID,
      AgencySimpleOperationType::DELETE_OP);
  AgencyOperation incrementVersion("Plan/Version",
                                   AgencySimpleOperationType::INCREMENT_OP);
  AgencyPrecondition precondition = AgencyPrecondition(
      "Plan/Databases/" + databaseName, AgencyPrecondition::Type::EMPTY, false);
  AgencyPrecondition pre2 = AgencyPrecondition(
      "Plan/Views/" + databaseName + "/" + viewID,
      AgencyPrecondition::Type::EMPTY, false);
  AgencyWriteTransaction trans({delPlanView, incrementVersion},
                               {precondition, pre2});
  res = ac.sendTransactionWithFailover(trans);

  if (!res.successful()) {
    AgencyCommResult ag = ac.getValues("");
    if (ag.successful()) {
      LOG_TOPIC(ERR, Logger::CLUSTER) << "Agency dump:\n"
                                      << ag.slice().toJson();
    } else {
      LOG_TOPIC(ERR, Logger::CLUSTER) << "Could not get agency dump!";
    }
  }

  // Update our own cache:
  loadPlan();

  events::DropView(viewID, TRI_ERROR_NO_ERROR);
  return TRI_ERROR_NO_ERROR;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief set collection status in coordinator
////////////////////////////////////////////////////////////////////////////////
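Both methods lean on the agency's transactional check-and-set: the SET on Plan/Views/<database>/<id> is guarded by an AgencyPrecondition of Type::EMPTY, so at most one coordinator can plant a given view entry. A minimal, self-contained sketch of that semantics, with a toy in-memory map standing in for the agency (illustrative code only, not part of the commit):

#include <iostream>
#include <map>
#include <string>

// Toy stand-in for the agency key/value store; a real agency write
// transaction evaluates its preconditions and operations atomically.
struct ToyAgency {
  std::map<std::string, std::string> kv;

  // SET guarded by an "EMPTY" precondition: write only if the key is unset.
  bool setIfEmpty(std::string const& key, std::string const& value) {
    return kv.emplace(key, value).second;  // false == precondition failed
  }
};

int main() {
  ToyAgency agency;
  std::string const key = "Plan/Views/_system/12345";

  // The first coordinator to run the transaction wins ...
  std::cout << agency.setIfEmpty(key, "{\"name\":\"v1\"}") << "\n";  // 1
  // ... a racing second attempt hits the precondition, which maps to
  // TRI_ERROR_CLUSTER_COULD_NOT_CREATE_VIEW_IN_PLAN in the code above.
  std::cout << agency.setIfEmpty(key, "{\"name\":\"v1\"}") << "\n";  // 0
}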
@@ -411,6 +411,23 @@ class ClusterInfo {
                                  std::string const& collectionID,
                                  TRI_vocbase_col_status_e status);

  //////////////////////////////////////////////////////////////////////////////
  /// @brief create view in coordinator
  //////////////////////////////////////////////////////////////////////////////

  int createViewCoordinator(std::string const& databaseName,
                            std::string const& viewID,
                            arangodb::velocypack::Slice const& json,
                            std::string& errorMsg);

  //////////////////////////////////////////////////////////////////////////////
  /// @brief drop view in coordinator
  //////////////////////////////////////////////////////////////////////////////

  int dropViewCoordinator(std::string const& databaseName,
                          std::string const& viewID,
                          std::string& errorMsg);

  //////////////////////////////////////////////////////////////////////////////
  /// @brief ensure an index in coordinator.
  //////////////////////////////////////////////////////////////////////////////
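A hypothetical call site for these two declarations (the database name, the view id "12345", and the surrounding setup are assumptions for illustration; ClusterInfo::instance() is assumed to be the usual singleton accessor):

// Sketch only: error handling trimmed, names made up.
VPackBuilder body;
body.openObject();
body.add("name", VPackValue("myView"));  // read via getStringValue(json, "name", "")
body.close();

std::string errorMsg;
ClusterInfo* ci = ClusterInfo::instance();
int code = ci->createViewCoordinator("_system", "12345", body.slice(), errorMsg);
if (code != TRI_ERROR_NO_ERROR) {
  // errorMsg now carries the details collected by the method
}
// ... and later, to remove the view again:
code = ci->dropViewCoordinator("_system", "12345", errorMsg);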
@@ -178,6 +178,7 @@
  "ERROR_CLUSTER_DISTRIBUTE_SHARDS_LIKE_NUMBER_OF_SHARDS" : { "code" : 1494, "message" : "conflicting shard number with distributeShardsLike parameter assignment" },
  "ERROR_CLUSTER_LEADERSHIP_CHALLENGE_ONGOING" : { "code" : 1495, "message" : "leadership challenge is ongoing" },
  "ERROR_CLUSTER_NOT_LEADER" : { "code" : 1496, "message" : "not a leader" },
  "ERROR_CLUSTER_COULD_NOT_CREATE_VIEW_IN_PLAN" : { "code" : 1497, "message" : "could not create view in plan" },
  "ERROR_QUERY_KILLED" : { "code" : 1500, "message" : "query killed" },
  "ERROR_QUERY_PARSE" : { "code" : 1501, "message" : "%s" },
  "ERROR_QUERY_EMPTY" : { "code" : 1502, "message" : "query is empty" },
@@ -216,6 +216,7 @@ ERROR_CLUSTER_DISTRIBUTE_SHARDS_LIKE_REPLICATION_FACTOR,1493,"conflicting replic
ERROR_CLUSTER_DISTRIBUTE_SHARDS_LIKE_NUMBER_OF_SHARDS,1494,"conflicting shard number with distributeShardsLike parameter assignment","Will be raised if the intended number of shards does not match that of the prototype shard given in the distributeShardsLike parameter."
ERROR_CLUSTER_LEADERSHIP_CHALLENGE_ONGOING,1495,"leadership challenge is ongoing","Will be raised when servers are currently competing for leadership, and the result is still unknown."
ERROR_CLUSTER_NOT_LEADER,1496,"not a leader","Will be raised when an operation is sent to a non-leading server."
ERROR_CLUSTER_COULD_NOT_CREATE_VIEW_IN_PLAN,1497,"could not create view in plan","Will be raised when a coordinator in a cluster cannot create an entry for a new view in the Plan hierarchy in the agency."

################################################################################
## ArangoDB query errors
@@ -177,6 +177,7 @@ void TRI_InitializeErrorMessages() {
  REG_ERROR(ERROR_CLUSTER_DISTRIBUTE_SHARDS_LIKE_NUMBER_OF_SHARDS, "conflicting shard number with distributeShardsLike parameter assignment");
  REG_ERROR(ERROR_CLUSTER_LEADERSHIP_CHALLENGE_ONGOING, "leadership challenge is ongoing");
  REG_ERROR(ERROR_CLUSTER_NOT_LEADER, "not a leader");
  REG_ERROR(ERROR_CLUSTER_COULD_NOT_CREATE_VIEW_IN_PLAN, "could not create view in plan");
  REG_ERROR(ERROR_QUERY_KILLED, "query killed");
  REG_ERROR(ERROR_QUERY_PARSE, "%s");
  REG_ERROR(ERROR_QUERY_EMPTY, "query is empty");
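Once registered, the message can be resolved through the generic error-string lookup; a sketch, assuming the standard TRI_errno_string helper:

// Assumes TRI_InitializeErrorMessages() has run at startup.
char const* msg = TRI_errno_string(TRI_ERROR_CLUSTER_COULD_NOT_CREATE_VIEW_IN_PLAN);
// expected: "could not create view in plan"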
@@ -945,6 +945,12 @@ constexpr int TRI_ERROR_CLUSTER_LEADERSHIP_CHALLENGE_ONGOING
/// Will be raised when an operation is sent to a non-leading server.
constexpr int TRI_ERROR_CLUSTER_NOT_LEADER = 1496;

/// 1497: ERROR_CLUSTER_COULD_NOT_CREATE_VIEW_IN_PLAN
/// "could not create view in plan"
/// Will be raised when a coordinator in a cluster cannot create an entry for a
/// new view in the Plan hierarchy in the agency.
constexpr int TRI_ERROR_CLUSTER_COULD_NOT_CREATE_VIEW_IN_PLAN = 1497;

/// 1500: ERROR_QUERY_KILLED
/// "query killed"
/// Will be raised when a running query is killed by an explicit admin command.