
Feature 3.4/detailed get maintenance feature (#6666)

* backport of test data generation for maintenance from devel
Kaveh Vahedipour 2018-10-01 14:50:25 +02:00 committed by Max Neunhöffer
parent 7a1100318e
commit 8b9a0f6a25
13 changed files with 9599 additions and 6989 deletions


@@ -57,7 +57,7 @@ void DBServerAgencySync::work() {
_heartbeat->dispatchedJobResult(result);
}
Result getLocalCollections(VPackBuilder& collections) {
Result DBServerAgencySync::getLocalCollections(VPackBuilder& collections) {
using namespace arangodb::basics;
Result result;
@@ -72,7 +72,6 @@ Result getLocalCollections(VPackBuilder& collections) {
return Result(TRI_ERROR_INTERNAL, "Failed to get feature database");
}
collections.clear();
VPackObjectBuilder c(&collections);
for (auto const& database : Databases::list()) {
@@ -170,6 +169,7 @@ DBServerAgencySyncResult DBServerAgencySync::execute() {
LOG_TOPIC(DEBUG, Logger::MAINTENANCE) << "DBServerAgencySync::phaseOne done";
LOG_TOPIC(DEBUG, Logger::MAINTENANCE) << "DBServerAgencySync::phaseTwo";
local.clear();
glc = getLocalCollections(local);
// We intentionally refetch local collections here, such that phase 2
// can already see potential changes introduced by phase 1. The two
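
With this change, getLocalCollections() no longer clears the builder it is given (the collections.clear() above is removed); the caller resets and refetches instead, as the new local.clear() in execute() shows. A minimal sketch of that calling pattern; the surrounding phase calls are placeholders, not taken from this diff:

VPackBuilder local;
DBServerAgencySync::getLocalCollections(local);   // snapshot seen by phase 1
// ... phase 1 runs and may create, drop or alter local collections ...
local.clear();                                    // the caller resets the builder itself now
DBServerAgencySync::getLocalCollections(local);   // fresh snapshot, so phase 2 already
                                                  // sees the changes made in phase 1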


@@ -25,6 +25,8 @@
#define ARANGOD_CLUSTER_DB_SERVER_AGENCY_SYNC_H 1
#include "Basics/Common.h"
#include "Basics/Result.h"
#include "Basics/VelocyPackHelper.h"
namespace arangodb {
class HeartbeatThread;
@@ -56,6 +58,12 @@ class DBServerAgencySync {
public:
void work();
/**
* @brief Get copy of current local state
* @param collections Builder to fill to
*/
static arangodb::Result getLocalCollections(VPackBuilder& collections);
private:
DBServerAgencySyncResult execute();
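
Because getLocalCollections() is now a public static member, other components can take a snapshot of the local state without holding a DBServerAgencySync instance (the REST handler further down does exactly that). A minimal, hedged sketch of such a call; the error handling is illustrative only:

#include "Cluster/DBServerAgencySync.h"

VPackBuilder local;
arangodb::Result res = arangodb::DBServerAgencySync::getLocalCollections(local);
if (res.fail()) {
  // the snapshot could not be produced; treat the contents of local as incomplete
}
// on success, local holds one object, filled per database
// (the implementation above iterates Databases::list())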


@@ -458,15 +458,20 @@ std::shared_ptr<Action> MaintenanceFeature::findReadyAction(
VPackBuilder MaintenanceFeature::toVelocyPack() const {
VPackBuilder vb;
toVelocyPack(vb);
return vb;
}
void MaintenanceFeature::toVelocyPack(VPackBuilder& vb) const {
READ_LOCKER(rLock, _actionRegistryLock);
{ VPackArrayBuilder ab(&vb);
for (auto const& action : _actionRegistry ) {
action->toVelocyPack(vb);
} // for
}
return vb;
} // MaintenanceFeature::toVelocyPack
#if 0


@@ -131,6 +131,9 @@ public:
/// @brief Create a VPackBuilder object with snapshot of current action registry
VPackBuilder toVelocyPack() const;
/// @brief Fill the envelope with snapshot of current action registry
void toVelocyPack(VPackBuilder& envelope) const;
/// @brief Returns json array of all MaintenanceActions within the deque
Result toJson(VPackBuilder & builder);
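
The header now exposes both forms: the value-returning overload produces a standalone snapshot, while the new envelope overload appends the action array into a builder the caller already controls. A short sketch of the difference; the maintenance pointer and the "registry" key mirror the REST handler below and are otherwise assumptions:

// Standalone: the builder's top level is the action array itself.
VPackBuilder standalone = maintenance->toVelocyPack();

// Embedded: the same array becomes the value of a key inside a larger object.
VPackBuilder envelope;
{
  VPackObjectBuilder o(&envelope);
  envelope.add(VPackValue("registry"));
  maintenance->toVelocyPack(envelope);  // appends the array at the current position
}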


@@ -27,6 +27,7 @@
#include "Basics/StringUtils.h"
#include "Basics/conversions.h"
#include "Cluster/MaintenanceFeature.h"
#include "Cluster/DBServerAgencySync.h"
#include "Rest/HttpRequest.h"
#include "Rest/HttpResponse.h"
@@ -152,10 +153,22 @@ bool MaintenanceRestHandler::parsePutBody(VPackSlice const & parameters) {
void MaintenanceRestHandler::getAction() {
// build the action
auto maintenance = ApplicationServer::getFeature<MaintenanceFeature>("Maintenance");
auto maintenance =
ApplicationServer::getFeature<MaintenanceFeature>("Maintenance");
VPackBuilder registry = maintenance->toVelocyPack();
generateResult(rest::ResponseCode::OK, registry.slice());
bool found;
std::string const& detailsStr = _request->value("details", found);
VPackBuilder builder;
{ VPackObjectBuilder o(&builder);
builder.add(VPackValue("registry"));
maintenance->toVelocyPack(builder);
if (found && StringUtils::boolean(detailsStr)) {
builder.add(VPackValue("state"));
DBServerAgencySync::getLocalCollections(builder);
}}
generateResult(rest::ResponseCode::OK, builder.slice());
} // MaintenanceRestHandler::getAction
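
The handler now always reports the action registry and, when the details URL parameter is truthy, additionally the local state; the test-data script below queries exactly this via /_admin/actions?details=true and extracts .state. A sketch of what a consumer of the resulting slice might check; the parsing code is illustrative only:

VPackSlice response = builder.slice();
VPackSlice registry = response.get("registry");  // array of registered maintenance actions
if (response.hasKey("state")) {                  // present only when ?details=true was sent
  VPackSlice state = response.get("state");      // local collections snapshot, per database
}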


@@ -1,22 +1,69 @@
#!/bin/bash
# FIXMEMAINTENANCE: please add a couple of lines about why this script exists
#
# Generate test data for maintenance unit tests
#
# check if ports are available
lsof -i -P -n| awk '{print $9}' | grep -E "6568|11097|11098|11196|11197|1198" > /dev/null
if [ $? -ne 1 ]; then
echo "One or more of ports 6568, 11097, 11098, 11196, 11197, 1198 are blocked for startLocalCluster"
exit 1;
fi
# start cluster for it all
scripts/startLocalCluster.sh -d 3 -c 2 -b 2567
# collections for system database
echo "== Creating collection _system/bar =="
curl [::1]:11097/_api/collection -sd \
'{"name":"bar", "type": 3, "numberOfShards": 9, "replicationFactor": 2}' \
| jq -c
curl [::1]:11097/_api/index?collection=bar -sd \
'{"fields":["gi"],"geoJson":false,"sparse":true,"type":"geo","unique":false}' \
|jq -c
curl [::1]:11097/_api/index?collection=bar -sd \
'{"fields":["gij"],"geoJson":true,"sparse":true,"type":"geo","unique":false}'\
|jq -c
curl [::1]:11097/_api/index?collection=bar -sd \
'{"deduplicate":false,"fields":["hi","_key"],"sparse":false,"type":"hash","unique":true}'|jq -c
curl [::1]:11097/_api/index?collection=bar -sd \
'{"deduplicate":false,"fields":["pi"],"sparse":true,"type":"persistent","unique":false}'|jq -c
curl [::1]:11097/_api/index?collection=bar -sd \
'{"fields":["fi"],"id":"2010132","minLength":3,"sparse":true,"type":"fulltext","unique":false}'|jq -c
curl [::1]:11097/_api/index?collection=bar -sd \
'{"deduplicate":true,"fields":["sli"],"sparse":false,"type":"skiplist","unique":false}'
echo "== Creating collection _system/baz =="
curl [::1]:11097/_api/collection -sd \
'{"name":"baz", "type": 2, "numberOfShards": 1, "replicationFactor": 2}' | jq -c
# create foo database
echo "== Creating database foo =="
curl [::1]:11097/_api/database -sd '{"name":"foo"}' | jq -c
echo "== Creating collection foo/foobar =="
curl [::1]:11097/_db/foo/_api/collection -sd \
'{"name":"foobar", "type": 3, "numberOfShards": 4, "replicationFactor": 3}' \
| jq -c
echo "== Creating collection foo/foobaz =="
curl [::1]:11097/_db/foo/_api/collection -sd \
'{"name":"foobaz", "type": 2, "numberOfShards": 6, "replicationFactor": 2}' \
| jq -c
header="R\"=("
footer=")=\""
outfile=Plan.json
echo $header > $outfile
curl -s localhost:4001/_api/agency/read -d'[["/arango/Plan"]]'|jq .[0].arango.Plan >> $outfile
curl [::1]:6568/_api/agency/read -sLd'[["/arango/Plan"]]'|jq .[0].arango.Plan >> $outfile
echo $footer >> $outfile
outfile=Current.json
echo $header > $outfile
curl -s localhost:4001/_api/agency/read -d'[["/arango/Current"]]'|jq .[0].arango.Current >> $outfile
curl [::1]:6568/_api/agency/read -sLd'[["/arango/Current"]]'|jq .[0].arango.Current >> $outfile
echo $footer >> $outfile
outfile=Supervision.json
echo $header > $outfile
supervision=$(curl -s localhost:4001/_api/agency/read -d'[["/arango/Supervision"]]'|jq .[0].arango.Supervision)
supervision=$(curl [::1]:6568/_api/agency/read -sLd'[["/arango/Supervision"]]'|jq .[0].arango.Supervision)
echo $supervision | jq .>> $outfile
echo $footer >> $outfile
@@ -30,19 +77,12 @@ for i in $servers; do
tmpfile=$shortname.tmp
outfile=$shortname.json
echo "{" >> $tmpfile
j=0
for i in $dbs; do
if [ $j -gt 0 ]; then
echo -n "," >> $tmpfile
fi
echo -n "\"$i\" :" >> $tmpfile
curl -s $endpoint/_db/$i/_admin/execute?returnAsJSON=true -d 'return require("@arangodb/cluster").getLocalInfo()'|jq .result >> $tmpfile
(( j++ ))
done
echo "}" >> $tmpfile
curl -s $endpoint/_admin/actions?details=true|jq .state >> $tmpfile
echo "R\"=(" > $outfile
cat $tmpfile | jq . >> $outfile
echo ")=\"" >> $outfile
rm $tmpfile
done
# shutdown the cluster
scripts/shutdownLocalCluster.sh -d 3 -c 2 -b 2567
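
Each generated file is wrapped in R"=( ... )=" so that it can be compiled straight into the C++ unit tests as a raw string literal. One plausible way to consume such a file (the include pattern and names here are assumptions, not shown in this diff):

#include <velocypack/Parser.h>

// Plan.json was generated above: it starts with R"=( and ends with )=",
// so including it after the '=' yields a raw string literal initializer.
char const* planJson =
#include "Plan.json"
  ;

auto parsed = arangodb::velocypack::Parser::fromJson(planJson);
// parsed->slice() now holds the /arango/Plan snapshot used by the maintenance tests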

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large


@@ -643,11 +643,11 @@ TEST_CASE("ActionPhaseOne", "[cluster][maintenance]") {
SECTION("Modify journalSize in plan should update the according collection") {
VPackBuilder v; v.add(VPackValue(0));
std::string dbname("foo");
for (auto node : localNodes) {
std::vector<ActionDescription> actions;
std::string dbname = "_system";
std::string prop = arangodb::maintenance::JOURNAL_SIZE;
auto cb =
@@ -662,20 +662,19 @@ TEST_CASE("ActionPhaseOne", "[cluster][maintenance]") {
plan.toBuilder().slice(), node.second.toBuilder().slice(),
node.first, errors, actions);
/*
if (actions.size() != 1) {
std::cout << __FILE__ << ":" << __LINE__ << " " << actions << std::endl;
}
REQUIRE(actions.size() == 1);
for (auto const& action : actions) {
REQUIRE(action.name() == "UpdateCollection");
REQUIRE(action.get("shard") == shname);
REQUIRE(action.get("database") == dbname);
auto const props = action.properties();
}
*/
}
}
@@ -728,12 +727,15 @@ TEST_CASE("ActionPhaseOne", "[cluster][maintenance]") {
for (auto& node : localNodes) {
std::vector<ActionDescription> actions;
node.second("db3") = node.second("_system");
node.second("db3") = node.second("foo");
arangodb::maintenance::diffPlanLocal(
plan.toBuilder().slice(), node.second.toBuilder().slice(),
node.first, errors, actions);
if (actions.size() != node.second("db3").children().size()) {
std::cout << __FILE__ << ":" << __LINE__ << " " << actions << std::endl;
}
REQUIRE(actions.size() == node.second("db3").children().size());
for (auto const& action : actions) {
REQUIRE(action.name() == "DropCollection");
@@ -882,8 +884,8 @@ TEST_CASE("ActionPhaseTwo", "[cluster][maintenance]") {
REQUIRE(pt.isEmptyObject());
}
}
*/
}*/
}

File diff suppressed because it is too large


@@ -1,43 +1,56 @@
R"=(
{
"Health": {
"PRMR-1b81e8d4-7119-49ba-a423-8684d59dde89": {
"PRMR-d6e6b701-e455-4f8f-86cf-a87faaf235da": {
"AdvertisedEndpoint": "",
"Timestamp": "2018-10-01T10:17:22Z",
"SyncStatus": "SERVING",
"Host": "ac8ddefc7d1f4364ba655b4debcd076f",
"Status": "GOOD",
"Endpoint": "tcp://[::1]:8629",
"Timestamp": "2018-08-15T16:07:46Z",
"ShortName": "DBServer0001",
"SyncStatus": "SERVING"
},
"PRMR-57aa3986-2e78-4810-8000-7fd2f0693291": {
"Endpoint": "tcp://[::1]:8630",
"Host": "ac8ddefc7d1f4364ba655b4debcd076f",
"Status": "GOOD",
"Timestamp": "2018-08-16T08:16:44Z",
"ShortName": "DBServer0002",
"SyncStatus": "SERVING"
"Endpoint": "tcp://[::1]:11198"
},
"PRMR-e786c6cb-92fc-45cc-85dd-71abafcdaa83": {
"Endpoint": "tcp://[::1]:8631",
"PRMR-62eeb203-c38c-4879-b343-ca34633705cf": {
"AdvertisedEndpoint": "",
"Timestamp": "2018-10-01T10:17:23Z",
"SyncStatus": "SERVING",
"Host": "ac8ddefc7d1f4364ba655b4debcd076f",
"Status": "GOOD",
"Timestamp": "2018-08-16T08:16:44Z",
"ShortName": "DBServer0003",
"SyncStatus": "SERVING"
"Endpoint": "tcp://[::1]:11196"
},
"CRDN-42df19c3-73d5-48f4-b02e-09b29008eff8": {
"Endpoint": "tcp://[::1]:8530",
"PRMR-498a2f3d-9700-4917-afa9-ec317f6e2e3d": {
"AdvertisedEndpoint": "",
"Timestamp": "2018-10-01T10:17:22Z",
"SyncStatus": "SERVING",
"Host": "ac8ddefc7d1f4364ba655b4debcd076f",
"Status": "GOOD",
"Timestamp": "2018-08-16T08:16:44Z",
"ShortName": "DBServer0001",
"Endpoint": "tcp://[::1]:11197"
},
"CRDN-8d79ded3-9062-4521-8fa6-7ef3aaf144ad": {
"AdvertisedEndpoint": "",
"SyncStatus": "SERVING",
"Timestamp": "2018-10-01T10:17:24Z",
"Host": "ac8ddefc7d1f4364ba655b4debcd076f",
"ShortName": "Coordinator0002",
"Status": "GOOD",
"Endpoint": "tcp://[::1]:11098"
},
"CRDN-383e3a90-2b26-49fe-9974-f9fce7b5eabf": {
"AdvertisedEndpoint": "",
"SyncStatus": "SERVING",
"Timestamp": "2018-10-01T10:17:24Z",
"Host": "ac8ddefc7d1f4364ba655b4debcd076f",
"ShortName": "Coordinator0001",
"SyncStatus": "SERVING"
"Status": "GOOD",
"Endpoint": "tcp://[::1]:11097"
}
},
"DBServers": {},
"State": {
"Mode": "Normal",
"Timestamp": "2018-08-15T16:07:46Z"
"Timestamp": "2018-10-01T10:17:22Z"
},
"Shards": {}
}