1
0
Fork 0

[3.4] Early sort out system collections for maintenance (#7589)

This commit is contained in:
Kaveh Vahedipour 2018-12-10 15:16:35 +01:00 committed by Jan
parent f5f059e715
commit 1b75220a1b
8 changed files with 6895 additions and 7418 deletions

View File

@@ -86,37 +86,39 @@ Result DBServerAgencySync::getLocalCollections(VPackBuilder& collections) {
auto cols = vocbase->collections(false);
for (auto const& collection : cols) {
collections.add(VPackValue(collection->name()));
VPackObjectBuilder col(&collections);
collection->properties(collections,true,false);
auto const& folls = collection->followers();
std::string const theLeader = folls->getLeader();
bool theLeaderTouched = folls->getLeaderTouched();
// Note that whenever theLeader was set explicitly since the collection
// object was created, we believe it. Otherwise, we do not accept
// that we are the leader. This is to circumvent the problem that
// after a restart we would implicitly be assumed to be the leader.
collections.add("theLeader", VPackValue(theLeaderTouched ? theLeader : "NOT_YET_TOUCHED"));
collections.add("theLeaderTouched", VPackValue(theLeaderTouched));
if (theLeader.empty() && theLeaderTouched) {
// we are the leader ourselves
// In this case we report our in-sync followers here in the format
// of the agency: [ leader, follower1, follower2, ... ]
collections.add(VPackValue("servers"));
{ VPackArrayBuilder guard(&collections);
collections.add(VPackValue(arangodb::ServerState::instance()->getId()));
std::shared_ptr<std::vector<ServerID> const> srvs = folls->get();
for (auto const& s : *srvs) {
collections.add(VPackValue(s));
if (!collection->system()) {
collections.add(VPackValue(collection->name()));
VPackObjectBuilder col(&collections);
collection->properties(collections,true,false);
auto const& folls = collection->followers();
std::string const theLeader = folls->getLeader();
bool theLeaderTouched = folls->getLeaderTouched();
// Note that whenever theLeader was set explicitly since the collection
// object was created, we believe it. Otherwise, we do not accept
// that we are the leader. This is to circumvent the problem that
// after a restart we would implicitly be assumed to be the leader.
collections.add("theLeader", VPackValue(theLeaderTouched ? theLeader : "NOT_YET_TOUCHED"));
collections.add("theLeaderTouched", VPackValue(theLeaderTouched));
if (theLeader.empty() && theLeaderTouched) {
// we are the leader ourselves
// In this case we report our in-sync followers here in the format
// of the agency: [ leader, follower1, follower2, ... ]
collections.add(VPackValue("servers"));
{ VPackArrayBuilder guard(&collections);
collections.add(VPackValue(arangodb::ServerState::instance()->getId()));
std::shared_ptr<std::vector<ServerID> const> srvs = folls->get();
for (auto const& s : *srvs) {
collections.add(VPackValue(s));
}
}
}
}

View File

@@ -468,10 +468,8 @@ arangodb::Result arangodb::maintenance::diffPlanLocal (
if (pdbs.hasKey(dbname)) { // if in plan
for (auto const& sh : VPackObjectIterator(db.value)) { // for each local shard
std::string shName = sh.key.copyString();
if (shName.front() != '_') { // exclude local system shards/collections
handleLocalShard(dbname, shName, sh.value, shardMap.slice(), commonShrds,
indis, serverId, actions);
}
handleLocalShard(dbname, shName, sh.value, shardMap.slice(), commonShrds,
indis, serverId, actions);
}
}
}
@@ -832,9 +830,6 @@ arangodb::Result arangodb::maintenance::reportInCurrent(
for (auto const& shard : VPackObjectIterator(database.value)) {
auto const shName = shard.key.copyString();
if (shName.at(0) == '_') { // local system collection
continue;
}
auto const shSlice = shard.value;
auto const colName = shSlice.get(StaticStrings::DataSourcePlanId).copyString();

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1,56 +1,71 @@
R"=(
{
"Health": {
"PRMR-d6e6b701-e455-4f8f-86cf-a87faaf235da": {
"AdvertisedEndpoint": "",
"Timestamp": "2018-10-01T10:17:22Z",
"SyncStatus": "SERVING",
"Host": "ac8ddefc7d1f4364ba655b4debcd076f",
"Status": "GOOD",
"ShortName": "DBServer0002",
"Endpoint": "tcp://[::1]:11198"
},
"PRMR-62eeb203-c38c-4879-b343-ca34633705cf": {
"AdvertisedEndpoint": "",
"Timestamp": "2018-10-01T10:17:23Z",
"SyncStatus": "SERVING",
"Host": "ac8ddefc7d1f4364ba655b4debcd076f",
"Status": "GOOD",
"ShortName": "DBServer0003",
"Endpoint": "tcp://[::1]:11196"
},
"PRMR-498a2f3d-9700-4917-afa9-ec317f6e2e3d": {
"AdvertisedEndpoint": "",
"Timestamp": "2018-10-01T10:17:22Z",
"SyncStatus": "SERVING",
"Host": "ac8ddefc7d1f4364ba655b4debcd076f",
"Status": "GOOD",
"ShortName": "DBServer0001",
"Endpoint": "tcp://[::1]:11197"
},
"CRDN-8d79ded3-9062-4521-8fa6-7ef3aaf144ad": {
"AdvertisedEndpoint": "",
"SyncStatus": "SERVING",
"Timestamp": "2018-10-01T10:17:24Z",
"Host": "ac8ddefc7d1f4364ba655b4debcd076f",
"ShortName": "Coordinator0002",
"Status": "GOOD",
"Endpoint": "tcp://[::1]:11098"
},
"CRDN-383e3a90-2b26-49fe-9974-f9fce7b5eabf": {
"AdvertisedEndpoint": "",
"SyncStatus": "SERVING",
"Timestamp": "2018-10-01T10:17:24Z",
"Host": "ac8ddefc7d1f4364ba655b4debcd076f",
"CRDN-122291f4-3bfe-4037-b51d-b6b1066604b0": {
"Endpoint": "tcp://[::1]:11098",
"ShortName": "Coordinator0001",
"Status": "GOOD",
"Endpoint": "tcp://[::1]:11097"
"Version": "3.4.0-rc.5",
"SyncTime": "2018-12-03T09:58:57Z",
"Timestamp": "2018-12-03T09:58:58Z",
"Host": "7e56da52e94b4c5190f0e8d559243c42",
"SyncStatus": "SERVING",
"LastAckedTime": "2018-12-03T09:58:58Z",
"Engine": "rocksdb"
},
"PRMR-5dcd6f32-1207-47b2-bdc4-4ab7d23bbc23": {
"Endpoint": "tcp://[::1]:11198",
"ShortName": "DBServer0001",
"Status": "GOOD",
"Version": "3.4.0-rc.5",
"SyncTime": "2018-12-03T09:58:56Z",
"Timestamp": "2018-12-03T09:58:57Z",
"Host": "7e56da52e94b4c5190f0e8d559243c42",
"SyncStatus": "SERVING",
"LastAckedTime": "2018-12-03T09:58:57Z",
"Engine": "rocksdb"
},
"CRDN-33a81ec1-e22a-43fe-997c-6cc766208cb0": {
"Endpoint": "tcp://[::1]:11097",
"ShortName": "Coordinator0002",
"Status": "GOOD",
"Version": "3.4.0-rc.5",
"SyncTime": "2018-12-03T09:58:57Z",
"Timestamp": "2018-12-03T09:58:58Z",
"Host": "7e56da52e94b4c5190f0e8d559243c42",
"SyncStatus": "SERVING",
"LastAckedTime": "2018-12-03T09:58:58Z",
"Engine": "rocksdb"
},
"PRMR-16df9cb4-e499-4193-92fb-db98e891949f": {
"Endpoint": "tcp://[::1]:11197",
"ShortName": "DBServer0002",
"Status": "GOOD",
"Version": "3.4.0-rc.5",
"SyncTime": "2018-12-03T09:58:57Z",
"Timestamp": "2018-12-03T09:58:58Z",
"Host": "7e56da52e94b4c5190f0e8d559243c42",
"SyncStatus": "SERVING",
"LastAckedTime": "2018-12-03T09:58:58Z",
"Engine": "rocksdb"
},
"PRMR-1296936a-96c5-4304-9ecd-fb623dd98f86": {
"Endpoint": "tcp://[::1]:11196",
"ShortName": "DBServer0003",
"Status": "GOOD",
"Version": "3.4.0-rc.5",
"SyncTime": "2018-12-03T09:58:58Z",
"Timestamp": "2018-12-03T09:58:59Z",
"Host": "7e56da52e94b4c5190f0e8d559243c42",
"SyncStatus": "SERVING",
"LastAckedTime": "2018-12-03T09:58:59Z",
"Engine": "rocksdb"
}
},
"DBServers": {},
"State": {
"Mode": "Normal",
"Timestamp": "2018-10-01T10:17:22Z"
"Timestamp": "2018-12-03T09:58:56Z"
},
"Shards": {}
}