mirror of https://gitee.com/bigwinds/arangodb
removed shardLocation key
parent 0fdcd88770
commit 5a365cb2b1
@@ -233,7 +233,6 @@ ClusterInfo::ClusterInfo ()
   _uniqid._currentValue = _uniqid._upperValue = 0ULL;

   loadServers();
-  loadShards();
   loadCollections();
 }

@@ -334,6 +333,7 @@ void ClusterInfo::loadCollections () {

   WRITE_LOCKER(_lock);
   _collections.clear();
+  _shardIds.clear();

   std::map<std::string, std::string>::const_iterator it;
   for (it = collections.begin(); it != collections.end(); ++it) {
@@ -357,11 +357,11 @@ void ClusterInfo::loadCollections () {

       // check whether we have created an entry for the database already
       AllCollections::iterator it2 = _collections.find(database);
+      CollectionInfo collectionData((*it).second);

       if (it2 == _collections.end()) {
         // not yet, so create an entry for the database
         DatabaseCollections empty;
-        CollectionInfo collectionData((*it).second);
-        empty.insert(std::make_pair<CollectionID, CollectionInfo>(collection, collectionData));
+        empty.insert(std::make_pair<CollectionID, CollectionInfo>(collectionData.name(), collectionData));

@@ -369,10 +369,20 @@ void ClusterInfo::loadCollections () {
       }
       else {
         // insert the collection into the existing map
-        CollectionInfo collectionData((*it).second);
-        (*it2).second.insert(std::make_pair<CollectionID, CollectionInfo>(collection, collectionData));
+        (*it2).second.insert(std::make_pair<CollectionID, CollectionInfo>(collectionData.name(), collectionData));
       }

+      std::map<std::string, std::string> shards = collectionData.shardIds();
+      std::map<std::string, std::string>::const_iterator it3 = shards.begin();
+
+      while (it3 != shards.end()) {
+        const std::string shardId = (*it3).first;
+        const std::string serverId = (*it3).second;
+
+        _shardIds.insert(std::make_pair<ShardID, ServerID>(shardId, serverId));
+        ++it3;
+      }
+
     }

     return;
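For readers tracing the data flow: after this commit the shard-to-server map is no longer loaded from a dedicated Current/ShardLocation key; it is derived from each collection's shards attribute while loadCollections() runs. A condensed sketch of that new flow with the diff markers folded away (CollectionInfo, ShardID, ServerID and the shardIds() accessor are taken from the hunks above; the free-standing helper function and its signature are purely illustrative):

    // Illustrative only: the per-collection shard loop from the hunks above,
    // written out as straight-line code. Requires <map>, <string> and the
    // ClusterInfo-related declarations from this commit.
    void rebuildShardIds (std::map<std::string, std::string> const& collections,
                          std::map<ShardID, ServerID>& shardIds) {
      shardIds.clear();

      std::map<std::string, std::string>::const_iterator it;
      for (it = collections.begin(); it != collections.end(); ++it) {
        // each value is the collection's JSON document from the agency
        CollectionInfo collectionData((*it).second);

        // each collection document now carries its own shard -> server map
        std::map<std::string, std::string> shards = collectionData.shardIds();
        std::map<std::string, std::string>::const_iterator it3 = shards.begin();

        while (it3 != shards.end()) {
          shardIds.insert(std::make_pair((*it3).first, (*it3).second));
          ++it3;
        }
      }
    }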
@@ -513,45 +523,6 @@ std::string ClusterInfo::getTargetServerEndpoint (ServerID const& serverID) {
   return "";
 }

-////////////////////////////////////////////////////////////////////////////////
-/// @brief (re-)load the information about shards from the agency
-/// Usually one does not have to call this directly.
-////////////////////////////////////////////////////////////////////////////////
-
-void ClusterInfo::loadShards () {
-  while (true) {
-    AgencyCommResult result;
-
-    {
-      AgencyCommLocker locker("Current", "READ");
-      result = _agency.getValues("Current/ShardLocation", true);
-    }
-
-    if (result.successful()) {
-      std::map<std::string, std::string> shards;
-
-      if (result.flattenJson(shards, "Current/ShardLocation/", false)) {
-        LOG_TRACE("Current/ShardLocation loaded successfully");
-
-        // now update our internals with the results
-        WRITE_LOCKER(_lock);
-        _shardIds.clear();
-
-        std::map<ShardID, ServerID>::const_iterator it;
-        for (it = shards.begin(); it != shards.end(); ++it) {
-          _shardIds.insert(std::make_pair<ShardID, ServerID>((*it).first, (*it).second));
-        }
-
-        return;
-      }
-    }
-
-    LOG_TRACE("Error while loading Current/ServersRegistered");
-
-    usleep(1000);
-  }
-}
-
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief find the server who is responsible for a shard
 /// If it is not found in the cache, the cache is reloaded once, if
@@ -571,8 +542,8 @@ ServerID ClusterInfo::getResponsibleServer (ShardID const& shardID) {
       }
     }

-    // must call loadShards outside the lock
-    loadShards();
+    // must call loadCollections outside the lock
+    loadCollections();
   }

   return ServerID("");
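The lookup path keeps its cache-then-reload-once shape; only the reload target changes from the deleted loadShards() to loadCollections(). A minimal sketch of that pattern as it reads after this hunk (_shardIds, loadCollections() and the empty ServerID("") fallback come from the hunks above; READ_LOCKER and the exact retry bound are assumptions):

    // Sketch only: look the shard up in the cached map; on a miss, refresh
    // the cache once via loadCollections() and retry, else report "unknown".
    ServerID ClusterInfo::getResponsibleServer (ShardID const& shardID) {
      int tries = 0;

      while (++tries <= 2) {
        {
          READ_LOCKER(_lock);  // assumed read-side counterpart of WRITE_LOCKER
          std::map<ShardID, ServerID>::const_iterator it = _shardIds.find(shardID);

          if (it != _shardIds.end()) {
            return (*it).second;  // cache hit
          }
        }

        // must call loadCollections outside the lock
        loadCollections();
      }

      return ServerID("");  // shard unknown even after one reload
    }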
@@ -303,13 +303,6 @@ namespace triagens {

       std::string getTargetServerEndpoint (ServerID const&);

-////////////////////////////////////////////////////////////////////////////////
-/// @brief (re-)load the information about shards from the agency
-/// Usually one does not have to call this directly.
-////////////////////////////////////////////////////////////////////////////////
-
-      void loadShards ();
-
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief find the server who is responsible for a shard
 /// If it is not found in the cache, the cache is reloaded once, if
@@ -16,8 +16,6 @@ curl --silent --dump - -L -X PUT "$ETCD/v2/keys/$NAME/Target/MapIDToEndpoint/Pav
 curl --silent --dump - -L -X PUT "$ETCD/v2/keys/$NAME/Target/MapIDToEndpoint/Perry" -d "value=tcp://127.0.0.1:8531" || exit 1
 curl --silent --dump - -L -X PUT "$ETCD/v2/keys/$NAME/Target/MapIDToEndpoint/Claus" -d "value=tcp://127.0.0.1:8529" || exit 1

-curl --silent --dump - -L -X PUT "$ETCD/v2/keys/$NAME/Current/ShardLocation/shardBlubb" -d "value=Pavel" || exit 1
-
 echo
 echo start arangod with:
 echo "Pavel: bin/arangod --cluster.my-id Pavel --cluster.agency-prefix $NAME --cluster.agency-endpoint tcp://127.0.0.1:4001 --server.endpoint tcp://127.0.0.1:8530 data-pavel"
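With the Current/ShardLocation/shardBlubb seed gone from this setup script, shard placement now has to travel inside the collection documents themselves. For orientation, a hedged sketch of how ClusterInfo presumably reads those documents back out of the agency, modeled on the getValues()/flattenJson() calls visible in the deleted loadShards() above; the exact prefix and error handling inside the real loadCollections() may differ:

    // Illustration modeled on the removed loadShards(): fetch everything
    // below Current/Collections and flatten it into a string -> string map.
    AgencyCommResult result;
    {
      AgencyCommLocker locker("Current", "READ");
      result = _agency.getValues("Current/Collections", true);
    }

    std::map<std::string, std::string> collections;
    if (result.successful() &&
        result.flattenJson(collections, "Current/Collections/", false)) {
      // keys would be "<database>/<collectionId>" here; the values are the
      // JSON documents that feed the CollectionInfo/shardIds() loop shown
      // earlier
    }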
@@ -77,16 +77,13 @@ function ClusterEnabledSuite () {
       "Target/DBServers",
       "Target/Coordinators",
       "Target/Collections",
-      "Target/ShardLocation",
       "Plan/DBServers",
       "Plan/Coordinators",
       "Plan/Collections",
-      "Plan/ShardLocation",
       "Current/ServersRegistered",
       "Current/DBServers",
       "Current/Coordinators",
       "Current/Collections",
-      "Current/ShardLocation",
       "Current/ShardsCopied"
     ].forEach(function (d) {
       try {
@@ -299,14 +296,23 @@ function ClusterEnabledSuite () {
 ////////////////////////////////////////////////////////////////////////////////

     testGetResponsibleServer : function () {
-      assertTrue(agency.set("Current/ShardLocation/s1", "myself"));
+      var collection = {
+        id: "12345868390663",
+        name: "mycollection_test",
+        type: 3,
+        status: 2, // LOADED
+        shardKeys: [ "_key", "a", "bc" ],
+        shards: { "s1" : "myself" }
+      };
+
+      assertTrue(agency.set("Current/Collections/test/" + collection.id, JSON.stringify(collection)));
       ci.flush();

       assertEqual("myself", ci.getResponsibleServer("s1"));
       assertEqual("", ci.getResponsibleServer("s9999"));

-      assertTrue(agency.set("Current/ShardLocation/s1", "other"));
-      assertTrue(agency.set("Current/ShardLocation/s2", "myself"));
+      collection.shards = { s1: "other", s2: "myself" };
+      assertTrue(agency.set("Current/Collections/test/" + collection.id, JSON.stringify(collection)));
       ci.flush();

       assertEqual("other", ci.getResponsibleServer("s1"));
@@ -50,7 +50,6 @@ if [ "$1" == "init" ] ; then
     set Target/Collections/_system
     set Target/Collections/_system/Version 1
     set Target/Collections/_system/Lock UNLOCKED
-    set Target/ShardLocation

     set Plan/Version 1
     set Plan/DBServers
@@ -59,7 +58,6 @@ if [ "$1" == "init" ] ; then
     set Plan/Collections/_system
     set Plan/Collections/_system/Version 1
     set Plan/Collections/_system/Lock UNLOCKED
-    set Plan/ShardLocation

     set Current/Version 1
     set Current/ServersRegistered
@@ -69,7 +67,6 @@ if [ "$1" == "init" ] ; then
     set Current/Collections/_system
     set Current/Collections/_system/Version 1
     set Current/Collections/_system/Lock UNLOCKED
-    set Current/ShardLocation

     set Current/ShardsCopied