
adjusted listDatabases and doesDatabaseExist

Jan Steemann 2014-01-15 16:01:05 +01:00
parent 9335b1fd29
commit b0e4dc91dd
4 changed files with 106 additions and 44 deletions
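
For orientation before the diff: the change gives ClusterInfo::doesDatabaseExist() and ClusterInfo::listDatabases() an optional reload flag (defaulting to false) and makes them answer from the agency's Plan and Current sections instead of the collections cache. Below is a minimal caller sketch, not part of the commit; the include path and the triagens::arango namespace are assumptions based on the surrounding code.

// Hypothetical caller of the adjusted interface (sketch only, not in this commit).
#include <string>
#include <vector>
#include "Cluster/ClusterInfo.h"        // assumed include path

using triagens::arango::ClusterInfo;    // assumed namespace
using triagens::arango::DatabaseID;     // assumed: string-like database identifier

void checkDatabases () {
  ClusterInfo* ci = ClusterInfo::instance();

  // default (reload == false): answer from the data already cached in ClusterInfo
  bool cached = ci->doesDatabaseExist("mydb");

  // reload == true: re-read Plan/Current/DBServers from the agency first,
  // as the V8 bindings changed in this commit do
  bool fresh = ci->doesDatabaseExist("mydb", true);

  std::vector<DatabaseID> databases = ci->listDatabases(true);

  (void) cached; (void) fresh; (void) databases;
}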

View File

@@ -340,6 +340,8 @@ ClusterInfo* ClusterInfo::instance () {
 ClusterInfo::ClusterInfo ()
   : _agency(),
     _uniqid(),
+    _plannedDatabases(),
+    _currentDatabases(),
     _collectionsValid(false),
     _serversValid(false),
     _DBServersValid(false) {
@@ -411,27 +413,41 @@ void ClusterInfo::flush () {
 /// @brief ask whether a cluster database exists
 ////////////////////////////////////////////////////////////////////////////////
 
-bool ClusterInfo::doesDatabaseExist (DatabaseID const& databaseID) {
+bool ClusterInfo::doesDatabaseExist (DatabaseID const& databaseID,
+                                     bool reload) {
   int tries = 0;
 
-  if (! _collectionsValid) {
-    loadCurrentCollections();
+  if (reload) {
+    loadPlannedDatabases();
+    loadCurrentDatabases();
+    loadCurrentDBServers();
     ++tries;
   }
 
   while (++tries <= 2) {
     {
       READ_LOCKER(_lock);
-      // look up database by id
-      AllCollections::const_iterator it = _collections.find(databaseID);
+      const size_t expectedSize = _DBServers.size();
 
-      if (it != _collections.end()) {
-        return true;
+      // look up database by name
+      std::map<DatabaseID, TRI_json_t*>::const_iterator it = _plannedDatabases.find(databaseID);
+
+      if (it != _plannedDatabases.end()) {
+        // found the database in Plan
+        std::map<DatabaseID, std::map<ServerID, TRI_json_t*> >::const_iterator it2 = _currentDatabases.find(databaseID);
+
+        if (it2 != _currentDatabases.end()) {
+          // found the database in Current
+          return ((*it2).second.size() >= expectedSize);
+        }
       }
     }
 
-    // must load collections outside the lock
-    loadCurrentCollections();
+    loadPlannedDatabases();
+    loadCurrentDatabases();
+    loadCurrentDBServers();
   }
 
   return false;
@@ -441,18 +457,33 @@ bool ClusterInfo::doesDatabaseExist (DatabaseID const& databaseID) {
 /// @brief get list of databases in the cluster
 ////////////////////////////////////////////////////////////////////////////////
 
-vector<DatabaseID> ClusterInfo::listDatabases () {
-  vector<DatabaseID> res;
+vector<DatabaseID> ClusterInfo::listDatabases (bool reload) {
+  vector<DatabaseID> result;
 
-  if (! _collectionsValid) {
-    loadCurrentCollections();
+  if (reload) {
+    loadPlannedDatabases();
+    loadCurrentDatabases();
+    loadCurrentDBServers();
   }
 
-  AllCollections::const_iterator it;
-  for (it = _collections.begin(); it != _collections.end(); ++it) {
-    res.push_back(it->first);
+  READ_LOCKER(_lock);
+  const size_t expectedSize = _DBServers.size();
+
+  std::map<DatabaseID, TRI_json_t*>::const_iterator it = _plannedDatabases.begin();
+  while (it != _plannedDatabases.end()) {
+    std::map<DatabaseID, std::map<ServerID, TRI_json_t*> >::const_iterator it2 = _currentDatabases.find((*it).first);
+
+    if (it2 != _currentDatabases.end()) {
+      if ((*it2).second.size() >= expectedSize) {
+        result.push_back((*it).first);
+      }
+    }
+    ++it;
   }
-  return res;
+
+  return result;
 }
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -532,6 +563,8 @@ void ClusterInfo::loadPlannedDatabases () {
       // steal the json
       (*it).second._json = 0;
       _plannedDatabases.insert(std::make_pair<DatabaseID, TRI_json_t*>(name, options));
+
+      ++it;
     }
 
     return;
@@ -559,7 +592,7 @@ void ClusterInfo::loadCurrentDatabases () {
   }
 
   if (result.successful()) {
-    result.parse(prefix + "/", true);
+    result.parse(prefix + "/", false);
 
     WRITE_LOCKER(_lock);
     clearCurrentDatabases();
@@ -567,12 +600,16 @@ void ClusterInfo::loadCurrentDatabases () {
     std::map<std::string, AgencyCommResultEntry>::iterator it = result._values.begin();
 
     while (it != result._values.end()) {
-      const std::string& key = (*it).first;
+      const std::string key = (*it).first;
 
       // each entry consists of a database id and a collection id, separated by '/'
       std::vector<std::string> parts = triagens::basics::StringUtils::split(key, '/');
 
-      const std::string& database = parts[0];
+      if (parts.empty()) {
+        ++it;
+        continue;
+      }
+
+      const std::string database = parts[0];
 
       std::map<std::string, std::map<ServerID, TRI_json_t*> >::iterator it2 = _currentDatabases.find(database);
@@ -589,6 +626,8 @@ void ClusterInfo::loadCurrentDatabases () {
        (*it).second._json = 0;
 
        (*it2).second.insert(std::make_pair<ServerID, TRI_json_t*>(parts[1], json));
      }
+
+      ++it;
     }
 
     return;
@@ -625,7 +664,7 @@ void ClusterInfo::loadCurrentCollections () {
     std::map<std::string, AgencyCommResultEntry>::iterator it = result._values.begin();
 
     for (; it != result._values.end(); ++it) {
-      const std::string& key = (*it).first;
+      const std::string key = (*it).first;
 
       // each entry consists of a database id and a collection id, separated by '/'
       std::vector<std::string> parts = triagens::basics::StringUtils::split(key, '/');
@@ -636,8 +675,8 @@ void ClusterInfo::loadCurrentCollections () {
        continue;
      }
 
-      const std::string& database = parts[0];
-      const std::string& collection = parts[1];
+      const std::string database = parts[0];
+      const std::string collection = parts[1];
 
      // check whether we have created an entry for the database already
      AllCollections::iterator it2 = _collections.find(database);
@@ -917,12 +956,17 @@ std::vector<ServerID> ClusterInfo::getCurrentDBServers () {
    loadCurrentDBServers();
  }
 
-  std::vector<ServerID> res;
-  std::map<ServerID, ServerID>::iterator i;
-  for (i = _DBServers.begin(); i != _DBServers.end(); ++i) {
-    res.push_back(i->first);
+  std::vector<ServerID> result;
+
+  READ_LOCKER(_lock);
+  std::map<ServerID, ServerID>::iterator it = _DBServers.begin();
+  while (it != _DBServers.end()) {
+    result.push_back((*it).first);
+    it++;
  }
-  return res;
+
+  return result;
 }
 
 ////////////////////////////////////////////////////////////////////////////////
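
The pattern shared by the rewritten doesDatabaseExist() and listDatabases() above: a database counts as existing only when it appears in Plan and at least as many servers as there are registered DBServers have reported it in Current. A standalone restatement of that rule follows; it is a sketch, with hypothetical stand-in maps for the _plannedDatabases, _currentDatabases and _DBServers members.

// Sketch of the readiness rule used above; not part of the commit.
#include <cstddef>
#include <map>
#include <string>

typedef std::string DatabaseID;
typedef std::string ServerID;

static bool databaseIsReady (DatabaseID const& name,
                             std::map<DatabaseID, int> const& planned,                       // ~ _plannedDatabases
                             std::map<DatabaseID, std::map<ServerID, int> > const& current,  // ~ _currentDatabases
                             size_t numDBServers) {                                          // ~ _DBServers.size()
  if (planned.find(name) == planned.end()) {
    return false;   // not in Plan at all
  }

  std::map<DatabaseID, std::map<ServerID, int> >::const_iterator it = current.find(name);

  if (it == current.end()) {
    return false;   // no DBServer has reported the database in Current yet
  }

  // "ready" once every known DBServer (or more) has reported the database
  return (*it).second.size() >= numDBServers;
}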

View File

@@ -287,13 +287,14 @@ namespace triagens {
 /// @brief ask whether a cluster database exists
 ////////////////////////////////////////////////////////////////////////////////
 
-        bool doesDatabaseExist (DatabaseID const& databaseID);
+        bool doesDatabaseExist (DatabaseID const&,
+                                bool = false);
 
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief get list of databases in the cluster
 ////////////////////////////////////////////////////////////////////////////////
 
-        vector<DatabaseID> listDatabases ();
+        vector<DatabaseID> listDatabases (bool = false);
 
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief (re-)load the information about collections from the agency

View File

@@ -681,7 +681,7 @@ static v8::Handle<v8::Value> JS_DoesDatabaseExistClusterInfo (v8::Arguments cons
    TRI_V8_EXCEPTION_USAGE(scope, "doesDatabaseExist(<database-id>)");
  }
 
-  const bool result = ClusterInfo::instance()->doesDatabaseExist(TRI_ObjectToString(argv[0]));
+  const bool result = ClusterInfo::instance()->doesDatabaseExist(TRI_ObjectToString(argv[0]), true);
 
  return scope.Close(v8::Boolean::New(result));
 }
@@ -694,10 +694,10 @@ static v8::Handle<v8::Value> JS_ListDatabases (v8::Arguments const& argv) {
  v8::HandleScope scope;
 
  if (argv.Length() != 0) {
-    TRI_V8_EXCEPTION_USAGE(scope, "doesDatabaseExist()");
+    TRI_V8_EXCEPTION_USAGE(scope, "listDatabases()");
  }
 
-  vector<DatabaseID> res = ClusterInfo::instance()->listDatabases();
+  vector<DatabaseID> res = ClusterInfo::instance()->listDatabases(true);
  v8::Handle<v8::Array> a = v8::Array::New(res.size());
  vector<DatabaseID>::iterator it;
  int count = 0;
@@ -1634,8 +1634,7 @@ void TRI_InitV8Cluster (v8::Handle<v8::Context> context) {
  TRI_AddMethodVocbase(rt, "drop", JS_Drop);
 
  v8g->ClusterCommTempl = v8::Persistent<v8::ObjectTemplate>::New(isolate, rt);
-  TRI_AddGlobalFunctionVocbase(context, "ArangoClusterCommCtor",
-                               ft->GetFunction());
+  TRI_AddGlobalFunctionVocbase(context, "ArangoClusterCommCtor", ft->GetFunction(), true);
 
  // register the global object
  ss = v8g->ClusterCommTempl->NewInstance();

View File

@@ -224,20 +224,38 @@ function ClusterEnabledSuite () {
 ////////////////////////////////////////////////////////////////////////////////
 
    testDoesDatabaseExist : function () {
-      var collection = {
-        id: "123",
-        name: "mycollection",
-        type: 2,
-        status: 3, // LOADED
-        shardKeys: [ "_key" ],
-        shards: { "s1" : "myself", "s2" : "other" }
+      var database = {
+        name: "test"
      };
 
-      assertTrue(agency.set("Current/Collections/test/" + collection.id, collection));
+      assertTrue(agency.set("Plan/Databases/" + database.name, database));
+      assertTrue(agency.set("Current/DBServers/Foo", "Bar"));
+      assertTrue(agency.set("Current/DBServers/Barz", "Bat"));
+      assertTrue(agency.set("Current/Databases/test/Foo", database));
+      assertTrue(agency.set("Current/Databases/test/Barz", database));
 
      assertTrue(ci.doesDatabaseExist("test"));
      assertFalse(ci.doesDatabaseExist("UnitTestsAgencyNonExisting"));
    },
 
+////////////////////////////////////////////////////////////////////////////////
+/// @brief test doesDatabaseExist
+////////////////////////////////////////////////////////////////////////////////
+
+    testDoesDatabaseExistNotReady : function () {
+      var database = {
+        name: "test"
+      };
+
+      assertTrue(agency.set("Plan/Databases/" + database.name, database));
+      assertTrue(agency.set("Current/DBServers/Foo", "Bar"));
+      assertTrue(agency.set("Current/DBServers/Barz", "Bat"));
+      assertTrue(agency.set("Current/Databases/test/Foo", database));
+
+      assertFalse(ci.doesDatabaseExist("test"));
+    },
+
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief test getCollectionInfo
 ////////////////////////////////////////////////////////////////////////////////