diff --git a/arangod/Cluster/ClusterInfo.cpp b/arangod/Cluster/ClusterInfo.cpp
index 6361ec2ed0..14cf6a3d97 100644
--- a/arangod/Cluster/ClusterInfo.cpp
+++ b/arangod/Cluster/ClusterInfo.cpp
@@ -340,6 +340,8 @@ ClusterInfo* ClusterInfo::instance () {
 ClusterInfo::ClusterInfo ()
   : _agency(),
     _uniqid(),
+    _plannedDatabases(),
+    _currentDatabases(),
     _collectionsValid(false),
     _serversValid(false),
     _DBServersValid(false) {
@@ -411,27 +413,41 @@ void ClusterInfo::flush () {
 /// @brief ask whether a cluster database exists
 ////////////////////////////////////////////////////////////////////////////////
 
-bool ClusterInfo::doesDatabaseExist (DatabaseID const& databaseID) {
+bool ClusterInfo::doesDatabaseExist (DatabaseID const& databaseID,
+                                     bool reload) {
   int tries = 0;
 
-  if (! _collectionsValid) {
-    loadCurrentCollections();
+  if (reload) {
+    loadPlannedDatabases();
+    loadCurrentDatabases();
+    loadCurrentDBServers();
     ++tries;
   }
 
   while (++tries <= 2) {
     {
       READ_LOCKER(_lock);
-      // look up database by id
-      AllCollections::const_iterator it = _collections.find(databaseID);
+      const size_t expectedSize = _DBServers.size();
 
-      if (it != _collections.end()) {
-        return true;
+      // look up database by name
+
+      std::map<DatabaseID, TRI_json_t*>::const_iterator it = _plannedDatabases.find(databaseID);
+
+      if (it != _plannedDatabases.end()) {
+        // found the database in Plan
+        std::map<DatabaseID, std::map<ServerID, TRI_json_t*> >::const_iterator it2 = _currentDatabases.find(databaseID);
+
+        if (it2 != _currentDatabases.end()) {
+          // found the database in Current
+
+          return ((*it2).second.size() >= expectedSize);
+        }
       }
     }
-
-    // must load collections outside the lock
-    loadCurrentCollections();
+
+    loadPlannedDatabases();
+    loadCurrentDatabases();
+    loadCurrentDBServers();
   }
 
   return false;
@@ -441,18 +457,33 @@ bool ClusterInfo::doesDatabaseExist (DatabaseID const& databaseID) {
 /// @brief get list of databases in the cluster
 ////////////////////////////////////////////////////////////////////////////////
 
-vector<DatabaseID> ClusterInfo::listDatabases () {
-  vector<DatabaseID> res;
+vector<DatabaseID> ClusterInfo::listDatabases (bool reload) {
+  vector<DatabaseID> result;
 
-  if (! _collectionsValid) {
-    loadCurrentCollections();
+  if (reload) {
+    loadPlannedDatabases();
+    loadCurrentDatabases();
+    loadCurrentDBServers();
   }
 
-  AllCollections::const_iterator it;
-  for (it = _collections.begin(); it != _collections.end(); ++it) {
-    res.push_back(it->first);
+  READ_LOCKER(_lock);
+  const size_t expectedSize = _DBServers.size();
+
+  std::map<DatabaseID, TRI_json_t*>::const_iterator it = _plannedDatabases.begin();
+
+  while (it != _plannedDatabases.end()) {
+    std::map<DatabaseID, std::map<ServerID, TRI_json_t*> >::const_iterator it2 = _currentDatabases.find((*it).first);
+
+    if (it2 != _currentDatabases.end()) {
+      if ((*it2).second.size() >= expectedSize) {
+        result.push_back((*it).first);
+      }
+    }
+
+    ++it;
   }
-  return res;
+
+  return result;
 }
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -532,6 +563,8 @@ void ClusterInfo::loadPlannedDatabases () {
       // steal the json
       (*it).second._json = 0;
       _plannedDatabases.insert(std::make_pair(name, options));
+
+      ++it;
     }
 
     return;
@@ -559,7 +592,7 @@ void ClusterInfo::loadCurrentDatabases () {
   }
 
   if (result.successful()) {
-    result.parse(prefix + "/", true);
+    result.parse(prefix + "/", false);
 
     WRITE_LOCKER(_lock);
     clearCurrentDatabases();
@@ -567,12 +600,16 @@ void ClusterInfo::loadCurrentDatabases () {
    std::map<std::string, AgencyCommResultEntry>::iterator it = result._values.begin();
 
     while (it != result._values.end()) {
-      const std::string& key = (*it).first;
+      const std::string key = (*it).first;
 
       // each entry consists of a database id and a collection id, separated by '/'
       std::vector<std::string> parts = triagens::basics::StringUtils::split(key, '/');
 
-      const std::string& database = parts[0];
+      if (parts.empty()) {
+        ++it;
+        continue;
+      }
+      const std::string database = parts[0];
 
       std::map<DatabaseID, std::map<ServerID, TRI_json_t*> >::iterator it2 = _currentDatabases.find(database);
@@ -589,6 +626,8 @@ void ClusterInfo::loadCurrentDatabases () {
         (*it).second._json = 0;
         (*it2).second.insert(std::make_pair(parts[1], json));
       }
+
+      ++it;
     }
 
     return;
@@ -625,7 +664,7 @@ void ClusterInfo::loadCurrentCollections () {
    std::map<std::string, AgencyCommResultEntry>::iterator it = result._values.begin();
 
     for (; it != result._values.end(); ++it) {
-      const std::string& key = (*it).first;
+      const std::string key = (*it).first;
 
       // each entry consists of a database id and a collection id, separated by '/'
       std::vector<std::string> parts = triagens::basics::StringUtils::split(key, '/');
@@ -636,8 +675,8 @@ void ClusterInfo::loadCurrentCollections () {
        continue;
       }
 
-      const std::string& database = parts[0];
-      const std::string& collection = parts[1];
+      const std::string database = parts[0];
+      const std::string collection = parts[1];
 
       // check whether we have created an entry for the database already
       AllCollections::iterator it2 = _collections.find(database);
@@ -917,12 +956,17 @@ std::vector<ServerID> ClusterInfo::getCurrentDBServers () {
     loadCurrentDBServers();
   }
 
-  std::vector<ServerID> res;
-  std::map<ServerID, ServerID>::iterator i;
-  for (i = _DBServers.begin(); i != _DBServers.end(); ++i) {
-    res.push_back(i->first);
+  std::vector<ServerID> result;
+
+  READ_LOCKER(_lock);
+  std::map<ServerID, ServerID>::iterator it = _DBServers.begin();
+
+  while (it != _DBServers.end()) {
+    result.push_back((*it).first);
+    it++;
   }
-  return res;
+
+  return result;
 }
 
 ////////////////////////////////////////////////////////////////////////////////
diff --git a/arangod/Cluster/ClusterInfo.h b/arangod/Cluster/ClusterInfo.h
index 7a7b252211..702f695f0d 100644
--- a/arangod/Cluster/ClusterInfo.h
+++ b/arangod/Cluster/ClusterInfo.h
@@ -287,13 +287,14 @@ namespace triagens {
 /// @brief ask whether a cluster database exists
 ////////////////////////////////////////////////////////////////////////////////
 
-        bool doesDatabaseExist (DatabaseID const& databaseID);
+        bool doesDatabaseExist (DatabaseID const&,
+                                bool = false);
 
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief get list of databases in the cluster
 ////////////////////////////////////////////////////////////////////////////////
 
-        vector<DatabaseID> listDatabases ();
+        vector<DatabaseID> listDatabases (bool = false);
 
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief (re-)load the information about collections from the agency
diff --git a/arangod/Cluster/v8-cluster.cpp b/arangod/Cluster/v8-cluster.cpp
index e5b3d34f25..99f4f3d730 100644
--- a/arangod/Cluster/v8-cluster.cpp
+++ b/arangod/Cluster/v8-cluster.cpp
@@ -681,7 +681,7 @@ static v8::Handle<v8::Value> JS_DoesDatabaseExistClusterInfo (v8::Arguments cons
     TRI_V8_EXCEPTION_USAGE(scope, "doesDatabaseExist()");
   }
 
-  const bool result = ClusterInfo::instance()->doesDatabaseExist(TRI_ObjectToString(argv[0]));
+  const bool result = ClusterInfo::instance()->doesDatabaseExist(TRI_ObjectToString(argv[0]), true);
 
   return scope.Close(v8::Boolean::New(result));
 }
@@ -694,10 +694,10 @@ static v8::Handle<v8::Value> JS_ListDatabases (v8::Arguments const& argv) {
   v8::HandleScope scope;
 
   if (argv.Length() != 0) {
-    TRI_V8_EXCEPTION_USAGE(scope, "doesDatabaseExist()");
+    TRI_V8_EXCEPTION_USAGE(scope, "listDatabases()");
   }
 
-  vector<DatabaseID> res = ClusterInfo::instance()->listDatabases();
+  vector<DatabaseID> res = ClusterInfo::instance()->listDatabases(true);
   v8::Handle<v8::Array> a = v8::Array::New(res.size());
   vector<DatabaseID>::iterator it;
   int count = 0;
@@ -1634,8 +1634,7 @@ void TRI_InitV8Cluster (v8::Handle<v8::Context> context) {
   TRI_AddMethodVocbase(rt, "drop", JS_Drop);
 
   v8g->ClusterCommTempl = v8::Persistent<v8::ObjectTemplate>::New(isolate, rt);
-  TRI_AddGlobalFunctionVocbase(context, "ArangoClusterCommCtor",
-                               ft->GetFunction());
+  TRI_AddGlobalFunctionVocbase(context, "ArangoClusterCommCtor", ft->GetFunction(), true);
 
   // register the global object
   ss = v8g->ClusterCommTempl->NewInstance();
diff --git a/js/server/tests/cluster.js b/js/server/tests/cluster.js
index f67a17abf7..21cebf0b4f 100644
--- a/js/server/tests/cluster.js
+++ b/js/server/tests/cluster.js
@@ -224,20 +224,38 @@ function ClusterEnabledSuite () {
 ////////////////////////////////////////////////////////////////////////////////
 
     testDoesDatabaseExist : function () {
-      var collection = {
-        id: "123",
-        name: "mycollection",
-        type: 2,
-        status: 3, // LOADED
-        shardKeys: [ "_key" ],
-        shards: { "s1" : "myself", "s2" : "other" }
+      var database = {
+        name: "test"
       };
-      assertTrue(agency.set("Current/Collections/test/" + collection.id, collection));
+
+      assertTrue(agency.set("Plan/Databases/" + database.name, database));
+      assertTrue(agency.set("Current/DBServers/Foo", "Bar"));
+      assertTrue(agency.set("Current/DBServers/Barz", "Bat"));
+      assertTrue(agency.set("Current/Databases/test/Foo", database));
+      assertTrue(agency.set("Current/Databases/test/Barz", database));
 
       assertTrue(ci.doesDatabaseExist("test"));
+      assertFalse(ci.doesDatabaseExist("UnitTestsAgencyNonExisting"));
     },
 
+////////////////////////////////////////////////////////////////////////////////
+/// @brief test doesDatabaseExist
+////////////////////////////////////////////////////////////////////////////////
+
+    testDoesDatabaseExistNotReady : function () {
+      var database = {
+        name: "test"
+      };
+
+      assertTrue(agency.set("Plan/Databases/" + database.name, database));
+      assertTrue(agency.set("Current/DBServers/Foo", "Bar"));
+      assertTrue(agency.set("Current/DBServers/Barz", "Bat"));
+      assertTrue(agency.set("Current/Databases/test/Foo", database));
+
+      assertFalse(ci.doesDatabaseExist("test"));
+    },
+
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief test getCollectionInfo
 ////////////////////////////////////////////////////////////////////////////////
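Note (not part of the patch): a minimal, self-contained sketch of the availability rule the reworked doesDatabaseExist encodes, i.e. a database only counts as existing once it appears in Plan/Databases and at least as many DBServers have reported it under Current/Databases as are registered under Current/DBServers. The typedefs, the function name databaseIsAvailable, and the bool-valued maps below are simplified stand-ins for illustration, not the actual ClusterInfo members or agency values.

#include <map>
#include <string>

// simplified stand-ins for the agency-backed state that ClusterInfo caches
typedef std::string DatabaseID;
typedef std::string ServerID;

// true only if the database is planned and at least as many DBServers
// report it in Current as are currently registered
static bool databaseIsAvailable (DatabaseID const& name,
                                 std::map<DatabaseID, bool> const& planned,
                                 std::map<DatabaseID, std::map<ServerID, bool> > const& current,
                                 std::map<ServerID, ServerID> const& dbServers) {
  if (planned.find(name) == planned.end()) {
    // not in Plan/Databases
    return false;
  }

  std::map<DatabaseID, std::map<ServerID, bool> >::const_iterator it = current.find(name);

  if (it == current.end()) {
    // no DBServer has reported the database in Current/Databases yet
    return false;
  }

  // same comparison as the patch: reports in Current >= registered DBServers
  return (*it).second.size() >= dbServers.size();
}

This mirrors the test cases above: with two registered DBServers, the database is reported as existing only after both have written their entry under Current/Databases/test, which is why testDoesDatabaseExistNotReady expects false with a single report.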