mirror of https://gitee.com/bigwinds/arangodb
Merge branch 'devel' of ssh://github.com/ArangoDB/ArangoDB into devel
commit 3c49b204e1
@@ -666,14 +666,18 @@ to continue, once all processes have been started up in the debugger.
ArangoDB on Mesos
=================

This will spawn a **temporary** local mesos cluster.

Requirements (a quick way to check these is sketched after this list):

- Somewhat recent linux
- docker
- docker 1.10+
- curl
- jq
- sed (for file editing)
- jq (for json parsing)
- git
- at least 8GB RAM
- fully open firewall inside the docker network
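The requirements can be verified with a short shell check. This is only an illustrative sketch; the commands below are one possible way to test the listed items, not part of the original instructions:

```
# Quick sanity check for the requirements listed above (illustrative only).
for tool in docker curl jq sed git; do
  command -v "$tool" >/dev/null || echo "missing: $tool"
done

# docker 1.10 or newer is required
docker version --format 'docker server: {{.Server.Version}}'

# at least 8GB RAM
free -g | awk '/^Mem:/ { print "total RAM (GB): " $2 }'
```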

To start up a local mesos cluster:

@@ -741,16 +745,25 @@ Then save the following configuration to a local file and name it `arangodb3.json`
Adjust the lines `--master` and `--zk` to match the IP of your mesos-cluster:

```
docker inspect mesos-cluster | jq '.[0].NetworkSettings.Networks.bridge.IPAddress'
MESOS_IP=`docker inspect mesos-cluster | \
jq '.[0].NetworkSettings.Networks.bridge.IPAddress' | \
sed 's;";;g'`
sed -i -e "s;172.17.0.2;${MESOS_IP};g" arangodb3.json
```
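As an aside (not part of the change shown above), the quote-stripping `sed` can be avoided by asking `jq` for raw output, which the deploy command below already does:

```
# Same result as above, using jq -r to drop the surrounding quotes.
MESOS_IP=$(docker inspect mesos-cluster | jq -r '.[0].NetworkSettings.Networks.bridge.IPAddress')
sed -i -e "s;172.17.0.2;${MESOS_IP};g" arangodb3.json
```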

And deploy the modified file to your local mesos cluster:

```
curl -X POST $(docker inspect mesos-cluster | jq -r '.[0].NetworkSettings.Networks.bridge.IPAddress'):8080/v2/apps -d @arangodb3.json -H "Content-Type: application/json" | jq .
MESOS_IP=`docker inspect mesos-cluster | \
jq '.[0].NetworkSettings.Networks.bridge.IPAddress' | \
sed 's;";;g'`
curl -X POST ${MESOS_IP}:8080/v2/apps \
-d @arangodb3.json \
-H "Content-Type: application/json" | \
jq .
```

Point your web browser to `$(docker inspect mesos-cluster | jq -r '.[0].NetworkSettings.Networks.bridge.IPAddress')`:8080.
Point your web browser to the address printed by `echo "http://${MESOS_IP}:8080"`.

Wait until ArangoDB is healthy.

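One way to watch for this from the shell is to poll Marathon's REST API. This is only a sketch and assumes the application id inside `arangodb3.json` is `arangodb3`; adjust the id to whatever the file actually uses:

```
# Poll Marathon until the app reports healthy tasks (illustrative only).
while true; do
  curl -s "${MESOS_IP}:8080/v2/apps/arangodb3" | jq '.app | {tasksRunning, tasksHealthy}'
  sleep 5
done
```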
@@ -767,7 +780,7 @@ https://github.com/arangodb/arangodb-docker
https://github.com/arangodb/arangodb-mesos-docker
https://github.com/arangodb/arangodb-mesos-framework

Then adjust the docker images in the config and redeploy.
Then adjust the docker images in the config (`arangodb3.json`) and redeploy it using the curl command above.
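For example, if `arangodb3.json` follows the usual Marathon app layout with a `.container.docker.image` field (an assumption, since the file itself is not shown in this hunk), the image can be swapped with `jq` before redeploying:

```
# Replace the docker image in the Marathon app definition
# (field name and image tag below are examples, not taken from the file).
jq '.container.docker.image = "arangodb/arangodb-mesos:devel"' arangodb3.json > arangodb3.tmp.json \
  && mv arangodb3.tmp.json arangodb3.json
```

A `PUT` to Marathon's `/v2/apps/<app-id>` can be used instead of the `POST` if the app is already running.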

--------------------------------------------------------------------------------
Front-End (WebUI)

@@ -109,6 +109,7 @@ void AgencyFeature::validateOptions(std::shared_ptr<ProgramOptions> options) {
disable();
return;
}
ServerState::instance()->setRole(ServerState::ROLE_AGENT);

// Agency size
if (_size < 1) {

@@ -66,6 +66,7 @@ static std::string const planDBServersPrefix = "/Plan/DBServers";
static std::string const planCoordinatorsPrefix = "/Plan/Coordinators";
static std::string const currentServersRegisteredPrefix
= "/Current/ServersRegistered";
static std::string const foxxmaster = "/Current/Foxxmaster";

std::vector<check_t> Supervision::checkDBServers() {
std::vector<check_t> ret;
@@ -184,7 +185,14 @@ std::vector<check_t> Supervision::checkCoordinators() {
Node::Children const serversRegistered =
_snapshot(currentServersRegisteredPrefix).children();

std::string currentFoxxmaster;
try {
currentFoxxmaster = _snapshot(foxxmaster).getString();
} catch (...) {
}

std::string goodServerId;
bool foxxmasterOk = false;
std::vector<std::string> todelete;
for (auto const& machine : _snapshot(healthPrefix).children()) {
if (machine.first.substr(0,2) == "Co") {
@@ -239,6 +247,12 @@ std::vector<check_t> Supervision::checkCoordinators() {
}

if (good) {
if (goodServerId.empty()) {
goodServerId = serverID;
}
if (serverID == currentFoxxmaster) {
foxxmasterOk = true;
}
report->add("LastHeartbeatAcked",
VPackValue(
timepointToString(std::chrono::system_clock::now())));
@@ -280,6 +294,19 @@ std::vector<check_t> Supervision::checkCoordinators() {
del->close(); del->close(); del->close();
_agent->write(del);
}

if (!foxxmasterOk && !goodServerId.empty()) {
query_t create = std::make_shared<Builder>();
create->openArray();
create->openArray();
create->openObject();
create->add(_agencyPrefix + foxxmaster, VPackValue(goodServerId));
create->close();
create->close();
create->close();

_agent->write(create);
}

return ret;

@@ -294,7 +321,7 @@ bool Supervision::updateSnapshot() {
return true;
}

bool Supervision::doChecks(bool timedout) {
bool Supervision::doChecks() {

checkDBServers();
checkCoordinators();
@@ -306,7 +333,6 @@ void Supervision::run() {

CONDITION_LOCKER(guard, _cv);
TRI_ASSERT(_agent != nullptr);
bool timedout = false;

while (!this->isStopping()) {

@@ -330,14 +356,14 @@ void Supervision::run() {

// Do nothing unless leader
if (_agent->leading()) {
timedout = _cv.wait(_frequency * 1000000); // quarter second
_cv.wait(_frequency * 1000000); // quarter second
} else {
_cv.wait();
}

// Do supervision
updateSnapshot();
doChecks(timedout);
doChecks();
shrinkCluster();
workJobs();

@@ -138,7 +138,7 @@ class Supervision : public arangodb::Thread {
Store const& store() const;

/// @brief Perform sanity checking
bool doChecks(bool);
bool doChecks();

/// @brief update my local agency snapshot
bool updateSnapshot();

@@ -324,6 +324,7 @@ void HeartbeatThread::runCoordinator() {
AgencyReadTransaction trx(std::vector<std::string>(
{_agency.prefixPath() + "Plan/Version",
_agency.prefixPath() + "Current/Version",
_agency.prefixPath() + "Current/Foxxmaster",
_agency.prefixPath() + "Sync/Commands/" + _myId,
_agency.prefixPath() + "Sync/UserVersion"}));
AgencyCommResult result = _agency.sendTransactionWithFailover(trx);
@@ -337,6 +338,14 @@ void HeartbeatThread::runCoordinator() {

handleStateChange(result);

VPackSlice foxxmasterSlice = result.slice()[0].get(
std::vector<std::string>({_agency.prefix(), "Current", "Foxxmaster"})
);

if (foxxmasterSlice.isString()) {
ServerState::instance()->setFoxxmaster(foxxmasterSlice.copyString());
}

VPackSlice versionSlice = result.slice()[0].get(
std::vector<std::string>({_agency.prefix(), "Plan", "Version"}));

@@ -56,7 +56,8 @@ ServerState::ServerState()
_idOfPrimary(""),
_state(STATE_UNDEFINED),
_initialized(false),
_clusterEnabled(false) {
_clusterEnabled(false),
_foxxmaster("") {
storeRole(ROLE_UNDEFINED);
}

@@ -84,6 +85,8 @@ std::string ServerState::roleToString(ServerState::RoleEnum role) {
return "SECONDARY";
case ROLE_COORDINATOR:
return "COORDINATOR";
case ROLE_AGENT:
return "AGENT";
}

TRI_ASSERT(false);
@@ -992,3 +995,15 @@ bool ServerState::storeRole(RoleEnum role) {
_role.store(role, std::memory_order_release);
return true;
}

bool ServerState::isFoxxmaster() {
return !isRunningInCluster() || _foxxmaster == getId();
}

std::string const& ServerState::getFoxxmaster() {
return _foxxmaster;
}

void ServerState::setFoxxmaster(std::string const& foxxmaster) {
_foxxmaster = foxxmaster;
}

@@ -38,7 +38,8 @@ class ServerState {
ROLE_SINGLE, // is set when cluster feature is off
ROLE_PRIMARY,
ROLE_SECONDARY,
ROLE_COORDINATOR
ROLE_COORDINATOR,
ROLE_AGENT
};

/// @brief an enum describing the possible states a server can have
@@ -121,6 +122,14 @@ class ServerState {
role == ServerState::ROLE_SECONDARY ||
role == ServerState::ROLE_COORDINATOR);
}

/// @brief check whether the server is an agent
bool isAgent() { return isAgent(loadRole()); }

/// @brief check whether the server is an agent
static bool isAgent(ServerState::RoleEnum role) {
return (role == ServerState::ROLE_AGENT);
}

/// @brief check whether the server is running in a cluster
bool isRunningInCluster() { return isClusterRole(loadRole()); }
@@ -216,6 +225,12 @@ class ServerState {
/// agency or is not unique, then the system keeps the old role.
/// Returns true if there is a change and false otherwise.
bool redetermineRole();

bool isFoxxmaster();

std::string const& getFoxxmaster();

void setFoxxmaster(std::string const&);

private:
/// @brief atomically fetches the server role
@@ -313,6 +328,8 @@ class ServerState {

/// @brief whether or not we are a cluster member
bool _clusterEnabled;

std::string _foxxmaster;
};
}

@@ -1102,6 +1102,22 @@ static void JS_IdServerState(v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_TRY_CATCH_END
}

static void JS_isFoxxmaster(v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_TRY_CATCH_BEGIN(isolate);
v8::HandleScope scope(isolate);

if (args.Length() != 0) {
TRI_V8_THROW_EXCEPTION_USAGE("isFoxxmaster()");
}

if (ServerState::instance()->isFoxxmaster()) {
TRI_V8_RETURN_TRUE();
} else {
TRI_V8_RETURN_FALSE();
}
TRI_V8_TRY_CATCH_END
}

////////////////////////////////////////////////////////////////////////////////
/// @brief return the primary servers id (only for secondaries)
////////////////////////////////////////////////////////////////////////////////
@@ -2102,6 +2118,8 @@ void TRI_InitV8Cluster(v8::Isolate* isolate, v8::Handle<v8::Context> context) {
JS_LocalInfoServerState);
TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("id"),
JS_IdServerState);
TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("isFoxxmaster"),
JS_isFoxxmaster);
TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("idOfPrimary"),
JS_IdOfPrimaryServerState);
TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("description"),

@@ -137,9 +137,12 @@ static void raceForClusterBootstrap() {

void BootstrapFeature::start() {
auto vocbase = DatabaseFeature::DATABASE->vocbase();

auto ss = ServerState::instance();
if (ss->isCoordinator()) {
if (!ss->isRunningInCluster() && !ss->isAgent()) {
LOG_TOPIC(DEBUG, Logger::STARTUP) << "Running server/server.js";
V8DealerFeature::DEALER->loadJavascript(vocbase, "server/server.js");
} else if (ss->isCoordinator()) {
LOG_TOPIC(DEBUG, Logger::STARTUP) << "Racing for cluster bootstrap...";
raceForClusterBootstrap();
LOG_TOPIC(DEBUG, Logger::STARTUP)
@@ -151,9 +154,6 @@ void BootstrapFeature::start() {
<< "Running server/bootstrap/db-server.js";
V8DealerFeature::DEALER->loadJavascript(vocbase,
"server/bootstrap/db-server.js");
} else {
LOG_TOPIC(DEBUG, Logger::STARTUP) << "Running server/server.js";
V8DealerFeature::DEALER->loadJavascript(vocbase, "server/server.js");
}

// Start service properly:

@@ -48,5 +48,8 @@
if (!result) {
console.error('upgrade-database.js for cluster script failed!');
}
internal.loadStartup('server/bootstrap/foxxes.js').foxxes();
global.ArangoAgency.set('Current/Foxxmaster', global.ArangoServerState.id());

return true;
}());

@@ -40,11 +40,6 @@
require('@arangodb/statistics').startup();
}

// load all foxxes
if (internal.threadNumber === 0) {
internal.loadStartup('server/bootstrap/foxxes.js').foxxes();
}

// autoload all modules and reload routing information in all threads
internal.loadStartup('server/bootstrap/autoload.js').startup();
internal.loadStartup('server/bootstrap/routing.js').startup();

@@ -102,6 +102,9 @@ var runInDatabase = function () {
};

exports.manage = function () {
if (!global.ArangoServerState.isFoxxmaster()) {
return;
}
var initialDatabase = db._name();
var now = Date.now();