commit 754b1d4ec6
Merge branch 'devel' of github.com:arangodb/arangodb into devel
@@ -67,7 +67,7 @@ inline arangodb::consensus::write_ret_t transact (
 LOG_TOPIC(ERR, Logger::AGENCY) << e.what();
 }
 
-LOG_TOPIC(INFO, Logger::AGENCY) << envelope->toJson();
+LOG_TOPIC(DEBUG, Logger::AGENCY) << envelope->toJson();
 auto ret = _agent->write(envelope);
 if (waitForCommit) {
 auto maximum = *std::max_element(ret.indices.begin(), ret.indices.end());
@@ -63,8 +63,6 @@ bool MoveShard::create () {
 std::string path, now(timepointToString(std::chrono::system_clock::now()));
 
 // DBservers
-std::string planPath =
-planColPrefix + _database + "/" + _collection + "/shards/" + _shard;
 std::string curPath =
 curColPrefix + _database + "/" + _collection + "/" + _shard + "/servers";
 
@@ -250,7 +248,7 @@ JOB_STATUS MoveShard::status () {
 
 Slice current = _snapshot(curPath).slice(),
 plan = _snapshot(planPath).slice();
 
 std::vector<std::string> planv, currv;
 for (auto const& srv : VPackArrayIterator(plan)) {
 planv.push_back(srv.copyString());
@@ -197,9 +197,7 @@ std::vector<VPackSlice> State::slices(arangodb::consensus::index_t start,
 slices.push_back(VPackSlice(_log.at(i).entry->data()));
 } catch (std::exception const& e) {
 break;
-LOG_TOPIC(ERR, Logger::AGENCY) << start-_cur << " " << end-_cur << " " << i << " " << _log.size();
 }
-
 }
 
 return slices;
@@ -273,16 +273,14 @@ bool Store::check(VPackSlice const& slice) const {
 }
 
 for (auto const& precond : VPackObjectIterator(slice)) { // Preconditions
-std::string path = precond.key.copyString();
-bool found = false;
+std::vector<std::string> pv = split(precond.key.copyString(), '/');
+bool found = (_node.exists(pv).size() == pv.size());
 Node node("precond");
-try {
-node = (*this)(path);
-found = true;
-} catch (StoreException const&) {
+if (found) {
+node = _node(pv);
 }
 
 if (precond.value.isObject()) {
 for (auto const& op : VPackObjectIterator(precond.value)) {
 std::string const& oper = op.key.copyString();
 
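The precondition check above now relies on Node::exists() returning the part of the requested path that is actually present, so a precondition node counts as found only when the whole path matches. A minimal JavaScript model of that idea, using a plain nested object instead of the ArangoDB C++ API (the exists() semantics are assumed from the usage in the hunk):

    // Model of a key/value tree plus an exists() that returns the matched path prefix.
    function exists(tree, parts) {
      var matched = [];
      var node = tree;
      for (var i = 0; i < parts.length; ++i) {
        if (node === null || typeof node !== "object" || !(parts[i] in node)) {
          break;                       // stop at the first missing component
        }
        node = node[parts[i]];
        matched.push(parts[i]);
      }
      return matched;
    }

    var store = { arango: { Plan: { Version: 1 } } };
    var pv = "arango/Plan/Version".split("/");
    var found = (exists(store, pv).length === pv.length);          // true, like the check above
    console.log(found, exists(store, "arango/Current/Foo".split("/")));  // true [ 'arango' ]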
@@ -366,19 +364,13 @@ bool Store::read(VPackSlice const& query, Builder& ret) const {
 // Create response tree
 Node copy("copy");
 for (auto const path : query_strs) {
-try {
-copy(path) = (*this)(path);
-} catch (StoreException const&) {
-std::vector<std::string> pv = split(path, '/');
-while (!pv.empty()) {
-std::string end = pv.back();
+std::vector<std::string> pv = split(path, '/');
+size_t e = _node.exists(pv).size();
+if (e == pv.size()) { // existing
+copy(pv) = _node(pv);
+} else { // non-existing
+for (size_t i = 0; i < pv.size()-e+1; ++i) {
 pv.pop_back();
-copy(pv).removeChild(end);
-try {
-(*this)(pv);
-break;
-} catch (...) {
-}
 }
 if (copy(pv).type() == LEAF && copy(pv).slice().isNone()) {
 copy(pv) = arangodb::basics::VelocyPackHelper::EmptyObjectValue();
@@ -87,8 +87,6 @@ std::vector<check_t> Supervision::checkDBServers() {
 try { // Existing
 lastHeartbeatTime =
 _snapshot(healthPrefix + serverID + "/LastHeartbeatSent").toJson();
-lastHeartbeatStatus =
-_snapshot(healthPrefix + serverID + "/LastHeartbeatStatus").toJson();
 lastHeartbeatAcked =
 _snapshot(healthPrefix + serverID + "/LastHeartbeatAcked").toJson();
 lastStatus = _snapshot(healthPrefix + serverID + "/Status").toJson();
@@ -176,8 +174,6 @@ std::vector<check_t> Supervision::checkCoordinators() {
 _snapshot(healthPrefix + serverID + "/LastHeartbeatSent").toJson();
 lastHeartbeatStatus =
 _snapshot(healthPrefix + serverID + "/LastHeartbeatStatus").toJson();
-lastHeartbeatAcked =
-_snapshot(healthPrefix + serverID + "/LastHeartbeatAcked").toJson();
 lastStatus = _snapshot(healthPrefix + serverID + "/Status").toJson();
 if (lastHeartbeatTime != heartbeatTime) { // Update
 good = true;
@@ -640,14 +640,15 @@ bool HeartbeatThread::syncDBServerStatusQuo() {
 }
 return false;
 }
-if (dispatcher->addJob(job, false) == TRI_ERROR_NO_ERROR) {
+int res = dispatcher->addJob(job, false);
+if (res == TRI_ERROR_NO_ERROR) {
 LOG_TOPIC(TRACE, Logger::HEARTBEAT) << "scheduled dbserver sync";
 return true;
 }
 MUTEX_LOCKER(mutexLocker, _statusLock);
 _isDispatchingChange = false;
 
-if (warn) {
+if (warn && res != TRI_ERROR_SHUTTING_DOWN) {
 LOG_TOPIC(ERR, Logger::HEARTBEAT) << "could not schedule dbserver sync";
 }
 }
@@ -221,6 +221,9 @@ std::shared_ptr<VPackBuilder> Scheduler::getUserTask(std::string const& id) {
 ////////////////////////////////////////////////////////////////////////////////
 
 int Scheduler::unregisterUserTask(std::string const& id) {
+if (stopping) {
+return TRI_ERROR_SHUTTING_DOWN;
+}
 if (id.empty()) {
 return TRI_ERROR_TASK_INVALID_ID;
 }
@@ -258,6 +261,9 @@ int Scheduler::unregisterUserTask(std::string const& id) {
 ////////////////////////////////////////////////////////////////////////////////
 
 int Scheduler::unregisterUserTasks() {
+if (stopping) {
+return TRI_ERROR_SHUTTING_DOWN;
+}
 while (true) {
 Task* task = nullptr;
 
@@ -304,6 +310,9 @@ int Scheduler::registerTask(Task* task, ssize_t* tn) {
 ////////////////////////////////////////////////////////////////////////////////
 
 int Scheduler::unregisterTask(Task* task) {
+if (stopping) {
+return TRI_ERROR_SHUTTING_DOWN;
+}
 SchedulerThread* thread = nullptr;
 
 std::string const taskName(task->name());
@@ -338,6 +347,9 @@ int Scheduler::unregisterTask(Task* task) {
 ////////////////////////////////////////////////////////////////////////////////
 
 int Scheduler::destroyTask(Task* task) {
+if (stopping) {
+return TRI_ERROR_SHUTTING_DOWN;
+}
 SchedulerThread* thread = nullptr;
 std::string const taskName(task->name());
 
@@ -420,6 +432,9 @@ EventLoop Scheduler::lookupLoopById(uint64_t taskId) {
 ////////////////////////////////////////////////////////////////////////////////
 
 int Scheduler::registerTask(Task* task, ssize_t* got, ssize_t want) {
+if (stopping) {
+return TRI_ERROR_SHUTTING_DOWN;
+}
 TRI_ASSERT(task != nullptr);
 
 if (task->isUserDefined() && task->id().empty()) {
@@ -92,7 +92,7 @@ bool V8TimerTask::handleTimeout() {
 
 int res = DispatcherFeature::DISPATCHER->addJob(job, false);
 
-if (res != TRI_ERROR_NO_ERROR) {
+if (res != TRI_ERROR_NO_ERROR && res != TRI_ERROR_SHUTTING_DOWN) {
 LOG(WARN) << "could not add task " << _command << " to queue";
 }
 
File diff suppressed because one or more lines are too long
Binary file not shown.
@@ -1535,7 +1535,7 @@ if (list.length > 0) {
 <img class="arangodbLogo" src="img/arangodb_logo_small.png"/>
 <img class="" src="img/arangodb_logo_letter.png"/>
 </div>
-<p class="wrong-credentials" style="display:none">Wrong credentials!</p>
+<p class="wrong-credentials" style="display:none">Login failed!</p>
 <p class="checking-password" style="display:none">
 <i class="fa fa-circle-o-notch fa-spin fa-fw"></i>
 <span class="sr-only">Loading...</span>
@@ -2564,6 +2564,13 @@ if (list.length > 0) {
 <% } %>
 
 <div class="information" id="infoCoords">
+<span class="positive"><span> <%= scaleProperties.coordsOk %> </span><i class="fa fa-check-circle"></i></span>
+<% if (scaleProperties.coordsError) { %>
+<span class="negative"><span> <%= scaleProperties.coordsError %> </span><i class="fa fa-exclamation-circle"></i></span>
+<% } %>
+<% if (scaleProperties.coordsPending && scaling === true) { %>
+<span class="warning"><span> <%= scaleProperties.coordsPending %> </span><i class="fa fa-circle-o-notch fa-spin"></i></span>
+<% } %>
 </div>
 </div>
 
@@ -2620,8 +2627,8 @@ if (list.length > 0) {
 </div>
 </div>
 
 <div class="pure-u-3-4">
 
 <% if (scaling === true) { %>
 <div class="scaleGroup" id="scaleCoords">
 <div style="text-align: -webkit-right;">
@@ -2633,6 +2640,13 @@ if (list.length > 0) {
 <% } %>
 
 <div class="information" id="infoDBs">
+<span class="positive"><span> <%= scaleProperties.dbsOk %> </span><i class="fa fa-check-circle"></i></span>
+<% if (scaleProperties.dbsError) { %>
+<span class="negative"><span> <%= scaleProperties.dbsError %> </span><i class="fa fa-exclamation-circle"></i></span>
+<% } %>
+<% if (scaleProperties.dbsPending && scaling === true) { %>
+<span class="warning"><span> <%= scaleProperties.dbsPending %> </span><i class="fa fa-circle-o-notch fa-spin"></i></span>
+<% } %>
 </div>
 
 </div>
@@ -3313,4 +3327,4 @@ var cutByResolution = function (str) {
 </div>
 
 <div id="workMonitorContent" class="innerContent">
-</div></script></head><body><nav class="navbar" style="display: none"><div class="primary"><div class="navlogo"><a class="logo big" href="#"><img class="arangodbLogo" src="img/arangodb_logo_big.png"></a> <a class="logo small" href="#"><img class="arangodbLogo" src="img/arangodb_logo_small.png"></a> <a class="version"><span>VERSION:</span><span id="currentVersion"></span></a></div><div class="statmenu" id="statisticBar"></div><div class="navmenu" id="navigationBar"></div></div></nav><div id="modalPlaceholder"></div><div class="bodyWrapper" style="display: none"><div class="centralRow"><div id="navbar2" class="navbarWrapper secondary"><div class="subnavmenu" id="subNavigationBar"></div></div><div class="resizecontainer contentWrapper"><div id="loadingScreen" class="loadingScreen" style="display: none"><i class="fa fa-circle-o-notch fa-spin fa-3x fa-fw margin-bottom"></i> <span class="sr-only">Loading...</span></div><div id="content" class="centralContent"></div><footer class="footer"><div id="footerBar"></div></footer></div></div></div><div id="progressPlaceholder" style="display:none"></div><div id="spotlightPlaceholder" style="display:none"></div><div id="offlinePlaceholder" style="display:none"><div class="offline-div"><div class="pure-u"><div class="pure-u-1-4"></div><div class="pure-u-1-2 offline-window"><div class="offline-header"><h3>You have been disconnected from the server</h3></div><div class="offline-body"><p>The connection to the server has been lost. The server may be under heavy load.</p><p>Trying to reconnect in <span id="offlineSeconds">10</span> seconds.</p><p class="animation_state"><span><button class="button-success">Reconnect now</button></span></p></div></div><div class="pure-u-1-4"></div></div></div></div><div class="arangoFrame" style=""><div class="outerDiv"><div class="innerDiv"></div></div></div><script src="libs.js?version=1465511522494"></script><script src="app.js?version=1465511522494"></script></body></html>
+</div></script></head><body><nav class="navbar" style="display: none"><div class="primary"><div class="navlogo"><a class="logo big" href="#"><img class="arangodbLogo" src="img/arangodb_logo_big.png"></a> <a class="logo small" href="#"><img class="arangodbLogo" src="img/arangodb_logo_small.png"></a> <a class="version"><span>VERSION:</span><span id="currentVersion"></span></a></div><div class="statmenu" id="statisticBar"></div><div class="navmenu" id="navigationBar"></div></div></nav><div id="modalPlaceholder"></div><div class="bodyWrapper" style="display: none"><div class="centralRow"><div id="navbar2" class="navbarWrapper secondary"><div class="subnavmenu" id="subNavigationBar"></div></div><div class="resizecontainer contentWrapper"><div id="loadingScreen" class="loadingScreen" style="display: none"><i class="fa fa-circle-o-notch fa-spin fa-3x fa-fw margin-bottom"></i> <span class="sr-only">Loading...</span></div><div id="content" class="centralContent"></div><footer class="footer"><div id="footerBar"></div></footer></div></div></div><div id="progressPlaceholder" style="display:none"></div><div id="spotlightPlaceholder" style="display:none"></div><div id="offlinePlaceholder" style="display:none"><div class="offline-div"><div class="pure-u"><div class="pure-u-1-4"></div><div class="pure-u-1-2 offline-window"><div class="offline-header"><h3>You have been disconnected from the server</h3></div><div class="offline-body"><p>The connection to the server has been lost. The server may be under heavy load.</p><p>Trying to reconnect in <span id="offlineSeconds">10</span> seconds.</p><p class="animation_state"><span><button class="button-success">Reconnect now</button></span></p></div></div><div class="pure-u-1-4"></div></div></div></div><div class="arangoFrame" style=""><div class="outerDiv"><div class="innerDiv"></div></div></div><script src="libs.js?version=1465552848551"></script><script src="app.js?version=1465552848551"></script></body></html>
Binary file not shown.
@@ -206,7 +206,20 @@
 if (name === null || name === false) {
 name = "root";
 }
-window.open("query/download/" + encodeURIComponent(name));
+var url = "query/download/" + encodeURIComponent(name);
+$.ajax(url)
+.success(function(result, dummy, request) {
+var blob = new Blob([JSON.stringify(result)], {type: "application/octet-stream"});
+var blobUrl = window.URL.createObjectURL(blob);
+var a = document.createElement("a");
+document.body.appendChild(a);
+a.style = "display: none";
+a.href = blobUrl;
+a.download = request.getResponseHeader("Content-Disposition").replace(/.* filename="([^")]*)"/, "$1");
+a.click();
+window.URL.revokeObjectURL(blobUrl);
+document.body.removeChild(a);
+});
 });
 },
 
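The rewritten download handler above fetches the query result with $.ajax and saves it through a temporary Blob URL instead of calling window.open. A stripped-down sketch of the same browser pattern, using fetch and a caller-supplied filename rather than the Content-Disposition header (downloadJson and the filename argument are illustrative assumptions, not the frontend's actual helper):

    // Fetch a JSON resource and trigger a client-side "save as" via a Blob object URL.
    function downloadJson(url, filename) {
      fetch(url)                                          // same-origin request for the JSON result
        .then(function (response) { return response.json(); })
        .then(function (result) {
          var blob = new Blob([JSON.stringify(result)], { type: "application/octet-stream" });
          var blobUrl = window.URL.createObjectURL(blob);
          var a = document.createElement("a");            // invisible anchor used to trigger the save dialog
          document.body.appendChild(a);
          a.style.display = "none";
          a.href = blobUrl;
          a.download = filename;
          a.click();
          window.URL.revokeObjectURL(blobUrl);            // release the temporary object URL
          document.body.removeChild(a);
        });
    }
    // e.g. downloadJson("query/download/" + encodeURIComponent(name), name + ".json");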
@@ -384,15 +397,29 @@
 query = editor.getValue();
 
 if (query !== '' || query !== undefined || query !== null) {
+var url;
 if (Object.keys(this.bindParamTableObj).length === 0) {
-window.open("query/result/download/" + encodeURIComponent(btoa(JSON.stringify({ query: query }))));
+url = "query/result/download/" + encodeURIComponent(btoa(JSON.stringify({ query: query })));
 }
 else {
-window.open("query/result/download/" + encodeURIComponent(btoa(JSON.stringify({
+url = "query/result/download/" + encodeURIComponent(btoa(JSON.stringify({
 query: query,
 bindVars: this.bindParamTableObj
-}))));
+})));
 }
+$.ajax(url)
+.success(function(result, dummy, request) {
+var blob = new Blob([JSON.stringify(result)], {type: "application/octet-stream"});
+var blobUrl = window.URL.createObjectURL(blob);
+var a = document.createElement("a");
+document.body.appendChild(a);
+a.style = "display: none";
+a.href = blobUrl;
+a.download = request.getResponseHeader("Content-Disposition").replace(/.* filename="([^")]*)"/, "$1");
+a.click();
+window.URL.revokeObjectURL(blobUrl);
+document.body.removeChild(a);
+});
 }
 else {
 arangoHelper.arangoError("Query error", "could not query result.");
@@ -37,6 +37,7 @@
 "ERROR_FILE_EXISTS" : { "code" : 27, "message" : "file exists" },
 "ERROR_LOCKED" : { "code" : 28, "message" : "locked" },
 "ERROR_DEADLOCK" : { "code" : 29, "message" : "deadlock detected" },
+"ERROR_SHUTTING_DOWN" : { "code" : 30, "message" : "shutdown in progress" },
 "ERROR_HTTP_BAD_PARAMETER" : { "code" : 400, "message" : "bad parameter" },
 "ERROR_HTTP_UNAUTHORIZED" : { "code" : 401, "message" : "unauthorized" },
 "ERROR_HTTP_FORBIDDEN" : { "code" : 403, "message" : "forbidden" },
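With the generated errors map gaining ERROR_SHUTTING_DOWN above, code can tell an expected shutdown failure apart from a genuine error by its numeric code. A self-contained illustration (the errors literal is copied from the hunk; isShutdownError is a hypothetical helper, not an existing ArangoDB function):

    var errors = {
      "ERROR_SHUTTING_DOWN" : { "code" : 30, "message" : "shutdown in progress" }
    };

    // Returns true when a numeric error code means "the server is shutting down".
    function isShutdownError(errorNum) {
      return errorNum === errors.ERROR_SHUTTING_DOWN.code;
    }

    console.log(isShutdownError(30));  // true: safe to ignore while stopping
    console.log(isShutdownError(29));  // false: a real error (deadlock detected)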
@@ -34,6 +34,7 @@ var ArangoCollection = arangodb.ArangoCollection;
 var ArangoError = arangodb.ArangoError;
 var request = require("@arangodb/request").request;
 var wait = require("internal").wait;
+var _ = require("lodash");
 
 var endpointToURL = function (endpoint) {
 if (endpoint.substr(0,6) === "ssl://") {
@@ -969,14 +970,21 @@ function launchJob() {
 var shards = Object.keys(jobs.scheduled);
 if (shards.length > 0) {
 var jobInfo = jobs.scheduled[shards[0]];
-registerTask({
-database: jobInfo.database,
-params: {database: jobInfo.database, shard: jobInfo.shard,
-planId: jobInfo.planId, leader: jobInfo.leader},
-command: function(params) {
-require("@arangodb/cluster").synchronizeOneShard(
-params.database, params.shard, params.planId, params.leader);
-}});
+try {
+registerTask({
+database: jobInfo.database,
+params: {database: jobInfo.database, shard: jobInfo.shard,
+planId: jobInfo.planId, leader: jobInfo.leader},
+command: function(params) {
+require("@arangodb/cluster").synchronizeOneShard(
+params.database, params.shard, params.planId, params.leader);
+}});
+} catch (err) {
+if (! require("internal").isStopping()) {
+console.error("Could not registerTask for shard synchronization.");
+}
+return;
+}
 global.KEY_SET("shardSynchronization", "running", jobInfo);
 console.debug("scheduleOneShardSynchronization: have launched job", jobInfo);
 delete jobs.scheduled[shards[0]];
@@ -993,6 +1001,8 @@ function synchronizeOneShard(database, shard, planId, leader) {
 // synchronize this shard from the leader
 // this function will throw if anything goes wrong
 
+var isStopping = require("internal").isStopping;
+
 var ok = false;
 const rep = require("@arangodb/replication");
 
@@ -1006,6 +1016,9 @@ function synchronizeOneShard(database, shard, planId, leader) {
 // can only be one syncCollection in flight
 // at a time
 while (true) {
+if (isStopping()) {
+throw "server is shutting down";
+}
 try {
 sy = rep.syncCollection(shard,
 { endpoint: ep, incremental: true,
@@ -1087,12 +1100,16 @@ function synchronizeOneShard(database, shard, planId, leader) {
 }
 }
 catch (err2) {
-console.error("synchronization of local shard '%s/%s' for central '%s/%s' failed: %s",
-database, shard, database, planId, JSON.stringify(err2));
+if (!isStopping()) {
+console.error("synchronization of local shard '%s/%s' for central '%s/%s' failed: %s",
+database, shard, database, planId, JSON.stringify(err2));
+}
 }
 // Tell others that we are done:
 global.KEY_SET("shardSynchronization", "running", null);
-launchJob(); // start a new one if needed
+if (!isStopping()) {
+launchJob(); // start a new one if needed
+}
 }
 
 ////////////////////////////////////////////////////////////////////////////////
@@ -1673,6 +1690,16 @@ var bootstrapDbServers = function (isRelaunch) {
 /// @brief shard distribution
 ////////////////////////////////////////////////////////////////////////////////
 
+function format(x) {
+var r = {};
+var keys = Object.keys(x);
+for (var i = 0; i < keys.length; ++i) {
+var y = x[keys[i]];
+r[keys[i]] = { leader: y[0], followers: y.slice(1) };
+}
+return r;
+}
+
 function shardDistribution() {
 var db = require("internal").db;
 var dbName = db._name();
@@ -1690,8 +1717,8 @@ function shardDistribution() {
 global.ArangoClusterInfo.getCollectionInfoCurrent(
 dbName, collName, shardNames[j]).servers;
 }
-result[collName] = {Plan: collInfo.shards,
-Current: collInfoCurrent};
+result[collName] = {Plan: format(collInfo.shards),
+Current: format(collInfoCurrent)};
 }
 return result;
 }
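The new format() helper turns the raw server arrays kept in the Plan and Current shard maps into objects that name the leader and its followers, which is what shardDistribution() now returns per collection. A small standalone example of the transformation (the sample shard and server names are made up):

    function format(x) {
      var r = {};
      var keys = Object.keys(x);
      for (var i = 0; i < keys.length; ++i) {
        var y = x[keys[i]];                               // y is [leader, follower, ...]
        r[keys[i]] = { leader: y[0], followers: y.slice(1) };
      }
      return r;
    }

    var shards = { s100001: ["DBServer1", "DBServer2"], s100002: ["DBServer2"] };
    console.log(format(shards));
    // { s100001: { leader: 'DBServer1', followers: [ 'DBServer2' ] },
    //   s100002: { leader: 'DBServer2', followers: [] } }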
@@ -1726,12 +1753,17 @@ function rebalanceShards() {
 var shardNames = Object.keys(collInfo.shards);
 for (k = 0; k < shardNames.length; k++) {
 var shardName = shardNames[k];
-dbTab[collInfo.shards[shardName][0]].push([shardName,true]);
+shardMap[shardName] = { database: databases[i], collection: collName,
+servers: collInfo.shards[shardName],
+weight: 1 };
+dbTab[collInfo.shards[shardName][0]].push(
+{ shard: shardName, leader: true,
+weight: shardMap[shardName].weight });
 for (l = 1; l < collInfo.shards[shardName]; ++l) {
-dbTab[collInfo.shards[shardName][l]].push([shardName,false]);
+dbTab[collInfo.shards[shardName][l]].push(
+{ shard: shardName, leader: false,
+weight: shardMap[shardName].weight });
 }
-shardMap[shardName] = [databases[i], collName,
-collInfo.shards[shardName]];
 }
 }
 } finally {
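rebalanceShards() now records each shard once in shardMap, keyed by shard name with its database, collection, server list and a weight, and fills dbTab with { shard, leader, weight } entries per server instead of bare [name, isLeader] pairs. A toy illustration of the two structures built for a single collection (the names, the fixed database "_system" and the use of .length in the follower loop are assumptions made for the example, not the source):

    var shardMap = {};
    var dbTab = { DBServer1: [], DBServer2: [] };
    var collShards = { s100001: ["DBServer1", "DBServer2"] };   // shard -> [leader, follower]

    Object.keys(collShards).forEach(function (shardName) {
      shardMap[shardName] = { database: "_system", collection: "c",
                              servers: collShards[shardName], weight: 1 };
      dbTab[collShards[shardName][0]].push(
        { shard: shardName, leader: true, weight: shardMap[shardName].weight });
      for (var l = 1; l < collShards[shardName].length; ++l) {
        dbTab[collShards[shardName][l]].push(
          { shard: shardName, leader: false, weight: shardMap[shardName].weight });
      }
    });

    console.log(dbTab);
    // { DBServer1: [ { shard: 's100001', leader: true, weight: 1 } ],
    //   DBServer2: [ { shard: 's100001', leader: false, weight: 1 } ] }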
@@ -29,6 +29,7 @@ ERROR_IP_ADDRESS_INVALID,25,"IP address is invalid","Will be raised when the str
 ERROR_FILE_EXISTS,27,"file exists","Will be raised when a file already exists."
 ERROR_LOCKED,28,"locked","Will be raised when a resource or an operation is locked."
 ERROR_DEADLOCK,29,"deadlock detected","Will be raised when a deadlock is detected when accessing collections."
+ERROR_SHUTTING_DOWN,30,"shutdown in progress","Will be raised when a call cannot succeed because a server shutdown is already in progress."
 
 ################################################################################
 ## HTTP standard errors
@@ -33,6 +33,7 @@ void TRI_InitializeErrorMessages () {
 REG_ERROR(ERROR_FILE_EXISTS, "file exists");
 REG_ERROR(ERROR_LOCKED, "locked");
 REG_ERROR(ERROR_DEADLOCK, "deadlock detected");
+REG_ERROR(ERROR_SHUTTING_DOWN, "shutdown in progress");
 REG_ERROR(ERROR_HTTP_BAD_PARAMETER, "bad parameter");
 REG_ERROR(ERROR_HTTP_UNAUTHORIZED, "unauthorized");
 REG_ERROR(ERROR_HTTP_FORBIDDEN, "forbidden");
@@ -61,6 +61,9 @@
 /// Will be raised when a resource or an operation is locked.
 /// - 29: @LIT{deadlock detected}
 /// Will be raised when a deadlock is detected when accessing collections.
+/// - 30: @LIT{shutdown in progress}
+/// Will be raised when a call cannot succeed because a server shutdown is
+/// already in progress.
 /// - 400: @LIT{bad parameter}
 /// Will be raised when the HTTP request does not fulfill the requirements.
 /// - 401: @LIT{unauthorized}
@@ -911,6 +914,17 @@ void TRI_InitializeErrorMessages ();
 
 #define TRI_ERROR_DEADLOCK (29)
 
+////////////////////////////////////////////////////////////////////////////////
+/// @brief 30: ERROR_SHUTTING_DOWN
+///
+/// shutdown in progress
+///
+/// Will be raised when a call cannot succeed because a server shutdown is
+/// already in progress.
+////////////////////////////////////////////////////////////////////////////////
+
+#define TRI_ERROR_SHUTTING_DOWN (30)
+
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief 400: ERROR_HTTP_BAD_PARAMETER
 ///
@@ -1,13 +1,5 @@
 #!/bin/bash
-echo ===============================================================
-echo Note that it is expected that this cluster test writes warnings
-echo about termination signals to V8 contexts, please ignore!
-echo ===============================================================
 scripts/unittest shell_server --test js/common/tests/shell/shell-quickie.js
 scripts/unittest shell_server --test js/common/tests/shell/shell-quickie.js --cluster true
 scripts/unittest shell_client --test js/common/tests/shell/shell-quickie.js
 scripts/unittest shell_client --test js/common/tests/shell/shell-quickie.js --cluster true
-echo ===============================================================
-echo Note that it is expected that this cluster test writes warnings
-echo about termination signals to V8 contexts, please ignore!
-echo ===============================================================
@@ -0,0 +1,73 @@
+#!/bin/bash
+NRAGENTS=$1
+if [ "$NRAGENTS" == "" ] ; then
+NRAGENTS=1
+fi
+if [[ $(( $NRAGENTS % 2 )) == 0 ]]; then
+echo Number of agents must be odd.
+exit 1
+fi
+echo Number of Agents: $NRAGENTS
+NRDBSERVERS=$2
+if [ "$NRDBSERVERS" == "" ] ; then
+NRDBSERVERS=2
+fi
+echo Number of DBServers: $NRDBSERVERS
+NRCOORDINATORS=$3
+if [ "$NRCOORDINATORS" == "" ] ; then
+NRCOORDINATORS=1
+fi
+echo Number of Coordinators: $NRCOORDINATORS
+
+if [ ! -z "$4" ] ; then
+if [ "$4" == "C" ] ; then
+COORDINATORCONSOLE=1
+echo Starting one coordinator in terminal with --console
+elif [ "$4" == "D" ] ; then
+CLUSTERDEBUGGER=1
+echo Running cluster in debugger.
+elif [ "$4" == "R" ] ; then
+RRDEBUGGER=1
+echo Running cluster in rr with --console.
+fi
+fi
+
+SECONDARIES="$5"
+
+shutdown() {
+PORT=$1
+echo -n "$PORT "
+curl -X DELETE http://localhost:$PORT/_admin/shutdown >/dev/null
+echo
+}
+
+if [ -n "$SECONDARIES" ]; then
+echo "Shutting down secondaries..."
+PORTTOPSE=`expr 8729 + $NRDBSERVERS - 1`
+for PORT in `seq 8729 $PORTTOPSE` ; do
+shutdown $PORT
+done
+fi
+
+echo Shutting down Coordiantors...
+PORTTOPCO=`expr 8530 + $NRCOORDINATORS - 1`
+for p in `seq 8530 $PORTTOPCO` ; do
+shutdown $p
+done
+
+echo Shutting down DBServers...
+PORTTOPDB=`expr 8629 + $NRDBSERVERS - 1`
+for p in `seq 8629 $PORTTOPDB` ; do
+shutdown $p
+done
+
+sleep 1
+
+echo Shutting down agency ...
+for aid in `seq 0 $(( $NRAGENTS - 1 ))`; do
+port=$(( 4001 + $aid ))
+shutdown $port
+done
+
+echo Done, your cluster is gone
+
@@ -0,0 +1,75 @@
+#!/bin/bash
+
+if [ $# -eq 0 ]
+then
+echo Number of agents not specified starting with 3.
+NRAGENTS=3
+else
+NRAGENTS=$1
+echo Number of Agents: $NRAGENTS
+fi
+
+if [ ! -d arangod ] || [ ! -d arangosh ] || [ ! -d UnitTests ] ; then
+echo Must be started in the main ArangoDB source directory.
+exit 1
+fi
+
+if [[ $(( $NRAGENTS % 2 )) == 0 ]]; then
+echo Number of agents must be odd.
+exit 1
+fi
+
+rm -rf agency
+mkdir agency
+echo -n "Starting agency ... "
+if [ $NRAGENTS -gt 1 ]; then
+for aid in `seq 0 $(( $NRAGENTS - 2 ))`; do
+port=$(( 4001 + $aid ))
+build/bin/arangod \
+-c none \
+--agency.id $aid \
+--agency.size $NRAGENTS \
+--agency.supervision true \
+--agency.supervision-frequency 1 \
+--agency.wait-for-sync false \
+--database.directory agency/data$port \
+--javascript.app-path ./js/apps \
+--javascript.startup-directory ./js \
+--javascript.v8-contexts 1 \
+--log.file agency/$port.log \
+--server.authentication false \
+--server.endpoint tcp://127.0.0.1:$port \
+--server.statistics false \
+--agency.compaction-step-size 1000 \
+--log.force-direct true \
+> agency/$port.stdout 2>&1 &
+done
+fi
+for aid in `seq 0 $(( $NRAGENTS - 1 ))`; do
+endpoints="$endpoints --agency.endpoint tcp://localhost:$(( 4001 + $aid ))"
+done
+build/bin/arangod \
+-c none \
+$endpoints \
+--agency.id $(( $NRAGENTS - 1 )) \
+--agency.notify true \
+--agency.size $NRAGENTS \
+--agency.supervision true \
+--agency.supervision-frequency 1 \
+--agency.wait-for-sync false \
+--database.directory agency/data$(( 4001 + $aid )) \
+--javascript.app-path ./js/apps \
+--javascript.startup-directory ./js \
+--javascript.v8-contexts 1 \
+--log.file agency/$(( 4001 + $aid )).log \
+--server.authentication false \
+--server.endpoint tcp://127.0.0.1:$(( 4001 + $aid )) \
+--server.statistics false \
+--agency.compaction-step-size 1000 \
+--log.force-direct true \
+> agency/$(( 4001 + $aid )).stdout 2>&1 &
+
+echo " done."
+echo "Your agents are ready at port 4001 onward"
+