
Merge branch 'devel' of https://github.com/arangodb/arangodb into devel

commit 8dde0e8724
Jan Steemann, 2016-05-31 17:59:45 +02:00
12 changed files with 501 additions and 339 deletions


@@ -10,7 +10,7 @@ newVersionNumber = $(shell cat ../../VERSION)
 # per book targets
 check-summary:
     @echo "##### checking summary for $(NAME)"
-    @find ppbooks/$(NAME) -name \*.md |sed -e "s;ppbooks/$(NAME)/;;" |grep -vf SummaryBlacklist.txt |sort > /tmp/is_md.txt
+    @find ppbooks/$(NAME) -name \*.md |sed -e "s;ppbooks/$(NAME)/;;" |grep -vf SummaryBlacklist.txt |grep -v gitbook-plugin |sort > /tmp/is_md.txt
     @cat $(NAME)/SUMMARY.md |grep -v '^ *# '|grep '(' |sed -e "s;.*(;;" -e "s;).*;;" |sort > /tmp/is_summary.txt
     @if test "`comm -3 /tmp/is_md.txt /tmp/is_summary.txt|wc -l`" -ne 0; then \
        echo "not all files are mapped to the summary!"; \
@@ -159,7 +159,11 @@ build-book:
     fi
     cd ppbooks/$(NAME); if ! test -L SUMMARY.md; then ln -s ../../$(NAME)/SUMMARY.md . ; fi
     cd ppbooks/$(NAME); if ! test -f HEADER.html ; then cp ../../$(NAME)/HEADER.html . ; fi
-    cd ppbooks/$(NAME); if ! test -f book.json ; then cp ../../$(NAME)/book.json . ; fi
+    if test -z "${RELEASE_DIRECTORY}"; then \
+       if ! test -f ppbooks/$(NAME)/book.json ; then cp $(NAME)/book.json ppbooks/$(NAME) ; fi; \
+    else \
+       if ! test -f ppbooks/$(NAME)/book.json ; then cat $(NAME)/book.json |sed "s;/devel;/${RELEASE_DIRECTORY};" > ppbooks/$(NAME)/book.json ; fi; \
+    fi
     cd ppbooks/$(NAME); cp -a ../../$(NAME)/styles/* styles/
     WD=`pwd`; \
     for pic in `find $(NAME) -name \*.png`; do \
@@ -188,7 +192,7 @@ build-book:
     rm -f ./books/$(NAME)/HEADER.html
     python ../Scripts/deprecated.py
-#   make book-check-markdown-leftovers
+    make book-check-markdown-leftovers
 
 clean-book:
     @rm -rvf books/$(NAME)


@@ -52,8 +52,8 @@ Every server instance comes with a `_system` database.
 !SECTION ArangoDB programs
 
 The ArangoDB package comes with the following programs:
 
-- `arangod`: The [ArangoDB database daemon](Arangod.md).
+<!-- TODO: the next link has gone away, bent it over. -->
+- `arangod`: The [ArangoDB database daemon](../Administration/Configuration/Arangod.md).
   This server program is intended to run as a daemon process and to serve the
   various clients connection to the server via TCP / HTTP.


@@ -166,7 +166,8 @@ file paths first, leading to problems when local files used the same names as ot
 modules (e.g. a local file `chai.js` would cause problems when trying to load the
 `chai` module installed in `node_modules`).
 
-For more information see the [blog announcement of this change](https://www.arangodb.com/2015/11/foxx-module-resolution-will-change-in-2-8/) and the [upgrade guide](../Administration/Upgrading28.md#upgrading-foxx-apps-generated-by-arangodb-27-and-earlier).
+For more information see the [blog announcement of this change](https://www.arangodb.com/2015/11/foxx-module-resolution-will-change-in-2-8/)
+and the [upgrade guide](../Administration/Upgrading/Upgrading28.md#upgrading-foxx-apps-generated-by-arangodb-27-and-earlier).
 
 !SUBSECTION Module `org/arangodb/request`


@@ -37,7 +37,7 @@ contains the distance between the given point and the document in meters.
 Note: the *near* simple query function is **deprecated** as of ArangoDB 2.6.
 The function may be removed in future versions of ArangoDB. The preferred
 way for retrieving documents from a collection using the near operator is
-to use the AQL *NEAR* function in an [AQL query](../Aql/GeoFunctions.md) as follows:
+to use the AQL *NEAR* function in an [AQL query](../../AQL/GeoFunctions.html) as follows:
 
     FOR doc IN NEAR(@@collection, @latitude, @longitude, @limit)
       RETURN doc


@@ -26,7 +26,7 @@ contains the distance between the given point and the document in meters.
 Note: the *within* simple query function is **deprecated** as of ArangoDB 2.6.
 The function may be removed in future versions of ArangoDB. The preferred
 way for retrieving documents from a collection using the within operator is
-to use the AQL *WITHIN* function in an [AQL query](../Aql/GeoFunctions.md) as follows:
+to use the AQL *WITHIN* function in an [AQL query](../../AQL//GeoFunctions.html) as follows:
 
     FOR doc IN WITHIN(@@collection, @latitude, @longitude, @radius, @distanceAttributeName)
      RETURN doc


@@ -36,7 +36,7 @@ business
 the path to a server certificate using the \-\-server.keyfile option.
 
 Endpoints can also be changed at runtime.
-Please refer to [HTTP Interface for Endpoints](../HttpEndpoints/README.md)
+Please refer to [HTTP Interface for Endpoints](../../HTTP/HttpEndpoints/index.html)
 for more details.
 
 @endDocuBlock


@@ -0,0 +1,159 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Kaveh Vahedipour
////////////////////////////////////////////////////////////////////////////////
#include "CleanOutServer.h"
#include "Agent.h"
#include "Job.h"
using namespace arangodb::consensus;
CleanOutServer::CleanOutServer (
Node const& snapshot, Agent* agent, std::string const& jobId,
std::string const& creator, std::string const& prefix,
std::string const& server) :
Job(snapshot, agent, jobId, creator, prefix), _server(server) {
if (exists()) {
if (status() == TODO) {
start();
}
}
}
CleanOutServer::~CleanOutServer () {}
unsigned CleanOutServer::status () const {
return 0;
}
bool CleanOutServer::create () const {
return false;
}
bool CleanOutServer::start() const {
// Copy todo to pending
Builder todo, pending;
// Get todo entry
todo.openArray();
_snapshot(toDoPrefix + _jobId).toBuilder(todo);
todo.close();
// Enter peding, remove todo, block toserver
pending.openArray();
// --- Add pending
pending.openObject();
pending.add(_agencyPrefix + pendingPrefix + _jobId,
VPackValue(VPackValueType::Object));
pending.add("timeStarted",
VPackValue(timepointToString(std::chrono::system_clock::now())));
for (auto const& obj : VPackObjectIterator(todo.slice()[0])) {
pending.add(obj.key.copyString(), obj.value);
}
pending.close();
// --- Delete todo
pending.add(_agencyPrefix + toDoPrefix + _jobId,
VPackValue(VPackValueType::Object));
pending.add("op", VPackValue("delete"));
pending.close();
// --- Block toServer
pending.add(_agencyPrefix + blockedServersPrefix + _server,
VPackValue(VPackValueType::Object));
pending.add("jobId", VPackValue(_jobId));
pending.close();
// --- Announce in Sync that server is cleaning out
pending.add(_agencyPrefix + serverStatePrefix + _server,
VPackValue(VPackValueType::Object));
pending.add("cleaning", VPackValue(true));
pending.close();
pending.close();
// Preconditions
// --- Check that toServer not blocked
pending.openObject();
pending.add(_agencyPrefix + blockedServersPrefix + _server,
VPackValue(VPackValueType::Object));
pending.add("oldEmpty", VPackValue(true));
pending.close();
pending.close(); pending.close();
// Transact to agency
write_ret_t res = transact(_agent, pending);
if (res.accepted && res.indices.size()==1 && res.indices[0]) {
LOG_TOPIC(INFO, Logger::AGENCY) << "Pending: Clean out server " + _server;
Node::Children const& databases =
_snapshot("/Plan/Collections").children();
size_t sub = 0;
for (auto const& database : databases) {
for (auto const& collptr : database.second->children()) {
Node const& collection = *(collptr.second);
Node const& replicationFactor = collection("replicationFactor");
if (replicationFactor.slice().getUInt() > 1) {
for (auto const& shard : collection("shards").children()) {
VPackArrayIterator dbsit(shard.second->slice());
// Only proceed if leader and create job
if ((*dbsit.begin()).copyString() != _server) {
/* MoveShardFromLeader (
_snapshot, _agent, _jobId + "-" + std::to_string(sub++),
_jobId, _agencyPrefix, database.first, collptr.first,
shard.first, _server, shard.second->slice()[1].copyString());*/
sub++;
} else {
/* MoveShardFromFollower (
_snapshot, _agent, _jobId + "-" + std::to_string(sub++),
_jobId, _agencyPrefix, database.first, collptr.first,
shard.first, _server, shard.second->slice()[1].copyString());*/
sub++;
}
}
}
}
}
return true;
}
LOG_TOPIC(INFO, Logger::AGENCY) <<
"Precondition failed for starting job " + _jobId;
return false;
}
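The write that start() hands to transact() above follows the agency's transaction convention: a two-element array whose first object carries the writes (create the Pending entry, delete the ToDo entry, block the server, flag it as cleaning) and whose second object carries the preconditions (the server must not already be blocked). The following stand-alone sketch rebuilds a condensed version of that layout with the same VelocyPack Builder calls; the agency paths, job id, timestamp and server name are invented for illustration and are not taken from this commit or a live agency.

#include <velocypack/Builder.h>
#include <velocypack/Value.h>
#include <velocypack/velocypack-aliases.h>

// Builds a [ writes, preconditions ] pair the way CleanOutServer::start() does.
VPackBuilder exampleCleanOutTransaction() {
  VPackBuilder trx;
  trx.openArray();                                  // [ writes, preconditions ]

  trx.openObject();                                 // 1st element: writes
  trx.add("/arango/Target/Pending/1", VPackValue(VPackValueType::Object));
  trx.add("timeStarted", VPackValue("2016-05-31T16:00:00Z"));   // illustrative
  trx.close();
  trx.add("/arango/Target/ToDo/1", VPackValue(VPackValueType::Object));
  trx.add("op", VPackValue("delete"));              // drop the ToDo entry
  trx.close();
  trx.add("/arango/Supervision/DBServers/DBServer001",
          VPackValue(VPackValueType::Object));
  trx.add("jobId", VPackValue("1"));                // block the server
  trx.close();
  trx.close();                                      // end of writes

  trx.openObject();                                 // 2nd element: preconditions
  trx.add("/arango/Supervision/DBServers/DBServer001",
          VPackValue(VPackValueType::Object));
  trx.add("oldEmpty", VPackValue(true));            // server not blocked yet
  trx.close();
  trx.close();                                      // end of preconditions

  trx.close();                                      // end of transaction array
  return trx;
}

Only if the agency accepts both parts does start() go on to walk /Plan/Collections; the per-shard MoveShardFromLeader/MoveShardFromFollower sub-jobs are still commented out in this commit and only counted via sub++.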


@@ -0,0 +1,51 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Kaveh Vahedipour
////////////////////////////////////////////////////////////////////////////////
#ifndef ARANGOD_CONSENSUS_CLEAN_OUT_SERVER_H
#define ARANGOD_CONSENSUS_CLEAN_OUT_SERVER_H 1

#include "Job.h"
#include "Supervision.h"

namespace arangodb {
namespace consensus {

struct CleanOutServer : public Job {

  CleanOutServer (Node const& snapshot, Agent* agent, std::string const& jobId,
                  std::string const& creator, std::string const& prefix,
                  std::string const& server);

  virtual ~CleanOutServer ();

  virtual unsigned status () const override;
  virtual bool create () const override;
  virtual bool start() const override;

  std::string const& _server;

};

}}

#endif


@@ -0,0 +1,221 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Kaveh Vahedipour
////////////////////////////////////////////////////////////////////////////////
#include "FailedServer.h"
#include "Agent.h"
#include "FailedLeader.h"
#include "Job.h"
using namespace arangodb::consensus;
FailedServer::FailedServer(Node const& snapshot, Agent* agent, std::string const& jobId,
std::string const& creator, std::string const& agencyPrefix,
std::string const& failed) :
Job(snapshot, agent, jobId, creator, agencyPrefix), _failed(failed) {
if (exists()) {
if (status() == TODO) {
start();
}
} else {
create();
start();
}
}
FailedServer::~FailedServer () {}
bool FailedServer::start() const {
// Copy todo to pending
Builder todo, pending;
// Get todo entry
todo.openArray();
_snapshot(toDoPrefix + _jobId).toBuilder(todo);
todo.close();
// Prepare peding entry, block toserver
pending.openArray();
// --- Add pending
pending.openObject();
pending.add(_agencyPrefix + pendingPrefix + _jobId,
VPackValue(VPackValueType::Object));
pending.add("timeStarted",
VPackValue(timepointToString(std::chrono::system_clock::now())));
for (auto const& obj : VPackObjectIterator(todo.slice()[0])) {
pending.add(obj.key.copyString(), obj.value);
}
pending.close();
// --- Delete todo
pending.add(_agencyPrefix + toDoPrefix + _jobId,
VPackValue(VPackValueType::Object));
pending.add("op", VPackValue("delete"));
pending.close();
// --- Block toServer
pending.add(_agencyPrefix + blockedServersPrefix + _failed,
VPackValue(VPackValueType::Object));
pending.add("jobId", VPackValue(_jobId));
pending.close();
pending.close();
// Preconditions
// --- Check that toServer not blocked
pending.openObject();
pending.add(_agencyPrefix + blockedServersPrefix + _failed,
VPackValue(VPackValueType::Object));
pending.add("oldEmpty", VPackValue(true));
pending.close();
pending.close(); pending.close();
// Transact to agency
write_ret_t res = transact(_agent, pending);
if (res.accepted && res.indices.size()==1 && res.indices[0]) {
LOG_TOPIC(INFO, Logger::AGENCY) <<
"Pending: DB Server " + _failed + " failed.";
Node::Children const& databases =
_snapshot("/Plan/Collections").children();
size_t sub = 0;
for (auto const& database : databases) {
for (auto const& collptr : database.second->children()) {
Node const& collection = *(collptr.second);
Node const& replicationFactor = collection("replicationFactor");
if (replicationFactor.slice().getUInt() > 1) {
for (auto const& shard : collection("shards").children()) {
VPackArrayIterator dbsit(shard.second->slice());
// Only proceed if leader and create job
if ((*dbsit.begin()).copyString() != _failed) {
continue;
}
FailedLeader(
_snapshot, _agent, _jobId + "-" + std::to_string(sub++), _jobId,
_agencyPrefix, database.first, collptr.first, shard.first,
_failed, shard.second->slice()[1].copyString());
}
}
}
}
return true;
}
LOG_TOPIC(INFO, Logger::AGENCY) <<
"Precondition failed for starting job " + _jobId;
return false;
}
bool FailedServer::create () const {
LOG_TOPIC(INFO, Logger::AGENCY)
<< "Todo: DB Server " + _failed + " failed.";
std::string path = _agencyPrefix + toDoPrefix + _jobId;
Builder todo;
todo.openArray();
todo.openObject();
todo.add(path, VPackValue(VPackValueType::Object));
todo.add("type", VPackValue("failedServer"));
todo.add("server", VPackValue(_failed));
todo.add("jobId", VPackValue(_jobId));
todo.add("creator", VPackValue(_creator));
todo.add("timeCreated",
VPackValue(timepointToString(std::chrono::system_clock::now())));
todo.close(); todo.close(); todo.close();
write_ret_t res = transact(_agent, todo);
if (res.accepted && res.indices.size()==1 && res.indices[0]) {
return true;
}
LOG_TOPIC(INFO, Logger::AGENCY) << "Failed to insert job " + _jobId;
return false;
}
unsigned FailedServer::status () const {
Node const& target = _snapshot("/Target");
if (target.exists(std::string("/ToDo/") + _jobId).size() == 2) {
return TODO;
} else if (target.exists(std::string("/Pending/") + _jobId).size() == 2) {
Node::Children const& subJobs = _snapshot(pendingPrefix).children();
size_t found = 0;
for (auto const& subJob : subJobs) {
if (!subJob.first.compare(0, _jobId.size()+1, _jobId + "-")) {
found++;
Node const& sj = *(subJob.second);
std::string subJobId = sj("jobId").slice().copyString();
std::string creator = sj("creator").slice().copyString();
FailedLeader(_snapshot, _agent, subJobId, creator, _agencyPrefix);
}
}
if (!found) {
if (finish("DBServers/" + _failed)) {
return FINISHED;
}
}
return PENDING;
} else if (target.exists(std::string("/Finished/") + _jobId).size() == 2) {
return FINISHED;
} else if (target.exists(std::string("/Failed/") + _jobId).size() == 2) {
return FAILED;
}
return NOTFOUND;
}
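FailedServer is driven entirely from its constructor: depending on what status() finds under /Target it either resumes an existing ToDo entry or creates and starts a fresh job, and start() then spawns a FailedLeader sub-job for every shard with replicationFactor > 1 that the dead server was leading. Below is a rough usage sketch, assuming a caller such as the supervision loop; the include path, job id, creator string and server name are placeholders, and only the constructor signature and the create()/start() behaviour come from the code above.

#include <string>

#include "Agency/FailedServer.h"  // assumed include path

void reactToDeadServer(arangodb::consensus::Node const& snapshot,
                       arangodb::consensus::Agent* agent) {
  std::string const jobId = "17";             // assumed: next value of a job counter
  std::string const creator = "supervision";  // assumed creator id
  std::string const agencyPrefix = "/arango";
  std::string const deadServer = "DBServer0042";

  // Constructing the job is enough: if no entry exists yet under Target/ToDo
  // or Target/Pending, the constructor calls create() (writes the ToDo entry)
  // and then start() (moves it to Pending and creates FailedLeader sub-jobs
  // for the shards the failed server was leading).
  arangodb::consensus::FailedServer(snapshot, agent, jobId, creator,
                                    agencyPrefix, deadServer);
}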


@@ -0,0 +1,52 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Kaveh Vahedipour
////////////////////////////////////////////////////////////////////////////////
#ifndef ARANGOD_CONSENSUS_FAILED_SERVER_H
#define ARANGOD_CONSENSUS_FAILED_SERVER_H 1

#include "Job.h"
#include "Supervision.h"

namespace arangodb {
namespace consensus {

struct FailedServer : public Job {

  FailedServer(Node const& snapshot, Agent* agent, std::string const& jobId,
               std::string const& creator, std::string const& agencyPrefix,
               std::string const& failed);

  virtual ~FailedServer ();

  virtual bool start() const override;
  virtual bool create () const override;
  virtual unsigned status () const override;

  std::string const& _failed;

};

}}

#endif


@@ -24,7 +24,9 @@
 #include "Supervision.h"
 
 #include "Agent.h"
+#include "CleanOutServer.h"
 #include "FailedLeader.h"
+#include "FailedServer.h"
 #include "Job.h"
 #include "Store.h"
@@ -36,336 +38,6 @@ using namespace arangodb;
 using namespace arangodb::consensus;
 
-struct FailedServer : public Job {
-
-  FailedServer(Node const& snapshot, Agent* agent, std::string const& jobId,
-               std::string const& creator, std::string const& agencyPrefix,
-               std::string const& failed) :
-    Job(snapshot, agent, jobId, creator, agencyPrefix), _failed(failed) {
-
-    if (exists()) {
-      if (status() == TODO) {
-        start();
-      }
-    } else {
-      create();
-      start();
-    }
-  }
-
-  virtual ~FailedServer () {}
-
-  virtual bool start() const {
-
-    // Copy todo to pending
-    Builder todo, pending;
-
-    // Get todo entry
-    todo.openArray();
-    _snapshot(toDoPrefix + _jobId).toBuilder(todo);
-    todo.close();
-
-    // Prepare peding entry, block toserver
-    pending.openArray();
-
-    // --- Add pending
-    pending.openObject();
-    pending.add(_agencyPrefix + pendingPrefix + _jobId,
-                VPackValue(VPackValueType::Object));
-    pending.add("timeStarted",
-                VPackValue(timepointToString(std::chrono::system_clock::now())));
-    for (auto const& obj : VPackObjectIterator(todo.slice()[0])) {
-      pending.add(obj.key.copyString(), obj.value);
-    }
-    pending.close();
-
-    // --- Delete todo
-    pending.add(_agencyPrefix + toDoPrefix + _jobId,
-                VPackValue(VPackValueType::Object));
-    pending.add("op", VPackValue("delete"));
-    pending.close();
-
-    // --- Block toServer
-    pending.add(_agencyPrefix + blockedServersPrefix + _failed,
-                VPackValue(VPackValueType::Object));
-    pending.add("jobId", VPackValue(_jobId));
-    pending.close();
-
-    pending.close();
-
-    // Preconditions
-    // --- Check that toServer not blocked
-    pending.openObject();
-    pending.add(_agencyPrefix + blockedServersPrefix + _failed,
-                VPackValue(VPackValueType::Object));
-    pending.add("oldEmpty", VPackValue(true));
-    pending.close();
-
-    pending.close(); pending.close();
-
-    // Transact to agency
-    write_ret_t res = transact(_agent, pending);
-
-    if (res.accepted && res.indices.size()==1 && res.indices[0]) {
-
-      LOG_TOPIC(INFO, Logger::AGENCY) <<
-        "Pending: DB Server " + _failed + " failed.";
-
-      Node::Children const& databases =
-        _snapshot("/Plan/Collections").children();
-
-      size_t sub = 0;
-
-      for (auto const& database : databases) {
-        for (auto const& collptr : database.second->children()) {
-          Node const& collection = *(collptr.second);
-          Node const& replicationFactor = collection("replicationFactor");
-          if (replicationFactor.slice().getUInt() > 1) {
-            for (auto const& shard : collection("shards").children()) {
-              VPackArrayIterator dbsit(shard.second->slice());
-
-              // Only proceed if leader and create job
-              if ((*dbsit.begin()).copyString() != _failed) {
-                continue;
-              }
-
-              FailedLeader(
-                _snapshot, _agent, _jobId + "-" + std::to_string(sub++), _jobId,
-                _agencyPrefix, database.first, collptr.first, shard.first,
-                _failed, shard.second->slice()[1].copyString());
-            }
-          }
-        }
-      }
-
-      return true;
-    }
-
-    LOG_TOPIC(INFO, Logger::AGENCY) <<
-      "Precondition failed for starting job " + _jobId;
-    return false;
-  }
-
-  virtual bool create () const {
-
-    LOG_TOPIC(INFO, Logger::AGENCY)
-      << "Todo: DB Server " + _failed + " failed.";
-
-    std::string path = _agencyPrefix + toDoPrefix + _jobId;
-
-    Builder todo;
-    todo.openArray();
-    todo.openObject();
-    todo.add(path, VPackValue(VPackValueType::Object));
-    todo.add("type", VPackValue("failedServer"));
-    todo.add("server", VPackValue(_failed));
-    todo.add("jobId", VPackValue(_jobId));
-    todo.add("creator", VPackValue(_creator));
-    todo.add("timeCreated",
-             VPackValue(timepointToString(std::chrono::system_clock::now())));
-    todo.close(); todo.close(); todo.close();
-
-    write_ret_t res = transact(_agent, todo);
-
-    if (res.accepted && res.indices.size()==1 && res.indices[0]) {
-      return true;
-    }
-
-    LOG_TOPIC(INFO, Logger::AGENCY) << "Failed to insert job " + _jobId;
-    return false;
-  }
-
-  virtual unsigned status () const {
-
-    Node const& target = _snapshot("/Target");
-
-    if (target.exists(std::string("/ToDo/") + _jobId).size() == 2) {
-      return TODO;
-    } else if (target.exists(std::string("/Pending/") + _jobId).size() == 2) {
-
-      Node::Children const& subJobs = _snapshot(pendingPrefix).children();
-
-      size_t found = 0;
-
-      for (auto const& subJob : subJobs) {
-        if (!subJob.first.compare(0, _jobId.size()+1, _jobId + "-")) {
-          found++;
-          Node const& sj = *(subJob.second);
-          std::string subJobId = sj("jobId").slice().copyString();
-          std::string creator = sj("creator").slice().copyString();
-          FailedLeader(_snapshot, _agent, subJobId, creator, _agencyPrefix);
-        }
-      }
-
-      if (!found) {
-        if (finish("DBServers/" + _failed)) {
-          return FINISHED;
-        }
-      }
-
-      return PENDING;
-
-    } else if (target.exists(std::string("/Finished/") + _jobId).size() == 2) {
-      return FINISHED;
-    } else if (target.exists(std::string("/Failed/") + _jobId).size() == 2) {
-      return FAILED;
-    }
-
-    return NOTFOUND;
-  }
-
-  std::string const& _failed;
-
-};
-
-struct CleanOutServer : public Job {
-
-  CleanOutServer (Node const& snapshot, Agent* agent, std::string const& jobId,
-                  std::string const& creator, std::string const& prefix,
-                  std::string const& server) :
-    Job(snapshot, agent, jobId, creator, prefix), _server(server) {
-
-    if (exists()) {
-      if (status() == TODO) {
-        start();
-      }
-    }
-  }
-
-  virtual ~CleanOutServer () {}
-
-  virtual unsigned status () const {
-    return 0;
-  }
-
-  virtual bool create () const {
-    return false;
-  }
-
-  virtual bool start() const {
-
-    // Copy todo to pending
-    Builder todo, pending;
-
-    // Get todo entry
-    todo.openArray();
-    _snapshot(toDoPrefix + _jobId).toBuilder(todo);
-    todo.close();
-
-    // Enter peding, remove todo, block toserver
-    pending.openArray();
-
-    // --- Add pending
-    pending.openObject();
-    pending.add(_agencyPrefix + pendingPrefix + _jobId,
-                VPackValue(VPackValueType::Object));
-    pending.add("timeStarted",
-                VPackValue(timepointToString(std::chrono::system_clock::now())));
-    for (auto const& obj : VPackObjectIterator(todo.slice()[0])) {
-      pending.add(obj.key.copyString(), obj.value);
-    }
-    pending.close();
-
-    // --- Delete todo
-    pending.add(_agencyPrefix + toDoPrefix + _jobId,
-                VPackValue(VPackValueType::Object));
-    pending.add("op", VPackValue("delete"));
-    pending.close();
-
-    // --- Block toServer
-    pending.add(_agencyPrefix + blockedServersPrefix + _server,
-                VPackValue(VPackValueType::Object));
-    pending.add("jobId", VPackValue(_jobId));
-    pending.close();
-
-    // --- Announce in Sync that server is cleaning out
-    pending.add(_agencyPrefix + serverStatePrefix + _server,
-                VPackValue(VPackValueType::Object));
-    pending.add("cleaning", VPackValue(true));
-    pending.close();
-
-    pending.close();
-
-    // Preconditions
-    // --- Check that toServer not blocked
-    pending.openObject();
-    pending.add(_agencyPrefix + blockedServersPrefix + _server,
-                VPackValue(VPackValueType::Object));
-    pending.add("oldEmpty", VPackValue(true));
-    pending.close();
-
-    pending.close(); pending.close();
-
-    // Transact to agency
-    write_ret_t res = transact(_agent, pending);
-
-    if (res.accepted && res.indices.size()==1 && res.indices[0]) {
-
-      LOG_TOPIC(INFO, Logger::AGENCY) << "Pending: Clean out server " + _server;
-
-      Node::Children const& databases =
-        _snapshot("/Plan/Collections").children();
-
-      size_t sub = 0;
-
-      for (auto const& database : databases) {
-        for (auto const& collptr : database.second->children()) {
-          Node const& collection = *(collptr.second);
-          Node const& replicationFactor = collection("replicationFactor");
-          if (replicationFactor.slice().getUInt() > 1) {
-            for (auto const& shard : collection("shards").children()) {
-              VPackArrayIterator dbsit(shard.second->slice());
-
-              // Only proceed if leader and create job
-              if ((*dbsit.begin()).copyString() != _server) {
-                /* MoveShardFromLeader (
-                  _snapshot, _agent, _jobId + "-" + std::to_string(sub++),
-                  _jobId, _agencyPrefix, database.first, collptr.first,
-                  shard.first, _server, shard.second->slice()[1].copyString());*/
-                sub++;
-              } else {
-                /* MoveShardFromFollower (
-                  _snapshot, _agent, _jobId + "-" + std::to_string(sub++),
-                  _jobId, _agencyPrefix, database.first, collptr.first,
-                  shard.first, _server, shard.second->slice()[1].copyString());*/
-                sub++;
-              }
-            }
-          }
-        }
-      }
-
-      return true;
-    }
-
-    LOG_TOPIC(INFO, Logger::AGENCY) <<
-      "Precondition failed for starting job " + _jobId;
-    return false;
-  };
-
-  std::string const& _server;
-
-};
 
 std::string Supervision::_agencyPrefix = "/arango";
 
 Supervision::Supervision()


@@ -84,7 +84,9 @@ add_executable(${BIN_ARANGOD}
   Agency/Agent.cpp
   Agency/AgentCallback.cpp
   Agency/Constituent.cpp
+  Agency/CleanOutServer.cpp
   Agency/FailedLeader.cpp
+  Agency/FailedServer.cpp
   Agency/NotifierThread.cpp
   Agency/NotifyCallback.cpp
   Agency/Node.cpp