
Merge branch 'devel' of github.com:arangodb/arangodb into devel

This commit is contained in:
hkernbach 2016-08-18 12:42:04 +02:00
commit a4f063ca5d
9 changed files with 135 additions and 52 deletions

View File

@ -100,6 +100,8 @@ ArangoDB comes with a set of easily graspable graphs that are used to demonstrat
You can use the `add samples` tab in the `create graph` window in the web interface, or load the module `@arangodb/graph-examples/example-graph` in arangosh and use it to create instances of these graphs in your ArangoDB.
Once you've created them, you can [inspect them in the web interface](../Administration/WebInterface/Graphs.md) - which was used to create the pictures below.
You can [easily look into the innards of this script](https://github.com/arangodb/arangodb/blob/devel/js/common/modules/%40arangodb/graph-examples/example-graph.js) for reference on how to manage graphs programmatically.
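For example, a sample graph can be created and dropped again from arangosh roughly as follows (a minimal sketch; the `loadGraph`/`dropGraph` helpers and the `"knows_graph"` name are assumed to be what the example-graph module exposes):

```js
// arangosh sketch: create and remove the Knows_Graph via the example-graph module
var examples = require("@arangodb/graph-examples/example-graph");

// creates the vertex and edge collections and registers the graph
var g = examples.loadGraph("knows_graph");
print(g);

// drop the graph again, including its collections
examples.dropGraph("knows_graph");
```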
!SUBSUBSECTION The Knows\_Graph
A set of persons knowing each other:

View File

@ -1,9 +1,9 @@
!CHAPTER Fulltext indexes
!SUBSECTION Introduction to Fulltext Indexes
This is an introduction to ArangoDB's fulltext indexes.
A fulltext index can be used to find words, or prefixes of words inside documents.
A fulltext index can be defined on one attribute only, and will include all words contained in
@ -46,6 +46,7 @@ Other data types are ignored and not indexed.
<!-- js/server/modules/@arangodb/arango-collection.js -->
Ensures that a fulltext index exists:
`collection.ensureIndex({ type: "fulltext", fields: [ "field" ], minLength: minLength })`
Creates a fulltext index on all documents on attribute *field*.
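For example, in arangosh (a brief sketch; the `emails` collection and its `text` attribute are made-up names for illustration):

```js
// create a collection and a fulltext index on its "text" attribute,
// indexing words of at least 3 characters
db._create("emails");
db.emails.ensureIndex({ type: "fulltext", fields: [ "text" ], minLength: 3 });

// the index is then used by the AQL FULLTEXT() function
db.emails.save({ text: "this email mentions fulltext indexes" });
db._query('FOR doc IN FULLTEXT(emails, "text", "indexes") RETURN doc').toArray();
```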
@ -84,6 +85,7 @@ details is returned.
Looks up a fulltext index:
`collection.lookupFulltextIndex(attribute, minLength)`
Checks whether a fulltext index on the given attribute *attribute* exists.
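A short arangosh sketch of the lookup, reusing the hypothetical `emails` collection from above and assuming the call returns `null` when no matching index exists:

```js
var idx = db.emails.lookupFulltextIndex("text", 3);
if (idx === null) {
  // no fulltext index on "text" with minLength 3 yet -- create one
  db.emails.ensureIndex({ type: "fulltext", fields: [ "text" ], minLength: 3 });
}
```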

View File

@ -730,6 +730,14 @@ uint64_t Node::getUInt() const {
}
bool Node::getBool() const {
if (type() == NODE) {
throw StoreException("Must not convert NODE type to bool");
}
return slice().getBool();
}
double Node::getDouble() const {
if (type() == NODE) {

View File

@ -258,6 +258,8 @@ public:
/// @brief Get unsigned value (throws if type NODE or if conversion fails)
uint64_t getUInt() const;
/// @brief Get bool value (throws if type NODE or if conversion fails)
bool getBool() const;
/// @brief Get double value (throws if type NODE or if conversion fails)
double getDouble() const;

View File

@ -31,6 +31,7 @@
#include "Job.h"
#include "Store.h"
#include "ApplicationFeatures/ApplicationServer.h"
#include "Basics/ConditionLocker.h"
#include "VocBase/server.h"
@ -38,6 +39,7 @@
using namespace arangodb;
using namespace arangodb::consensus;
using namespace arangodb::application_features;
std::string Supervision::_agencyPrefix = "/arango";
@ -326,45 +328,73 @@ bool Supervision::doChecks() {
}
void Supervision::run() {
CONDITION_LOCKER(guard, _cv);
TRI_ASSERT(_agent != nullptr);
// Get agency prefix after cluster init
if (_jobId == 0) {
// We need the agency prefix to work, but it is only initialized by
// some other server in the cluster. Since the supervision does not
// make sense at all without other ArangoDB servers, we wait pretty
// long here before giving up:
if (!updateAgencyPrefix(1000, 1)) {
LOG_TOPIC(ERR, Logger::AGENCY)
<< "Cannot get prefix from Agency. Stopping supervision for good.";
return;
}
}
  while (!this->isStopping()) {

    updateSnapshot();

    if (isShuttingDown()) {
      handleShutdown();
    } else if (_agent->leading()) {
      if (!handleJobs()) {
        break;
      }
    }

    _cv.wait(_frequency * 1000000);
  }
}
bool Supervision::isShuttingDown() {
try {
return _snapshot("/Shutdown").getBool();
} catch (...) {
return false;
}
}
void Supervision::handleShutdown() {
LOG_TOPIC(DEBUG, Logger::AGENCY) << "Initiating shutdown";
  Node::Children const& serversRegistered =
      _snapshot(currentServersRegisteredPrefix).children();
bool serversCleared = true;
for (auto const& server : serversRegistered) {
if (server.first == "Version") {
continue;
}
    LOG_TOPIC(DEBUG, Logger::AGENCY)
        << "Waiting for " << server.first << " to shutdown";
    serversCleared = false;
  }
if (serversCleared) {
ApplicationServer::server->beginShutdown();
}
}
bool Supervision::handleJobs() {
// Get bunch of job IDs from agency for future jobs
if (_jobId == 0 || _jobId == _jobIdMax) {
getUniqueIds(); // cannot fail but only hang
}
// Do supervision
doChecks();
shrinkCluster();
workJobs();
return true;
}
void Supervision::workJobs() {

View File

@ -145,6 +145,11 @@ class Supervision : public arangodb::Thread {
void shrinkCluster();
bool isShuttingDown();
bool handleJobs();
void handleShutdown();
Agent* _agent; /**< @brief My agent */
Node _snapshot;

View File

@ -48,6 +48,7 @@
#include "VocBase/vocbase.h"
using namespace arangodb;
using namespace arangodb::application_features;
std::atomic<bool> HeartbeatThread::HasRunOnce(false);
@ -189,24 +190,31 @@ void HeartbeatThread::runDBServer() {
// send an initial GET request to Sync/Commands/my-id
LOG_TOPIC(TRACE, Logger::HEARTBEAT)
<< "Looking at Sync/Commands/" + _myId;
AgencyReadTransaction trx(std::vector<std::string>(
{_agency.prefixPath() + "Shutdown",
_agency.prefixPath() + "Current/Version",
_agency.prefixPath() + "Sync/Commands/" + _myId
}));
  if (isStopping()) {
    break;
  }

  AgencyCommResult result = _agency.sendTransactionWithFailover(trx);

  if (!result.successful()) {
    LOG_TOPIC(WARN, Logger::HEARTBEAT)
        << "Heartbeat: Could not read from agency!";
  } else {
    VPackSlice shutdownSlice = result.slice()[0].get(
        std::vector<std::string>({_agency.prefix(), "Shutdown"}));
if (shutdownSlice.isBool() && shutdownSlice.getBool()) {
ApplicationServer::server->beginShutdown();
break;
}
LOG_TOPIC(TRACE, Logger::HEARTBEAT)
<< "Looking at Sync/Commands/" + _myId;
handleStateChange(result);
VPackSlice s = result.slice()[0].get(
std::vector<std::string>({_agency.prefix(), std::string("Current"),
std::string("Version")}));
if (!s.isInteger()) {
@ -322,7 +330,8 @@ void HeartbeatThread::runCoordinator() {
}
AgencyReadTransaction trx(std::vector<std::string>(
{_agency.prefixPath() + "Plan/Version",
{_agency.prefixPath() + "Shutdown",
_agency.prefixPath() + "Plan/Version",
_agency.prefixPath() + "Current/Version",
_agency.prefixPath() + "Current/Foxxmaster",
_agency.prefixPath() + "Current/FoxxmasterQueueupdate",
@ -334,6 +343,15 @@ void HeartbeatThread::runCoordinator() {
LOG_TOPIC(WARN, Logger::HEARTBEAT)
<< "Heartbeat: Could not read from agency!";
} else {
VPackSlice shutdownSlice = result.slice()[0].get(
std::vector<std::string>({_agency.prefix(), "Shutdown"})
);
if (shutdownSlice.isBool() && shutdownSlice.getBool()) {
ApplicationServer::server->beginShutdown();
break;
}
LOG_TOPIC(TRACE, Logger::HEARTBEAT)
<< "Looking at Sync/Commands/" + _myId;

View File

@ -24,6 +24,7 @@
#include "RestShutdownHandler.h"
#include "Rest/HttpRequest.h"
#include "Cluster/AgencyComm.h"
#include "Cluster/ClusterFeature.h"
#include <velocypack/Builder.h>
@ -47,10 +48,25 @@ RestHandler::status RestShutdownHandler::execute() {
generateError(GeneralResponse::ResponseCode::METHOD_NOT_ALLOWED, 405);
return status::DONE;
}
  bool removeFromCluster;
  std::string const& remove =
      _request->value("remove_from_cluster", removeFromCluster);
  removeFromCluster = removeFromCluster && remove == "1";

  bool shutdownClusterFound;
  std::string const& shutdownCluster =
      _request->value("shutdown_cluster", shutdownClusterFound);

  if (shutdownClusterFound && shutdownCluster == "1") {
AgencyComm agency;
VPackBuilder builder;
builder.add(VPackValue(true));
AgencyCommResult result = agency.setValue("Shutdown", builder.slice(), 0.0);
if (!result.successful()) {
generateError(GeneralResponse::ResponseCode::SERVER_ERROR, 500);
return status::DONE;
}
removeFromCluster = true;
}
if (removeFromCluster) {
ClusterFeature* clusterFeature = ApplicationServer::getFeature<ClusterFeature>("Cluster");
clusterFeature->setUnregisterOnShutdown(true);
}

View File

@ -64,7 +64,7 @@ router.use(foxxRouter)
const installer = createRouter();
foxxRouter.use(installer)
.queryParam('legacy', joi.boolean().default(false), dd`
Flag to install the service in legacy mode.
`)
.queryParam('upgrade', joi.boolean().default(false), dd`