
Merge branch 'devel' of github.com:arangodb/arangodb into devel

Michael Hackstein 2016-05-09 13:45:20 +02:00
commit ab13d1e3a6
21 changed files with 400 additions and 151 deletions

.gitignore vendored
View File

@ -13,6 +13,7 @@
*.gcno
*.gcda
.DS_Store
*.swp
*.diff
*.patch

View File

@ -73,12 +73,69 @@ endif()
set(ROCKSDB_PLATFORM_POSIX 1)
################################################################################
## OPERATING SYSTEM
################################################################################
set(OS_DEFINE "")
if (WIN32)
set(WINDOWS TRUE)
set(MSBUILD TRUE)
set(OS_DEFINE "OS_WIN")
elseif (UNIX AND NOT APPLE)
if(CMAKE_SYSTEM_NAME MATCHES ".*Linux")
set(LINUX TRUE)
set(OS_DEFINE "OS_LINUX")
elseif (CMAKE_SYSTEM_NAME MATCHES "kFreeBSD.*")
set(FREEBSD TRUE)
set(OS_DEFINE "OS_FREEBSD")
elseif (CMAKE_SYSTEM_NAME MATCHES "kNetBSD.*|NetBSD.*")
set(NETBSD TRUE)
set(OS_DEFINE "OS_FREEBSD")
elseif (CMAKE_SYSTEM_NAME MATCHES "kOpenBSD.*|OpenBSD.*")
set(OPENBSD TRUE)
set(OS_DEFINE "OS_OPENBSD")
elseif (CMAKE_SYSTEM_NAME MATCHES ".*GNU.*")
set(GNU TRUE)
set(OS_DEFINE "OS_LINUX")
elseif (CMAKE_SYSTEM_NAME MATCHES ".*BSDI.*")
set(BSDI TRUE)
set(OS_DEFINE "OS_FREEBSD")
elseif (CMAKE_SYSTEM_NAME MATCHES "DragonFly.*|FreeBSD")
set(FREEBSD TRUE)
set(OS_DEFINE "OS_FREEBSD")
elseif (CMAKE_SYSTEM_NAME MATCHES "SYSV5.*")
set(SYSV5 TRUE)
set(OS_DEFINE "OS_FREEBSD")
elseif ((CMAKE_SYSTEM_NAME MATCHES "Solaris.*") OR (CMAKE_SYSTEM_NAME MATCHES "SunOS.*"))
set(SOLARIS TRUE)
set(OS_DEFINE "OS_SOLARIS")
elseif (CMAKE_SYSTEM_NAME MATCHES "HP-UX.*")
set(HPUX TRUE)
set(OS_DEFINE "OS_SOLARIS")
elseif (CMAKE_SYSTEM_NAME MATCHES "AIX.*")
set(AIX TRUE)
set(OS_DEFINE "OS_SOLARIS")
elseif (CMAKE_SYSTEM_NAME MATCHES "Minix.*")
set(MINIX TRUE)
set(OS_DEFINE "OS_FREEBSD")
endif ()
elseif (APPLE)
if (CMAKE_SYSTEM_NAME MATCHES ".*Darwin.*")
set(DARWIN TRUE)
set(OS_DEFINE "OS_MACOSX")
elseif (CMAKE_SYSTEM_NAME MATCHES ".*MacOS.*")
set(MACOSX TRUE)
set(OS_DEFINE "OS_MACOSX")
endif ()
endif ()
if (CMAKE_COMPILER_IS_GNUCC)
if (VERBOSE)
message(STATUS "Compiler type GNU: ${CMAKE_CXX_COMPILER}")
endif ()
set(BASE_FLAGS "${BASE_FLAGS} -W -Wextra -Wall -Wsign-compare -Wshadow -Wno-unused-parameter -fno-omit-frame-pointer -momit-leaf-frame-pointer -fno-builtin-memcmp -Woverloaded-virtual -Wnon-virtual-dtor -Wno-missing-field-initializers -DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX -DOS_LINUX -DROCKSDB_FALLOCATE_PRESENT -DSNAPPY -DZLIB -DROCKSDB_MALLOC_USABLE_SIZE -march=native -isystem -fPIC")
set(BASE_FLAGS "${BASE_FLAGS} -W -Wextra -Wall -Wsign-compare -Wshadow -Wno-unused-parameter -fno-omit-frame-pointer -momit-leaf-frame-pointer -fno-builtin-memcmp -Woverloaded-virtual -Wnon-virtual-dtor -Wno-missing-field-initializers -DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX -DOS_LINUX -DROCKSDB_FALLOCATE_PRESENT -DSNAPPY -DZLIB -DROCKSDB_MALLOC_USABLE_SIZE -march=native -isystem -fPIC -D${OS_DEFINE}")
set(CMAKE_C_FLAGS "-g" CACHE INTERNAL "default C compiler flags")
set(CMAKE_C_FLAGS_DEBUG "-O0 -g -Werror" CACHE INTERNAL "C debug flags")
@ -97,7 +154,7 @@ elseif (CMAKE_COMPILER_IS_CLANG)
message(STATUS "Compiler type CLANG: ${CMAKE_CXX_COMPILER}")
endif ()
set(BASE_FLAGS "${BASE_FLAGS} -Wall -Wextra -Wno-unused-parameter")
set(BASE_FLAGS "${BASE_FLAGS} -Wall -Wextra -Wno-unused-parameter -D${OS_DEFINE}")
set(CMAKE_C_FLAGS "-g" CACHE INTERNAL "default C compiler flags")
set(CMAKE_C_FLAGS_DEBUG "-O0 -g" CACHE INTERNAL "C debug flags")
@ -113,7 +170,7 @@ elseif (CMAKE_COMPILER_IS_CLANG)
elseif (MSVC)
if (VERBOSE)
message(STATUS "Compiler type MSVC: ${CMAKE_CXX_COMPILER}")
message(STATUS "Compiler type MSVC: ${CMAKE_CXX_COMPILER} -D${OS_DEFINE}")
endif ()
set(CMAKE_C_FLAGS "/MTd" CACHE INTERNAL "default C++ compiler flags")
@ -145,7 +202,7 @@ else ()
# unknown compiler
message(STATUS "Compiler type UNKNOWN: ${CMAKE_CXX_COMPILER}")
set(BASE_FLAGS "${BASE_FLAGS} -Wall")
set(BASE_FLAGS "${BASE_FLAGS} -Wall -D${OS_DEFINE}")
set(CMAKE_C_FLAGS "-g" CACHE INTERNAL "default C compiler flags")
set(CMAKE_C_FLAGS_DEBUG "-O0 -g" CACHE INTERNAL "C debug flags")

View File

@ -159,10 +159,61 @@ The AQL optimizer rule "merge-traversal-filter" that already existed in 3.0 was
"optimize-traversals". This should be of no relevance to client applications except if
they programatically look for applied optimizer rules in the explain out of AQL queries.
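As a brief illustration (an arangosh sketch; the collection names `verts` and `edges` below are hypothetical), the applied rules can be inspected via a statement's explain output:

```js
// arangosh sketch: check which optimizer rules were applied to a traversal query
var db = require("@arangodb").db;  // already available as a global in arangosh

var stmt = db._createStatement({
  query: "FOR v, e, p IN 1..2 OUTBOUND 'verts/start' edges " +
         "FILTER p.edges[0].weight > 1 RETURN v"
});

var rules = stmt.explain().plan.rules;  // array of applied optimizer rule names
// in 3.0 the rule shows up as "optimize-traversals", no longer as "merge-traversal-filter"
print(rules.indexOf("optimize-traversals") !== -1);
```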
!SECTION Upgraded V8 version
The V8 engine that is used inside ArangoDB to execute JavaScript code has been upgraded from
version 4.3.61 to 5.0.71.39. The new version should be mostly compatible with the old version,
but there may be subtle differences, including changes in the error message texts thrown by the
engine.
Furthermore, some V8 startup parameters have changed their meaning or have been removed in
the new version. This is only relevant when ArangoDB or the ArangoShell is started with a custom
value for the `--javascript.v8-options` startup option (see the example following the list below).
Among others, the following V8 options change in the new version of ArangoDB:
- `--es_staging`: in 2.8 it had the meaning `enable all completed harmony features`, in 3.0
the option means `enable test-worthy harmony features (for internal use only)`
- `--strong_this`: this option wasn't present in 2.8. In 3.0 it means `don't allow 'this' to escape from constructors`
and defaults to true.
- `--harmony_regexps`: this option means `enable "harmony regular expression extensions"`
and changes its default value from false to true
- `--harmony_proxies`: this option means `enable "harmony proxies"` and changes its default
value from false to true
- `--harmony_reflect`: this option means `enable "harmony Reflect API"` and changes its default
value from false to true
- `--harmony_sloppy`: this option means `enable "harmony features in sloppy mode"` and changes
its default value from false to true
- `--harmony_tostring`: this option means `enable "harmony toString"` and changes its default
value from false to true
- `--harmony_unicode_regexps`: this option means `enable "harmony unicode regexps"` and changes
its default value from false to true
- `--harmony_arrays`, `--harmony_array_includes`, `--harmony_computed_property_names`,
`--harmony_arrow_functions`, `--harmony_rest_parameters`, `--harmony_classes`,
`--harmony_object_literals`, `--harmony_numeric_literals`, `--harmony_unicode`:
these options have been removed in V8 5.
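For illustration only (the particular flag chosen here is arbitrary), a custom V8 option is passed via the startup option mentioned above; the same syntax works for arangod:

```
arangosh --javascript.v8-options="--noharmony_regexps"
```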
!SECTION JavaScript API changes
The following incompatible changes have been made to the JavaScript API in ArangoDB 3.0:
!SUBSECTION Foxx
The Foxx framework has been completely rewritten for 3.0 with a new, simpler and more
familiar API. To make Foxx applications from earlier ArangoDB versions run in 3.0
without code changes, the application's manifest file needs to be edited.
To enable the legacy mode for a Foxx application, add `"engines": {"arangodb": "^2.8.0"}`
(or similar version ranges that exclude 3.0 and up) to the service manifest file
(named "manifest.json", located in the application's base directory).
!SUBSECTION Edges API
When completely replacing an edge via a collection's `replace()` function the replacing

View File

@ -118,7 +118,7 @@ std::vector<arangodb::consensus::index_t> State::log (
_log.push_back(log_t(idx[j], term, lid, buf)); // log to RAM
persist(idx[j], term, lid, i[0]); // log to disk
if (idx[j] > 0 && (idx[j] % _compaction_step) == 0) {
compact(idx[j]);
//compact(idx[j]);
}
++j;
}
@ -145,7 +145,7 @@ bool State::log(query_t const& queries, term_t term,
_log.push_back(log_t(idx, term, lid, buf));
persist(idx, term, lid, i.get("query")); // to disk
if (idx > 0 && (idx % _compaction_step) == 0) {
compact(idx);
//compact(idx);
}
} catch (std::exception const& e) {
LOG(ERR) << e.what();
@ -321,6 +321,54 @@ bool State::find (arangodb::consensus::index_t prevIndex, term_t prevTerm) {
bool State::compact (arangodb::consensus::index_t cind) {
bool saved = persistSpearhead(cind);
if (saved) {
compactPersistedState(cind);
compactVolatileState(cind);
return true;
} else {
return false;
}
}
bool State::compactVolatileState (arangodb::consensus::index_t cind) {
_log.erase(_log.begin(), _log.begin()+_compaction_step-1);
_cur = _log.begin()->index;
return true;
}
bool State::compactPersistedState (arangodb::consensus::index_t cind) {
auto bindVars = std::make_shared<VPackBuilder>();
bindVars->openObject();
bindVars->close();
std::string const aql(
"FOR l IN log FILTER l._key < 'deleted' REMOVE l IN log");
arangodb::aql::Query query(false, _vocbase,
aql.c_str(), aql.size(), bindVars, nullptr,
arangodb::aql::PART_MAIN);
auto queryResult = query.execute(QueryRegistryFeature::QUERY_REGISTRY);
if (queryResult.code != TRI_ERROR_NO_ERROR) {
THROW_ARANGO_EXCEPTION_MESSAGE(queryResult.code, queryResult.details);
}
VPackSlice result = queryResult.result->slice();
LOG(INFO) << result.toJson();
return true;
}
bool State::persistSpearhead (arangodb::consensus::index_t cind) {
if (checkCollection("compact")) {
Builder store;
@ -348,10 +396,8 @@ bool State::compact (arangodb::consensus::index_t cind) {
auto result = trx.insert("compact", store.slice(), _options);
res = trx.finish(result.code);
/*if (res == TRI_ERROR_NO_ERROR) {
_log.erase(_log.begin(), _log.begin()+_compaction_step-1);
_cur = _log.begin()->index;
}*/
return (res == TRI_ERROR_NO_ERROR);
}
LOG_TOPIC (ERR, Logger::AGENCY) << "Compaction failed!";

View File

@ -134,6 +134,10 @@ private:
bool compact (arangodb::consensus::index_t cind);
bool compactPersistedState (arangodb::consensus::index_t cind);
bool compactVolatileState (arangodb::consensus::index_t cind);
bool persistSpearhead (arangodb::consensus::index_t cind);
Agent* _agent;
TRI_vocbase_t* _vocbase;

View File

@ -92,10 +92,18 @@ std::vector<check_t> Supervision::check (std::string const& path) {
continue;
} else {
query_t report = std::make_shared<Builder>();
report->openArray(); report->openArray(); report->openObject();
report->openArray(); report->openArray();
report->add(VPackValue(std::string("/arango/Supervision/Health/" + serverID)));
try {
VPackObjectBuilder c(&(*report));
report->add("Status", VPackValue("GOOD"));
report->close(); report->close(); report->close();
LOG(DEBUG) << "GOOD:" << serverID<< it->second->serverTimestamp << ":" << it->second->serverStatus;
report->add("LastHearbeat", VPackValue("GOOD"));
} catch (std::exception const& e) {
LOG(ERR) << e.what();
}
report->close(); report->close();
LOG_TOPIC(DEBUG, Logger::AGENCY) << "GOOD:" << serverID <<
it->second->serverTimestamp << ":" << it->second->serverStatus;
it->second->update(lastHeartbeatStatus,lastHeartbeatTime);
}
} else { // New server

View File

@ -37,7 +37,6 @@
#include "Cluster/ServerState.h"
#include "Endpoint/Endpoint.h"
#include "Logger/Logger.h"
#include "Logger/Logger.h"
#include "Random/RandomGenerator.h"
#include "Rest/HttpRequest.h"
#include "Rest/HttpResponse.h"
@ -593,7 +592,8 @@ bool AgencyComm::tryConnect() {
// mop: not sure if a timeout makes sense here
while (true) {
LOG(INFO) << "Trying to find an active agency. Checking " << endpointsStr;
LOG_TOPIC(INFO, Logger::AGENCYCOMM)
<< "Trying to find an active agency. Checking " << endpointsStr;
std::list<AgencyEndpoint*>::iterator it = _globalEndpoints.begin();
while (it != _globalEndpoints.end()) {
@ -862,8 +862,8 @@ void AgencyComm::disconnect() {
bool AgencyComm::addEndpoint(std::string const& endpointSpecification,
bool toFront) {
LOG(TRACE) << "adding global agency-endpoint '" << endpointSpecification
<< "'";
LOG_TOPIC(TRACE, Logger::AGENCYCOMM)
<< "adding global agency-endpoint '" << endpointSpecification << "'";
{
WRITE_LOCKER(writeLocker, AgencyComm::_globalLock);
@ -1048,7 +1048,8 @@ bool AgencyComm::setPrefix(std::string const& prefix) {
}
}
LOG(TRACE) << "setting agency-prefix to '" << prefix << "'";
LOG_TOPIC(TRACE, Logger::AGENCYCOMM)
<< "setting agency-prefix to '" << prefix << "'";
return true;
}
@ -1284,8 +1285,8 @@ AgencyCommResult AgencyComm::getValues(std::string const& key, bool recursive) {
}
}
} else if (node.isArray()) {
LOG(ERR) << node.toJson();
LOG(ERR) << "Oops...TODO array unexpected";
LOG_TOPIC(ERR, Logger::AGENCYCOMM) << node.toJson();
LOG_TOPIC(ERR, Logger::AGENCYCOMM) << "Oops...TODO array unexpected";
} else {
builder.add("value", node);
}
@ -1333,13 +1334,16 @@ AgencyCommResult AgencyComm::getValues(std::string const& key, bool recursive) {
VPackStringSink sink(&result._body);
VPackDumper::dump(s, &sink, &dumperOptions);
LOG(TRACE) << "Created fake etcd node" << result._body;
LOG_TOPIC(TRACE, Logger::AGENCYCOMM)
<< "Created fake etcd node" << result._body;
result._statusCode = 200;
} catch(std::exception &e) {
LOG(ERR) << "Error transforming result. " << e.what();
LOG_TOPIC(ERR, Logger::AGENCYCOMM)
<< "Error transforming result. " << e.what();
result.clear();
} catch(...) {
LOG(ERR) << "Error transforming result. Out of memory";
LOG_TOPIC(ERR, Logger::AGENCYCOMM)
<< "Error transforming result. Out of memory";
result.clear();
}
@ -1390,10 +1394,12 @@ AgencyCommResult AgencyComm::getValues2(std::string const& key) {
result._statusCode = 200;
} catch(std::exception &e) {
LOG(ERR) << "Error transforming result. " << e.what();
LOG_TOPIC(ERR, Logger::AGENCYCOMM)
<< "Error transforming result. " << e.what();
result.clear();
} catch(...) {
LOG(ERR) << "Error transforming result. Out of memory";
LOG_TOPIC(ERR, Logger::AGENCYCOMM)
<< "Error transforming result. Out of memory";
result.clear();
}
@ -1576,7 +1582,8 @@ uint64_t AgencyComm::uniqid(uint64_t count, double timeout) {
{prefixStripped(), "Sync", "LatestID"}));
if (!(oldSlice.isSmallInt() || oldSlice.isUInt())) {
LOG(WARN) << "Sync/LatestID in agency is not an unsigned integer, fixing...";
LOG_TOPIC(WARN, Logger::AGENCYCOMM)
<< "Sync/LatestID in agency is not an unsigned integer, fixing...";
try {
VPackBuilder builder;
builder.add(VPackValue(0));
@ -1866,10 +1873,12 @@ AgencyCommResult AgencyComm::sendTransactionWithFailover(
result._statusCode = 200;
} catch(std::exception &e) {
LOG(ERR) << "Error transforming result. " << e.what();
LOG_TOPIC(ERR, Logger::AGENCYCOMM)
<< "Error transforming result. " << e.what();
result.clear();
} catch(...) {
LOG(ERR) << "Error transforming result. Out of memory";
LOG_TOPIC(ERR, Logger::AGENCYCOMM)
<< "Error transforming result. Out of memory";
result.clear();
}
return result;
@ -1964,7 +1973,8 @@ AgencyCommResult AgencyComm::sendWithFailover(
if (!AgencyComm::hasEndpoint(endpoint)) {
AgencyComm::addEndpoint(endpoint, true);
LOG(INFO) << "adding agency-endpoint '" << endpoint << "'";
LOG_TOPIC(INFO, Logger::AGENCYCOMM)
<< "adding agency-endpoint '" << endpoint << "'";
// re-check the new endpoint
if (AgencyComm::hasEndpoint(endpoint)) {
@ -1972,7 +1982,8 @@ AgencyCommResult AgencyComm::sendWithFailover(
continue;
}
LOG(ERR) << "found redirection to unknown endpoint '" << endpoint
LOG_TOPIC(ERR, Logger::AGENCYCOMM)
<< "found redirection to unknown endpoint '" << endpoint
<< "'. Will not follow!";
// this is an error
@ -2029,7 +2040,8 @@ AgencyCommResult AgencyComm::send(
result._connected = false;
result._statusCode = 0;
LOG(TRACE) << "sending " << arangodb::HttpRequest::translateMethod(method)
LOG_TOPIC(TRACE, Logger::AGENCYCOMM)
<< "sending " << arangodb::HttpRequest::translateMethod(method)
<< " request to agency at endpoint '"
<< connection->getEndpoint()->specification() << "', url '" << url
<< "': " << body;
@ -2052,7 +2064,7 @@ AgencyCommResult AgencyComm::send(
if (response == nullptr) {
connection->disconnect();
result._message = "could not send request to agency";
LOG(TRACE) << "sending request to agency failed";
LOG_TOPIC(TRACE, Logger::AGENCYCOMM) << "sending request to agency failed";
return result;
}
@ -2060,7 +2072,7 @@ AgencyCommResult AgencyComm::send(
if (!response->isComplete()) {
connection->disconnect();
result._message = "sending request to agency failed";
LOG(TRACE) << "sending request to agency failed";
LOG_TOPIC(TRACE, Logger::AGENCYCOMM) << "sending request to agency failed";
return result;
}
@ -2074,7 +2086,8 @@ AgencyCommResult AgencyComm::send(
bool found = false;
result._location = response->getHeaderField("location", found);
LOG(TRACE) << "redirecting to location: '" << result._location << "'";
LOG_TOPIC(TRACE, Logger::AGENCYCOMM)
<< "redirecting to location: '" << result._location << "'";
if (!found) {
// a 307 without a location header does not make any sense
@ -2090,7 +2103,8 @@ AgencyCommResult AgencyComm::send(
result._body = std::string(sb.c_str(), sb.length());
result._statusCode = response->getHttpReturnCode();
LOG(TRACE) << "request to agency returned status code " << result._statusCode
LOG_TOPIC(TRACE, Logger::AGENCYCOMM)
<< "request to agency returned status code " << result._statusCode
<< ", message: '" << result._message << "', body: '"
<< result._body << "'";

View File

@ -251,7 +251,8 @@ ClusterInfo* ClusterInfo::instance() { return _instance.get(); }
ClusterInfo::ClusterInfo(AgencyCallbackRegistry* agencyCallbackRegistry)
: _agency(), _agencyCallbackRegistry(agencyCallbackRegistry), _uniqid() {
_uniqid._currentValue = _uniqid._upperValue = 0ULL;
_uniqid._currentValue = 1ULL;
_uniqid._upperValue = 0ULL;
// Actual loading into caches is postponed until necessary
}

View File

@ -108,7 +108,8 @@ void HeartbeatThread::run() {
////////////////////////////////////////////////////////////////////////////////
void HeartbeatThread::runDBServer() {
LOG(TRACE) << "starting heartbeat thread (DBServer version)";
LOG_TOPIC(TRACE, Logger::HEARTBEAT)
<< "starting heartbeat thread (DBServer version)";
// convert timeout to seconds
double const interval = (double)_interval / 1000.0 / 1000.0;
@ -116,7 +117,8 @@ void HeartbeatThread::runDBServer() {
std::function<bool(VPackSlice const& result)> updatePlan = [&](
VPackSlice const& result) {
if (!result.isNumber()) {
LOG(ERR) << "Plan Version is not a number! " << result.toJson();
LOG_TOPIC(ERR, Logger::HEARTBEAT)
<< "Plan Version is not a number! " << result.toJson();
return false;
}
uint64_t version = result.getNumber<uint64_t>();
@ -126,7 +128,8 @@ void HeartbeatThread::runDBServer() {
MUTEX_LOCKER(mutexLocker, _statusLock);
if (version > _desiredVersions.plan) {
_desiredVersions.plan = version;
LOG(DEBUG) << "Desired Current Version is now " << _desiredVersions.plan;
LOG_TOPIC(DEBUG, Logger::HEARTBEAT)
<< "Desired Current Version is now " << _desiredVersions.plan;
doSync = true;
}
}
@ -145,7 +148,8 @@ void HeartbeatThread::runDBServer() {
while (!registered) {
registered = _agencyCallbackRegistry->registerCallback(planAgencyCallback);
if (!registered) {
LOG(ERR) << "Couldn't register plan change in agency!";
LOG_TOPIC(ERR, Logger::HEARTBEAT)
<< "Couldn't register plan change in agency!";
sleep(1);
}
}
@ -155,7 +159,7 @@ void HeartbeatThread::runDBServer() {
int currentCount = currentCountStart;
while (!isStopping()) {
LOG(DEBUG) << "sending heartbeat to agency";
LOG_TOPIC(DEBUG, Logger::HEARTBEAT) << "sending heartbeat to agency";
double const start = TRI_microtime();
@ -171,7 +175,8 @@ void HeartbeatThread::runDBServer() {
currentCount = currentCountStart;
// send an initial GET request to Sync/Commands/my-id
LOG(TRACE) << "Looking at Sync/Commands/" + _myId;
LOG_TOPIC(TRACE, Logger::HEARTBEAT)
<< "Looking at Sync/Commands/" + _myId;
AgencyCommResult result =
_agency.getValues2("Sync/Commands/" + _myId);
@ -184,17 +189,19 @@ void HeartbeatThread::runDBServer() {
break;
}
LOG(TRACE) << "Refetching Current/Version...";
LOG_TOPIC(TRACE, Logger::HEARTBEAT) << "Refetching Current/Version...";
AgencyCommResult res = _agency.getValues2("Current/Version");
if (!res.successful()) {
LOG(ERR) << "Could not read Current/Version from agency.";
LOG_TOPIC(ERR, Logger::HEARTBEAT)
<< "Could not read Current/Version from agency.";
} else {
VPackSlice s
= res.slice()[0].get(std::vector<std::string>(
{_agency.prefixStripped(), std::string("Current"),
std::string("Version")}));
if (!s.isInteger()) {
LOG(ERR) << "Current/Version in agency is not an integer.";
LOG_TOPIC(ERR, Logger::HEARTBEAT)
<< "Current/Version in agency is not an integer.";
} else {
uint64_t currentVersion = 0;
try {
@ -202,13 +209,15 @@ void HeartbeatThread::runDBServer() {
} catch (...) {
}
if (currentVersion == 0) {
LOG(ERR) << "Current/Version in agency is 0.";
LOG_TOPIC(ERR, Logger::HEARTBEAT)
<< "Current/Version in agency is 0.";
} else {
{
MUTEX_LOCKER(mutexLocker, _statusLock);
if (currentVersion > _desiredVersions.current) {
_desiredVersions.current = currentVersion;
LOG(DEBUG) << "Found greater Current/Version in agency.";
LOG_TOPIC(DEBUG, Logger::HEARTBEAT)
<< "Found greater Current/Version in agency.";
}
}
syncDBServerStatusQuo();
@ -224,7 +233,7 @@ void HeartbeatThread::runDBServer() {
double remain = interval - (TRI_microtime() - start);
// mop: execute at least once
do {
LOG(TRACE) << "Entering update loop";
LOG_TOPIC(TRACE, Logger::HEARTBEAT) << "Entering update loop";
bool wasNotified;
{
@ -238,7 +247,7 @@ void HeartbeatThread::runDBServer() {
}
if (!wasNotified) {
LOG(TRACE) << "Lock reached timeout";
LOG_TOPIC(TRACE, Logger::HEARTBEAT) << "Lock reached timeout";
planAgencyCallback->refetchAndUpdate(true);
} else {
// mop: a plan change returned successfully...
@ -263,7 +272,8 @@ void HeartbeatThread::runDBServer() {
}
usleep(1000);
}
LOG(TRACE) << "stopped heartbeat thread (DBServer version)";
LOG_TOPIC(TRACE, Logger::HEARTBEAT)
<< "stopped heartbeat thread (DBServer version)";
}
////////////////////////////////////////////////////////////////////////////////
@ -271,7 +281,8 @@ void HeartbeatThread::runDBServer() {
////////////////////////////////////////////////////////////////////////////////
void HeartbeatThread::runCoordinator() {
LOG(TRACE) << "starting heartbeat thread (coordinator version)";
LOG_TOPIC(TRACE, Logger::HEARTBEAT)
<< "starting heartbeat thread (coordinator version)";
uint64_t oldUserVersion = 0;
@ -286,7 +297,7 @@ void HeartbeatThread::runCoordinator() {
setReady();
while (!isStopping()) {
LOG(TRACE) << "sending heartbeat to agency";
LOG_TOPIC(TRACE, Logger::HEARTBEAT) << "sending heartbeat to agency";
double const start = TRI_microtime();
// send our state to the agency.
@ -305,9 +316,11 @@ void HeartbeatThread::runCoordinator() {
AgencyCommResult result = _agency.sendTransactionWithFailover(trx);
if (!result.successful()) {
LOG(WARN) << "Heartbeat: Could not read from agency!";
LOG_TOPIC(WARN, Logger::HEARTBEAT)
<< "Heartbeat: Could not read from agency!";
} else {
LOG(TRACE) << "Looking at Sync/Commands/" + _myId;
LOG_TOPIC(TRACE, Logger::HEARTBEAT)
<< "Looking at Sync/Commands/" + _myId;
handleStateChange(result);
@ -355,7 +368,8 @@ void HeartbeatThread::runCoordinator() {
TRI_UseCoordinatorDatabaseServer(_server, i->c_str());
if (vocbase != nullptr) {
LOG(INFO) << "Reloading users for database " << vocbase->_name
LOG_TOPIC(INFO, Logger::HEARTBEAT)
<< "Reloading users for database " << vocbase->_name
<< ".";
if (!fetchUsers(vocbase)) {
@ -409,7 +423,7 @@ void HeartbeatThread::runCoordinator() {
}
LOG(TRACE) << "stopped heartbeat thread";
LOG_TOPIC(TRACE, Logger::HEARTBEAT) << "stopped heartbeat thread";
}
////////////////////////////////////////////////////////////////////////////////
@ -431,14 +445,16 @@ bool HeartbeatThread::init() {
////////////////////////////////////////////////////////////////////////////////
void HeartbeatThread::removeDispatchedJob(ServerJobResult result) {
LOG(TRACE) << "Dispatched job returned!";
LOG_TOPIC(TRACE, Logger::HEARTBEAT) << "Dispatched job returned!";
{
MUTEX_LOCKER(mutexLocker, _statusLock);
if (result.success) {
LOG(DEBUG) << "Sync request successful. Now have Plan " << result.planVersion << ", Current " << result.currentVersion;
LOG_TOPIC(DEBUG, Logger::HEARTBEAT)
<< "Sync request successful. Now have Plan " << result.planVersion
<< ", Current " << result.currentVersion;
_currentVersions = AgencyVersions(result);
} else {
LOG(DEBUG) << "Sync request failed!";
LOG_TOPIC(DEBUG, Logger::HEARTBEAT) << "Sync request failed!";
// mop: we will retry immediately so wait at least a LITTLE bit
usleep(10000);
}
@ -458,7 +474,7 @@ static std::string const prefixPlanChangeCoordinator = "Plan/Databases";
bool HeartbeatThread::handlePlanChangeCoordinator(uint64_t currentPlanVersion) {
bool fetchingUsersFailed = false;
LOG(TRACE) << "found a plan update";
LOG_TOPIC(TRACE, Logger::HEARTBEAT) << "found a plan update";
AgencyCommResult result;
{
@ -489,7 +505,8 @@ bool HeartbeatThread::handlePlanChangeCoordinator(uint64_t currentPlanVersion) {
}
auto nameSlice = options.value.get("name");
if (nameSlice.isNone()) {
LOG (ERR) << "Missing name in agency database plan";
LOG_TOPIC(ERR, Logger::HEARTBEAT)
<< "Missing name in agency database plan";
continue;
}
@ -502,8 +519,9 @@ bool HeartbeatThread::handlePlanChangeCoordinator(uint64_t currentPlanVersion) {
try {
id = std::stoul(v.copyString());
} catch (std::exception const& e) {
LOG(ERR) << "Failed to convert id string to number";
LOG(ERR) << e.what();
LOG_TOPIC(ERR, Logger::HEARTBEAT)
<< "Failed to convert id string to number";
LOG_TOPIC(ERR, Logger::HEARTBEAT) << e.what();
}
}
}
@ -579,7 +597,8 @@ bool HeartbeatThread::handlePlanChangeCoordinator(uint64_t currentPlanVersion) {
// turn on error logging now
if (!ClusterComm::instance()->enableConnectionErrorLogging(true)) {
LOG(INFO) << "created coordinator databases for the first time";
LOG_TOPIC(INFO, Logger::HEARTBEAT)
<< "created coordinator databases for the first time";
}
return true;
@ -603,15 +622,16 @@ bool HeartbeatThread::syncDBServerStatusQuo() {
}
if (_desiredVersions.plan > _currentVersions.plan) {
LOG(DEBUG) << "Plan version " << _currentVersions.plan
LOG_TOPIC(DEBUG, Logger::HEARTBEAT)
<< "Plan version " << _currentVersions.plan
<< " is lower than desired version " << _desiredVersions.plan;
_isDispatchingChange = true;
becauseOfPlan = true;
}
if (_desiredVersions.current > _currentVersions.current) {
LOG(DEBUG) << "Current version " << _currentVersions.current
<< " is lower than desired version "
<< _desiredVersions.current;
LOG_TOPIC(DEBUG, Logger::HEARTBEAT)
<< "Current version " << _currentVersions.current
<< " is lower than desired version " << _desiredVersions.current;
_isDispatchingChange = true;
becauseOfCurrent = true;
}
@ -628,22 +648,23 @@ bool HeartbeatThread::syncDBServerStatusQuo() {
ci->invalidateCurrent();
}
LOG(TRACE) << "Dispatching Sync";
LOG_TOPIC(TRACE, Logger::HEARTBEAT) << "Dispatching Sync";
// schedule a job for the change
std::unique_ptr<arangodb::rest::Job> job(new ServerJob(this));
auto dispatcher = DispatcherFeature::DISPATCHER;
if (dispatcher == nullptr) {
LOG(ERR) << "could not schedule dbserver sync - dispatcher gone.";
LOG_TOPIC(ERR, Logger::HEARTBEAT)
<< "could not schedule dbserver sync - dispatcher gone.";
return false;
}
if (dispatcher->addJob(job) == TRI_ERROR_NO_ERROR) {
LOG(TRACE) << "scheduled dbserver sync";
LOG_TOPIC(TRACE, Logger::HEARTBEAT) << "scheduled dbserver sync";
return true;
}
MUTEX_LOCKER(mutexLocker, _statusLock);
_isDispatchingChange = false;
LOG(ERR) << "could not schedule dbserver sync";
LOG_TOPIC(ERR, Logger::HEARTBEAT) << "could not schedule dbserver sync";
}
return false;
}
@ -690,7 +711,8 @@ bool HeartbeatThread::sendState() {
if (++_numFails % _maxFailsBeforeWarning == 0) {
std::string const endpoints = AgencyComm::getEndpointsString();
LOG(WARN) << "heartbeat could not be sent to agency endpoints ("
LOG_TOPIC(WARN, Logger::HEARTBEAT)
<< "heartbeat could not be sent to agency endpoints ("
<< endpoints << "): http code: " << result.httpCode()
<< ", body: " << result.body();
_numFails = 0;
@ -708,7 +730,8 @@ bool HeartbeatThread::fetchUsers(TRI_vocbase_t* vocbase) {
VPackBuilder builder;
builder.openArray();
LOG(TRACE) << "fetching users for database '" << vocbase->_name << "'";
LOG_TOPIC(TRACE, Logger::HEARTBEAT)
<< "fetching users for database '" << vocbase->_name << "'";
int res = usersOnCoordinator(std::string(vocbase->_name), builder, 10.0);
@ -740,11 +763,12 @@ bool HeartbeatThread::fetchUsers(TRI_vocbase_t* vocbase) {
}
if (result) {
LOG(TRACE) << "fetching users for database '" << vocbase->_name
<< "' successful";
LOG_TOPIC(TRACE, Logger::HEARTBEAT)
<< "fetching users for database '" << vocbase->_name << "' successful";
_refetchUsers.erase(vocbase);
} else {
LOG(TRACE) << "fetching users for database '" << vocbase->_name
LOG_TOPIC(TRACE, Logger::HEARTBEAT)
<< "fetching users for database '" << vocbase->_name
<< "' failed with error: " << TRI_errno_string(res);
_refetchUsers.insert(vocbase);
}

View File

@ -142,7 +142,6 @@ class HeartbeatThread : public Thread {
bool fetchUsers(TRI_vocbase_t*);
//////////////////////////////////////////////////////////////////////////////
/// @brief bring the db server in sync with the desired state
//////////////////////////////////////////////////////////////////////////////

View File

@ -226,8 +226,10 @@ bool HttpServer::handleRequestAsync(HttpCommTask* task,
if (res != TRI_ERROR_NO_ERROR) {
job->requestStatisticsAgentSetExecuteError();
job->RequestStatisticsAgent::transferTo(task);
if (res != TRI_ERROR_DISPATCHER_IS_STOPPING) {
LOG(WARN) << "unable to add job to the job queue: "
<< TRI_errno_string(res);
}
// todo send info to async work manager?
return false;
}

View File

@ -1649,7 +1649,7 @@ int RestReplicationHandler::processRestoreCollectionCoordinator(
if (!shardKeys.isObject()) {
// set default shard key
toMerge.add("shardKeys", VPackValue(VPackValueType::Array));
toMerge.add(VPackValue(std::string(TRI_VOC_ATTRIBUTE_KEY)));
toMerge.add(VPackValue(StaticStrings::KeyString));
toMerge.close(); // end of shardKeys
}
@ -2005,7 +2005,7 @@ static int restoreDataParser(char const* ptr, char const* pos,
return TRI_ERROR_HTTP_CORRUPTED_JSON;
} catch (...) {
return TRI_ERROR_HTTP_CORRUPTED_JSON;
return TRI_ERROR_INTERNAL;
}
VPackSlice const slice = builder.slice();
@ -2018,7 +2018,7 @@ static int restoreDataParser(char const* ptr, char const* pos,
type = REPLICATION_INVALID;
for (auto const& pair : VPackObjectIterator(slice)) {
for (auto const& pair : VPackObjectIterator(slice, true)) {
if (!pair.key.isString()) {
errorMsg = invalidMsg;
@ -2043,14 +2043,20 @@ static int restoreDataParser(char const* ptr, char const* pos,
if (pair.value.isObject()) {
doc = pair.value;
if (doc.hasKey(TRI_VOC_ATTRIBUTE_KEY)) {
key = doc.get(TRI_VOC_ATTRIBUTE_KEY).copyString();
if (doc.hasKey(StaticStrings::KeyString)) {
key = doc.get(StaticStrings::KeyString).copyString();
}
else if (useRevision && doc.hasKey(TRI_VOC_ATTRIBUTE_REV)) {
rev = doc.get(TRI_VOC_ATTRIBUTE_REV).copyString();
else if (useRevision && doc.hasKey(StaticStrings::RevString)) {
rev = doc.get(StaticStrings::RevString).copyString();
}
}
}
else if (attributeName == "key") {
if (key.empty()) {
key = pair.value.copyString();
}
}
}
if (type == REPLICATION_MARKER_DOCUMENT && !doc.isObject()) {
@ -2059,6 +2065,7 @@ static int restoreDataParser(char const* ptr, char const* pos,
}
if (key.empty()) {
LOG(ERR) << "GOT EXCEPTION 5";
errorMsg = invalidMsg;
return TRI_ERROR_HTTP_BAD_PARAMETER;
@ -2109,7 +2116,7 @@ int RestReplicationHandler::processRestoreDataBatch(
oldBuilder.clear();
oldBuilder.openObject();
oldBuilder.add(TRI_VOC_ATTRIBUTE_KEY, VPackValue(key));
oldBuilder.add(StaticStrings::KeyString, VPackValue(key));
oldBuilder.close();
res = applyCollectionDumpMarker(trx, resolver, collectionName, type,

View File

@ -59,20 +59,19 @@ static void raceForClusterBootstrap() {
auto ci = ClusterInfo::instance();
while (true) {
AgencyCommResult result = agency.getValues("Bootstrap", false);
if (!result.successful() &&
result.httpCode() != (int) arangodb::GeneralResponse::ResponseCode::NOT_FOUND) {
// Error in communication, we could live with NOT_FOUND or OK
AgencyCommResult result = agency.getValues2("Bootstrap");
if (!result.successful()) {
// Error in communication, note that value not found is not an error
LOG_TOPIC(TRACE, Logger::STARTUP)
<< "raceForClusterBootstrap: no agency communication";
sleep(1);
continue;
}
result.parse("", false);
auto it = result._values.begin();
if (it != result._values.end()) {
VPackSlice value = it->second._vpack->slice();
if (value.isString() && value.isEqualString("done")) {
VPackSlice value = result.slice()[0].get(std::vector<std::string>(
{agency.prefixStripped(), "Bootstrap"}));
if (value.isString()) {
// key was found and is a string
if (value.isEqualString("done")) {
// all done, let's get out of here:
LOG_TOPIC(TRACE, Logger::STARTUP)
<< "raceForClusterBootstrap: bootstrap already done";

View File

@ -47,6 +47,7 @@
$('#subNavigationBar .breadcrumb').html('');
$('#subNavigationBar .bottom').html('');
$('#loadingScreen').hide();
$('#content').show();
if (callback) {
callback.apply(this, args);
}

View File

@ -46,18 +46,22 @@
return;
}
console.log(event);
var callback = function(error) {
if (error) {
if (event.type === 'focusout') {
//$('#loginForm input').addClass("form-error");
$('.wrong-credentials').show();
$('#loginDatabase').html('');
$('#loginDatabase').append(
'<option>_system</option>'
);
$('#loginDatabase').prop('disabled', true);
$('#submitLogin').prop('disabled', true);
}
}
else {
$('.wrong-credentials').hide();
if (!self.loggedIn) {
self.loggedIn = true;
//get list of allowed dbs
$.ajax("/_api/database/user").success(function(data) {
@ -73,11 +77,9 @@
});
});
}
}
}.bind(this);
this.collection.login(username, password, callback);
},
goTo: function (e) {

View File

@ -6,9 +6,22 @@
height: auto !important;
}
.accordion-heading a {
.accordion-heading {
padding-top: 15px;
a {
border: 1px solid $c-accordion-heading;
color: $c-black;
font-weight: 400;
width: 397px !important;
}
.accordion-toggle {
border-radius: 3px;
box-sizing: border-box;
display: block;
padding: 8px 15px;
width: 100% !important;
}
}

View File

@ -55,6 +55,17 @@
}
}
.checking-password {
box-sizing: border-box;
color: rgba(0, 0, 0, .5);
margin-left: -21px;
margin-top: -25px;
position: absolute;
text-align: center;
width: 100%;
}
form {
.fa {
color: rgba(0, 0, 0, .2);
float: left;
@ -63,6 +74,7 @@
margin-top: 11px;
position: absolute;
}
}
.wrong-credentials {
color: $c-negative;

View File

@ -50,6 +50,16 @@
width: 40px !important;
}
.primary {
.version {
display: none;
}
}
.shortcut-icons {
display: none;
}
.arango-collection-ul {
.tab {
font-size: 12pt;
@ -74,6 +84,8 @@
}
}
//actions sub menu
.headerButtonBar {
display: none;

View File

@ -29,6 +29,7 @@
////////////////////////////////////////////////////////////////////////////////
var jsunity = require("jsunity");
var wait = require("internal").wait;
////////////////////////////////////////////////////////////////////////////////
/// @brief test suite
@ -77,15 +78,6 @@ function agencyTestSuite () {
assertEqual(res.statusCode, 200);
}
function sleep(milliseconds) {
var start = new Date().getTime();
for (var i = 0; i < 1e7; i++) {
if ((new Date().getTime() - start) > milliseconds){
break;
}
}
}
return {
@ -190,19 +182,19 @@ function agencyTestSuite () {
assertEqual(readAndCheck([["a/z"]]), [{"a":{"z":12}}]);
writeAndCheck([[{"a/y":{"op":"set","new":12, "ttl": 1}}]]);
assertEqual(readAndCheck([["a/y"]]), [{"a":{"y":12}}]);
sleep(1250);
wait(1.250);
assertEqual(readAndCheck([["a/y"]]), [{a:{}}]);
writeAndCheck([[{"a/y":{"op":"set","new":12, "ttl": 1}}]]);
writeAndCheck([[{"a/y":{"op":"set","new":12}}]]);
assertEqual(readAndCheck([["a/y"]]), [{"a":{"y":12}}]);
sleep(1250);
wait(1.250);
assertEqual(readAndCheck([["a/y"]]), [{"a":{"y":12}}]);
writeAndCheck([[{"foo/bar":{"op":"set","new":{"baz":12}}}]]);
assertEqual(readAndCheck([["/foo/bar/baz"]]), [{"foo":{"bar":{"baz":12}}}]);
assertEqual(readAndCheck([["/foo/bar"]]), [{"foo":{"bar":{"baz":12}}}]);
assertEqual(readAndCheck([["/foo"]]), [{"foo":{"bar":{"baz":12}}}]);
writeAndCheck([[{"foo/bar":{"op":"set","new":{"baz":12},"ttl":1}}]]);
sleep(1250);
wait(1.250);
assertEqual(readAndCheck([["/foo"]]), [{"foo":{}}]);
assertEqual(readAndCheck([["/foo/bar"]]), [{"foo":{}}]);
assertEqual(readAndCheck([["/foo/bar/baz"]]), [{"foo":{}}]);

View File

@ -37,10 +37,12 @@ Mutex LogTopic::_namesLock;
std::map<std::string, LogTopic*> LogTopic::_names;
LogTopic Logger::AGENCY("agency", LogLevel::INFO);
LogTopic Logger::AGENCYCOMM("agencycomm", LogLevel::INFO);
LogTopic Logger::COLLECTOR("collector");
LogTopic Logger::COMPACTOR("compactor");
LogTopic Logger::CONFIG("config");
LogTopic Logger::DATAFILES("datafiles", LogLevel::INFO);
LogTopic Logger::HEARTBEAT("heartbeat", LogLevel::INFO);
LogTopic Logger::MMAP("mmap");
LogTopic Logger::PERFORMANCE("performance", LogLevel::FATAL); // suppress
LogTopic Logger::QUERIES("queries", LogLevel::INFO);

View File

@ -128,10 +128,12 @@ class Logger {
public:
static LogTopic AGENCY;
static LogTopic AGENCYCOMM;
static LogTopic COLLECTOR;
static LogTopic COMPACTOR;
static LogTopic CONFIG;
static LogTopic DATAFILES;
static LogTopic HEARTBEAT;
static LogTopic MMAP;
static LogTopic PERFORMANCE;
static LogTopic QUERIES;