1
0
Fork 0

Merge branch 'devel' of https://github.com/arangodb/arangodb into agency

This commit is contained in:
Kaveh Vahedipour 2016-03-18 10:45:15 +01:00
commit d6db18d655
35 changed files with 862 additions and 606 deletions

View File

@ -35,7 +35,7 @@ set(OPENSSL_INCLUDE_DIR
"OpenSSL: Include Directory"
)
if (WIN32)
if (WIN32 AND NOT SSL_NUGET)
if (DEBUG)
set(OPENSSL_EXT "d")
else ()

View File

@ -33,12 +33,17 @@ endif()
# detect 32bit or 64bit
if ("${BITS}" STREQUAL "64")
set(V8_PROC_ARCH "x64")
if ("${CMAKE_TARGET_ARCHITECTURES}" STREQUAL "armv7")
set(V8_PROC_ARCH "arm")
list(APPEND V8_GYP_ARGS -Darm_version=7 -Darm_fpu=default -Darm_float_abi=default)
else ()
message(ERROR "#### WILLY FIX ME ####")
set(V8_PROC_ARCH "ia32")
endif () # TODO: ARM? Win32?
if ("${BITS}" STREQUAL "64")
set(V8_PROC_ARCH "x64")
else ()
set(V8_PROC_ARCH "ia32")
endif ()
endif()
# target architecture
@ -119,7 +124,7 @@ if (${CMAKE_GENERATOR} MATCHES "Ninja")
# Gyp wants to use its own clang if we don't set this:
list(APPEND V8_GYP_ARGS
-Dclang=0)
# -Dv8_use_snapshot=true
list(APPEND V8_GYP_ARGS
-I${V8_DIR}/build/standalone.gypi
--depth=.
@ -131,6 +136,7 @@ if (${CMAKE_GENERATOR} MATCHES "Ninja")
)
set(NINJA_BUILD_DIR "${V8_TARGET_DIR}/${NINJA_TARGET}")
ExternalProject_Add(v8_build
SOURCE_DIR
"${V8_DIR}"
@ -323,6 +329,10 @@ else ()
option(USE_DEBUG_V8 "compile V8 in DEBUG mode" OFF)
set(V8_CFLAGS "")
set(V8_CXXFLAGS "")
set(V8_LDFLAGS "")
if (USE_DEBUG_V8)
set(V8_TARGET_ARCH "${V8_PROC_ARCH}.debug")
else ()
@ -361,6 +371,11 @@ else ()
-Dwerror=
)
if (APPLE AND CMAKE_COMPILER_IS_CLANG)
set(V8_CXXFLAGS "${V8_CXXFLAGS} -stdlib=libc++")
set(V8_LDFLAGS "${V8_LDFLAGS} -stdlib=libc++")
endif ()
set(V8_COMPILE_ARGS
-C ${V8_TARGET_DIR}
-f Makefile.${V8_TARGET_ARCH}
@ -368,12 +383,11 @@ else ()
builddir=${V8_TARGET_DIR}/${V8_TARGET_ARCH}
CC=${CMAKE_C_COMPILER}
CXX=${CMAKE_CXX_COMPILER}
CFLAGS=${V8_CFLAGS}
CXXFLAGS=${V8_CXXFLAGS}
LDFLAGS=${V8_LDFLAGS}
)
if (APPLE AND CMAKE_COMPILER_IS_CLANG)
list(APPEND V8_COMPILE_ARGS CXXFLAGS=-stdlib=libc++ LDFLAGS=-stdlib=libc++)
endif ()
list(APPEND V8_COMPILE_ARGS icui18n icuuc icudata)
list(APPEND V8_COMPILE_ARGS v8)

View File

@ -325,6 +325,7 @@
},
}],
['OS=="solaris"', {'defines': ['_GLIBCXX_USE_C99_MATH']}],
['OS=="solaris"', {'target_defaults': {'cflags': ['-m64']}}],
['OS=="linux" or OS=="freebsd" or OS=="openbsd" or OS=="solaris" \
or OS=="netbsd" or OS=="aix"', {
'target_defaults': {
@ -336,9 +337,8 @@
'-pthread',
'-fno-exceptions',
'-pedantic',
'-m64',
# Don't warn about the "struct foo f = {0};" initialization pattern.
'-Wno-missing-field-initializers',
'-Wno-missing-field-initializers'
],
'cflags_cc': [ '-Wnon-virtual-dtor', '-fno-rtti', '-std=gnu++11' ],
'ldflags': [ '-pthread', '-march=x86-64', '-m64'],

View File

@ -975,12 +975,13 @@
or OS=="netbsd" or OS=="qnx" or OS=="aix"', {
'conditions': [
[ 'v8_no_strict_aliasing==1', {
'cflags': [ '-fno-strict-aliasing -m64' ],
'cflags': [ '-fno-strict-aliasing' ],
}],
], # conditions
}],
['OS=="solaris"', {
'defines': [ '__C99FEATURES__=1' ], # isinf() etc.
'cflags': ['-m64']
}],
['OS=="freebsd" or OS=="openbsd"', {
'cflags': [ '-I/usr/local/include' ],

View File

@ -1,13 +1,14 @@
# -*- mode: CMAKE; -*-
# swagger
add_custom_target (swagger
COMMAND ${PYTHON_EXECUTABLE}
${PROJECT_SOURCE_DIR}/Documentation/Scripts/generateSwagger.py
${PROJECT_SOURCE_DIR}
${PROJECT_SOURCE_DIR}/js/apps/system/_admin/aardvark/APP/api-docs api-docs
${PROJECT_SOURCE_DIR}/Documentation/DocuBlocks/Rest/
> ${PROJECT_SOURCE_DIR}/js/apps/system/_admin/aardvark/APP/api-docs.json)
add_custom_target(swagger
COMMAND ${PROJECT_SOURCE_DIR}/utils/generateSwagger.sh
WORKING_DIRECTORY ${PROJECT_SOURCE_DIR})
# swagger
add_custom_target(examples
COMMAND ${PROJECT_SOURCE_DIR}/utils/generateExamples.sh
WORKING_DIRECTORY ${PROJECT_SOURCE_DIR})
# manual pages
if (USE_MAINTAINER_MODE)

View File

@ -355,17 +355,16 @@ bool ApplicationCluster::open() {
if (!enabled()) {
return true;
}
ServerState::RoleEnum role = ServerState::instance()->getRole();
// tell the agency that we are ready
{
AgencyComm comm;
AgencyCommResult result;
AgencyComm comm;
AgencyCommResult result;
bool success;
do {
AgencyCommLocker locker("Current", "WRITE");
if (locker.successful()) {
success = locker.successful();
if (success) {
VPackBuilder builder;
try {
VPackObjectBuilder b(&builder);
@ -382,69 +381,22 @@ bool ApplicationCluster::open() {
locker.unlock();
LOG(FATAL) << "unable to register server in agency: http code: " << result.httpCode() << ", body: " << result.body(); FATAL_ERROR_EXIT();
}
if (role == ServerState::ROLE_COORDINATOR) {
VPackBuilder builder;
try {
builder.add(VPackValue("none"));
} catch (...) {
locker.unlock();
LOG(FATAL) << "out of memory"; FATAL_ERROR_EXIT();
}
ServerState::instance()->setState(ServerState::STATE_SERVING);
// register coordinator
AgencyCommResult result =
comm.setValue("Current/Coordinators/" + _myId, builder.slice(), 0.0);
if (!result.successful()) {
locker.unlock();
LOG(FATAL) << "unable to register coordinator in agency"; FATAL_ERROR_EXIT();
}
} else if (role == ServerState::ROLE_PRIMARY) {
VPackBuilder builder;
try {
builder.add(VPackValue("none"));
} catch (...) {
locker.unlock();
LOG(FATAL) << "out of memory"; FATAL_ERROR_EXIT();
}
ServerState::instance()->setState(ServerState::STATE_SERVINGASYNC);
// register server
AgencyCommResult result =
comm.setValue("Current/DBServers/" + _myId, builder.slice(), 0.0);
if (!result.successful()) {
locker.unlock();
LOG(FATAL) << "unable to register db server in agency"; FATAL_ERROR_EXIT();
}
} else if (role == ServerState::ROLE_SECONDARY) {
std::string keyName = std::string("\"") + _myId + std::string("\"");
VPackBuilder builder;
try {
builder.add(VPackValue(keyName));
} catch (...) {
locker.unlock();
LOG(FATAL) << "out of memory"; FATAL_ERROR_EXIT();
}
ServerState::instance()->setState(ServerState::STATE_SYNCING);
// register server
AgencyCommResult result = comm.setValue(
"Current/DBServers/" + ServerState::instance()->getPrimaryId(),
builder.slice(), 0.0);
if (!result.successful()) {
locker.unlock();
LOG(FATAL) << "unable to register secondary db server in agency"; FATAL_ERROR_EXIT();
}
if (success) {
break;
}
}
sleep(1);
} while (true);
ServerState::RoleEnum role = ServerState::instance()->getRole();
if (role == ServerState::ROLE_COORDINATOR) {
ServerState::instance()->setState(ServerState::STATE_SERVING);
} else if (role == ServerState::ROLE_PRIMARY) {
ServerState::instance()->setState(ServerState::STATE_SERVINGASYNC);
} else if (role == ServerState::ROLE_SECONDARY) {
ServerState::instance()->setState(ServerState::STATE_SYNCING);
}
return true;
}

View File

@ -398,7 +398,7 @@ std::unique_ptr<ClusterCommResult> ClusterComm::syncRequest(
cm->leaseConnection(res->endpoint);
if (nullptr == connection) {
res->status = CL_COMM_ERROR;
res->status = CL_COMM_BACKEND_UNAVAILABLE;
res->errorMessage =
"cannot create connection to server '" + res->serverID + "'";
if (logConnectionErrors()) {
@ -444,7 +444,7 @@ std::unique_ptr<ClusterCommResult> ClusterComm::syncRequest(
if (res->errorMessage == "Request timeout reached") {
res->status = CL_COMM_TIMEOUT;
} else {
res->status = CL_COMM_ERROR;
res->status = CL_COMM_BACKEND_UNAVAILABLE;
}
cm->brokenConnection(connection);
client->invalidateConnection();
@ -1062,7 +1062,7 @@ void ClusterCommThread::run() {
httpclient::ConnectionManager::SingleServerConnection* connection =
cm->leaseConnection(op->result.endpoint);
if (nullptr == connection) {
op->result.status = CL_COMM_ERROR;
op->result.status = CL_COMM_BACKEND_UNAVAILABLE;
op->result.errorMessage = "cannot create connection to server: ";
op->result.errorMessage += op->result.serverID;
if (cc->logConnectionErrors()) {
@ -1112,7 +1112,7 @@ void ClusterCommThread::run() {
op->result.status = CL_COMM_TIMEOUT;
op->result.errorMessage = "timeout";
} else {
op->result.status = CL_COMM_ERROR;
op->result.status = CL_COMM_BACKEND_UNAVAILABLE;
op->result.errorMessage = client->getErrorMessage();
}
cm->brokenConnection(connection);

View File

@ -68,9 +68,10 @@ enum ClusterCommOpStatus {
CL_COMM_TIMEOUT = 4, // no answer received until timeout
CL_COMM_RECEIVED = 5, // answer received
CL_COMM_ERROR = 6, // original request could not be sent
CL_COMM_DROPPED = 7 // operation was dropped, not known
CL_COMM_DROPPED = 7, // operation was dropped, not known
// this is only used to report an error
// in the wait or enquire methods
CL_COMM_BACKEND_UNAVAILABLE = 8 // mop: communication problem with the backend
};
////////////////////////////////////////////////////////////////////////////////

View File

@ -42,6 +42,23 @@ using namespace arangodb::rest;
namespace arangodb {
static int handleGeneralCommErrors(ClusterCommResult const* res) {
if (res->status == CL_COMM_TIMEOUT) {
// No reply, we give up:
return TRI_ERROR_CLUSTER_TIMEOUT;
} else if (res->status == CL_COMM_ERROR) {
// This could be a broken connection or an Http error:
if (res->result == nullptr || !res->result->isComplete()) {
// there is not result
return TRI_ERROR_CLUSTER_CONNECTION_LOST;
}
} else if (res->status == CL_COMM_BACKEND_UNAVAILABLE) {
return TRI_ERROR_CLUSTER_BACKEND_UNAVAILABLE;
}
return TRI_ERROR_NO_ERROR;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief extracts a numeric value from an hierarchical VelocyPack
////////////////////////////////////////////////////////////////////////////////
@ -610,20 +627,10 @@ int createDocumentOnCoordinator(
StringUtils::urlEncode(shardID) + "&waitForSync=" +
(waitForSync ? "true" : "false"),
body, headers, 60.0);
if (res->status == CL_COMM_TIMEOUT) {
// No reply, we give up:
return TRI_ERROR_CLUSTER_TIMEOUT;
}
if (res->status == CL_COMM_ERROR) {
// This could be a broken connection or an Http error:
if (res->result == nullptr || !res->result->isComplete()) {
// there is not result
return TRI_ERROR_CLUSTER_CONNECTION_LOST;
}
// In this case a proper HTTP error was reported by the DBserver,
// this can be 400 or 404, we simply forward the result.
// We intentionally fall through here.
int commError = handleGeneralCommErrors(res.get());
if (commError != TRI_ERROR_NO_ERROR) {
return commError;
}
responseCode = static_cast<arangodb::rest::HttpResponse::HttpResponseCode>(
res->result->getHttpReturnCode());
@ -700,19 +707,10 @@ int deleteDocumentOnCoordinator(
"/" + StringUtils::urlEncode(key) + "?waitForSync=" +
(waitForSync ? "true" : "false") + revstr + policystr,
"", *headers, 60.0);
if (res->status == CL_COMM_TIMEOUT) {
// No reply, we give up:
return TRI_ERROR_CLUSTER_TIMEOUT;
}
if (res->status == CL_COMM_ERROR) {
// This could be a broken connection or an Http error:
if (res->result == nullptr || !res->result->isComplete()) {
return TRI_ERROR_CLUSTER_CONNECTION_LOST;
}
// In this case a proper HTTP error was reported by the DBserver,
// this can be 400 or 404, we simply forward the result.
// We intentionally fall through here.
int error = handleGeneralCommErrors(res.get());
if (error != TRI_ERROR_NO_ERROR) {
return error;
}
responseCode = static_cast<arangodb::rest::HttpResponse::HttpResponseCode>(
res->result->getHttpReturnCode());
@ -880,18 +878,9 @@ int getDocumentOnCoordinator(
"/" + StringUtils::urlEncode(key) + revstr,
"", *headers, 60.0);
if (res->status == CL_COMM_TIMEOUT) {
// No reply, we give up:
return TRI_ERROR_CLUSTER_TIMEOUT;
}
if (res->status == CL_COMM_ERROR) {
// This could be a broken connection or an Http error:
if (!res->result || !res->result->isComplete()) {
return TRI_ERROR_CLUSTER_CONNECTION_LOST;
}
// In this case a proper HTTP error was reported by the DBserver,
// this can be 400 or 404, we simply forward the result.
// We intentionally fall through here.
int error = handleGeneralCommErrors(res.get());
if (error != TRI_ERROR_NO_ERROR) {
return error;
}
responseCode = static_cast<arangodb::rest::HttpResponse::HttpResponseCode>(
res->result->getHttpReturnCode());
@ -1144,10 +1133,14 @@ int getAllDocumentsOnCoordinator(
for (count = (int)shards->size(); count > 0; count--) {
auto res = cc->wait("", coordTransactionID, 0, "", 0.0);
if (res.status == CL_COMM_TIMEOUT) {
LOG(TRACE) << "Response status " << res.status;
int error = handleGeneralCommErrors(&res);
if (error != TRI_ERROR_NO_ERROR) {
cc->drop("", coordTransactionID, 0, "");
return TRI_ERROR_CLUSTER_TIMEOUT;
return error;
}
if (res.status == CL_COMM_ERROR || res.status == CL_COMM_DROPPED ||
res.answer_code == arangodb::rest::HttpResponse::NOT_FOUND) {
cc->drop("", coordTransactionID, 0, "");
@ -1262,16 +1255,15 @@ int getFilteredEdgesOnCoordinator(
for (count = (int)shards->size(); count > 0; count--) {
auto res = cc->wait("", coordTransactionID, 0, "", 0.0);
if (res.status == CL_COMM_TIMEOUT) {
int error = handleGeneralCommErrors(&res);
if (error != TRI_ERROR_NO_ERROR) {
cc->drop("", coordTransactionID, 0, "");
return TRI_ERROR_CLUSTER_TIMEOUT;
return error;
}
if (res.status == CL_COMM_ERROR || res.status == CL_COMM_DROPPED) {
cc->drop("", coordTransactionID, 0, "");
return TRI_ERROR_INTERNAL;
}
if (res.status == CL_COMM_RECEIVED) {
}
std::unique_ptr<TRI_json_t> shardResult(
TRI_JsonString(TRI_UNKNOWN_MEM_ZONE, res.answer->body()));
@ -1451,20 +1443,12 @@ int modifyDocumentOnCoordinator(
StringUtils::urlEncode(key) + "?waitForSync=" +
(waitForSync ? "true" : "false") + revstr + policystr,
*(body.get()), *headers, 60.0);
int error = handleGeneralCommErrors(res.get());
if (error != TRI_ERROR_NO_ERROR) {
return error;
}
if (res->status == CL_COMM_TIMEOUT) {
// No reply, we give up:
return TRI_ERROR_CLUSTER_TIMEOUT;
}
if (res->status == CL_COMM_ERROR) {
// This could be a broken connection or an Http error:
if (res->result == nullptr || !res->result->isComplete()) {
return TRI_ERROR_CLUSTER_CONNECTION_LOST;
}
// In this case a proper HTTP error was reported by the DBserver,
// this can be 400 or 404, we simply forward the result.
// We intentionally fall through here.
}
// Now we have to distinguish whether we still have to go the slow way:
responseCode = static_cast<arangodb::rest::HttpResponse::HttpResponseCode>(
res->result->getHttpReturnCode());
@ -1585,19 +1569,9 @@ int createEdgeOnCoordinator(
StringUtils::urlEncode(from) + "&to=" + StringUtils::urlEncode(to),
body, headers, 60.0);
if (res->status == CL_COMM_TIMEOUT) {
// No reply, we give up:
return TRI_ERROR_CLUSTER_TIMEOUT;
}
if (res->status == CL_COMM_ERROR) {
// This could be a broken connection or an Http error:
if (res->result == nullptr || !res->result->isComplete()) {
// there is not result
return TRI_ERROR_CLUSTER_CONNECTION_LOST;
}
// In this case a proper HTTP error was reported by the DBserver,
// this can be 400 or 404, we simply forward the result.
// We intentionally fall through here.
int commError = handleGeneralCommErrors(res.get());
if (commError != TRI_ERROR_NO_ERROR) {
return commError;
}
responseCode = static_cast<arangodb::rest::HttpResponse::HttpResponseCode>(
res->result->getHttpReturnCode());

View File

@ -37,6 +37,12 @@ using namespace arangodb::basics;
/// running
////////////////////////////////////////////////////////////////////////////////
static bool isClusterRole(ServerState::RoleEnum role) {
return (role == ServerState::ROLE_PRIMARY ||
role == ServerState::ROLE_SECONDARY ||
role == ServerState::ROLE_COORDINATOR);
}
static ServerState Instance;
ServerState::ServerState()
@ -172,10 +178,25 @@ void ServerState::setAuthentication(std::string const& username,
std::string ServerState::getAuthentication() { return _authentication; }
////////////////////////////////////////////////////////////////////////////////
/// @brief find and set our role
////////////////////////////////////////////////////////////////////////////////
void ServerState::findAndSetRoleBlocking() {
while (true) {
auto role = determineRole(_localInfo, _id);
std::string roleString = roleToString(role);
LOG(DEBUG) << "Found my role: " << roleString;
if (storeRole(role)) {
break;
}
sleep(1);
}
}
////////////////////////////////////////////////////////////////////////////////
/// @brief flush the server state (used for testing)
////////////////////////////////////////////////////////////////////////////////
void ServerState::flush() {
{
WRITE_LOCKER(writeLocker, _lock);
@ -186,8 +207,8 @@ void ServerState::flush() {
_address = ClusterInfo::instance()->getTargetServerEndpoint(_id);
}
storeRole(determineRole(_localInfo, _id));
findAndSetRoleBlocking();
}
////////////////////////////////////////////////////////////////////////////////
@ -227,10 +248,8 @@ bool ServerState::isDBServer(ServerState::RoleEnum role) {
bool ServerState::isRunningInCluster() {
auto role = loadRole();
return (role == ServerState::ROLE_PRIMARY ||
role == ServerState::ROLE_SECONDARY ||
role == ServerState::ROLE_COORDINATOR);
return isClusterRole(role);
}
////////////////////////////////////////////////////////////////////////////////
@ -272,15 +291,8 @@ ServerState::RoleEnum ServerState::getRole() {
LOG(DEBUG) << "Have stored " << builder.slice().toJson() << " under Current/NewServers/" << _localInfo << " in agency.";
}
// role not yet set
role = determineRole(info, id);
std::string roleString = roleToString(role);
LOG(DEBUG) << "Found my role: " << roleString;
storeRole(role);
return role;
findAndSetRoleBlocking();
return loadRole();
}
////////////////////////////////////////////////////////////////////////////////
@ -351,8 +363,8 @@ bool ServerState::registerWithRole(ServerState::RoleEnum role) {
}
_id = id;
storeRole(role);
findAndSetRoleBlocking();
LOG(DEBUG) << "We successfully announced ourselves as " << roleToString(role) << " and our id is " << id;
return true;
@ -779,7 +791,9 @@ bool ServerState::redetermineRole() {
RoleEnum oldRole = loadRole();
if (role != oldRole) {
LOG(INFO) << "Changed role to: " << roleString;
storeRole(role);
if (!storeRole(role)) {
return false;
}
return true;
}
if (_idOfPrimary != saveIdOfPrimary) {
@ -806,21 +820,12 @@ ServerState::RoleEnum ServerState::determineRole(std::string const& info,
LOG(DEBUG) << "Learned my own Id: " << id;
setId(id);
}
ServerState::RoleEnum role = checkServersList(id);
ServerState::RoleEnum role2 = checkCoordinatorsList(id);
ServerState::RoleEnum role = checkCoordinatorsList(id);
if (role == ServerState::ROLE_UNDEFINED) {
// role is still unknown. check if we are a coordinator
role = role2;
} else {
// we are a primary or a secondary.
// now we double-check that we are not a coordinator as well
if (role2 != ServerState::ROLE_UNDEFINED) {
role = ServerState::ROLE_UNDEFINED;
}
role = checkServersList(id);
}
// mop: role might still be undefined
return role;
}
@ -1056,3 +1061,81 @@ ServerState::RoleEnum ServerState::checkServersList(std::string const& id) {
return role;
}
//////////////////////////////////////////////////////////////////////////////
/// @brief store the server role
//////////////////////////////////////////////////////////////////////////////
bool ServerState::storeRole(RoleEnum role)
{
if (isClusterRole(role)) {
AgencyComm comm;
AgencyCommResult result;
std::unique_ptr<AgencyCommLocker> locker;
locker.reset(new AgencyCommLocker("Current", "WRITE"));
if (!locker->successful()) {
return false;
}
if (role == ServerState::ROLE_COORDINATOR) {
VPackBuilder builder;
try {
builder.add(VPackValue("none"));
} catch (...) {
locker->unlock();
LOG(FATAL) << "out of memory"; FATAL_ERROR_EXIT();
}
// register coordinator
AgencyCommResult result =
comm.setValue("Current/Coordinators/" + _id, builder.slice(), 0.0);
if (!result.successful()) {
locker->unlock();
LOG(FATAL) << "unable to register coordinator in agency"; FATAL_ERROR_EXIT();
}
} else if (role == ServerState::ROLE_PRIMARY) {
VPackBuilder builder;
try {
builder.add(VPackValue("none"));
} catch (...) {
locker->unlock();
LOG(FATAL) << "out of memory"; FATAL_ERROR_EXIT();
}
// register server
AgencyCommResult result =
comm.setValue("Current/DBServers/" + _id, builder.slice(), 0.0);
if (!result.successful()) {
locker->unlock();
LOG(FATAL) << "unable to register db server in agency"; FATAL_ERROR_EXIT();
}
} else if (role == ServerState::ROLE_SECONDARY) {
std::string keyName = _id;
VPackBuilder builder;
try {
builder.add(VPackValue(keyName));
} catch (...) {
locker->unlock();
LOG(FATAL) << "out of memory"; FATAL_ERROR_EXIT();
}
// register server
AgencyCommResult result = comm.casValue(
"Current/DBServers/" + ServerState::instance()->getPrimaryId(),
builder.slice(),
true,
0.0,
0.0);
if (!result.successful()) {
locker->unlock();
// mop: fail gracefully (allow retry)
return false;
}
}
}
_role.store(role, std::memory_order_release);
return true;
}

View File

@ -374,14 +374,16 @@ class ServerState {
RoleEnum loadRole() {
return static_cast<RoleEnum>(_role.load(std::memory_order_consume));
}
//////////////////////////////////////////////////////////////////////////////
/// @brief determine role and save role blocking
//////////////////////////////////////////////////////////////////////////////
void findAndSetRoleBlocking();
//////////////////////////////////////////////////////////////////////////////
/// @brief atomically stores the server role
/// @brief store the server role
//////////////////////////////////////////////////////////////////////////////
void storeRole(RoleEnum role) {
_role.store(role, std::memory_order_release);
}
bool storeRole(RoleEnum role);
//////////////////////////////////////////////////////////////////////////////
/// @brief determine the server role

View File

@ -446,6 +446,10 @@ void RestVocbaseBaseHandler::generateTransactionError(
case TRI_ERROR_CLUSTER_TIMEOUT:
generateError(HttpResponse::SERVER_ERROR, res);
return;
case TRI_ERROR_CLUSTER_BACKEND_UNAVAILABLE:
generateError(HttpResponse::SERVICE_UNAVAILABLE, res, "A required backend was not available");
return;
case TRI_ERROR_CLUSTER_MUST_NOT_CHANGE_SHARDING_ATTRIBUTES:
case TRI_ERROR_CLUSTER_MUST_NOT_SPECIFY_KEY: {

View File

@ -1997,7 +1997,7 @@ int ArangoServer::runScript(TRI_vocbase_t* vocbase) {
v8::Context::Scope contextScope(localContext);
for (size_t i = 0; i < _scriptFile.size(); ++i) {
bool r =
TRI_ExecuteGlobalJavaScriptFile(isolate, _scriptFile[i].c_str());
TRI_ExecuteGlobalJavaScriptFile(isolate, _scriptFile[i].c_str(), true);
if (!r) {
LOG(FATAL) << "cannot load script '" << _scriptFile[i]

View File

@ -138,8 +138,8 @@ void V8ShellFeature::stop() {
v8::Locker locker{_isolate};
v8::Isolate::Scope isolate_scope{_isolate};
TRI_v8_global_t* v8g = \
static_cast<TRI_v8_global_t*>(_isolate->GetData(V8DataSlot));
TRI_v8_global_t* v8g =
static_cast<TRI_v8_global_t*>(_isolate->GetData(V8DataSlot));
_isolate->SetData(V8DataSlot, nullptr);
delete v8g;
@ -246,8 +246,7 @@ V8ClientConnection* V8ShellFeature::setup(
v8connection = std::make_unique<V8ClientConnection>(
connection, client->databaseName(), client->username(),
client->password(), client->requestTimeout());
}
else {
} else {
client = nullptr;
}
}
@ -285,8 +284,10 @@ int V8ShellFeature::runShell(std::vector<std::string> const& positionals) {
V8LineEditor v8LineEditor(_isolate, context, "." + _name + ".history");
v8LineEditor.setSignalFunction(
[&v8connection]() { v8connection->setInterrupted(true); });
if (v8connection != nullptr) {
v8LineEditor.setSignalFunction(
[&v8connection]() { v8connection->setInterrupted(true); });
}
v8LineEditor.open(_console->autoComplete());
@ -372,7 +373,9 @@ int V8ShellFeature::runShell(std::vector<std::string> const& positionals) {
promptError = true;
}
v8connection->setInterrupted(false);
if (v8connection != nullptr) {
v8connection->setInterrupted(false);
}
_console->stopPager();
_console->printLine("");
@ -442,7 +445,7 @@ bool V8ShellFeature::runScript(std::vector<std::string> const& files,
current->ForceSet(TRI_V8_ASCII_STRING2(_isolate, "__dirname"),
TRI_V8_STD_STRING2(_isolate, dirname));
ok = TRI_ExecuteGlobalJavaScriptFile(_isolate, file.c_str());
ok = TRI_ExecuteGlobalJavaScriptFile(_isolate, file.c_str(), true);
// restore old values for __dirname and __filename
if (oldFilename.IsEmpty() || oldFilename->IsUndefined()) {
@ -465,7 +468,7 @@ bool V8ShellFeature::runScript(std::vector<std::string> const& files,
ok = false;
}
} else {
ok = TRI_ParseJavaScriptFile(_isolate, file.c_str());
ok = TRI_ParseJavaScriptFile(_isolate, file.c_str(), true);
}
}
@ -566,7 +569,7 @@ bool V8ShellFeature::jslint(std::vector<std::string> const& files) {
TRI_ExecuteJavaScriptString(_isolate, context, input, name, true);
if (tryCatch.HasCaught()) {
LOG(ERR) << TRI_StringifyV8Exception(_isolate, &tryCatch);
LOG(ERR) << TRI_StringifyV8Exception(_isolate, &tryCatch);
ok = false;
} else {
bool res = TRI_ObjectToBoolean(context->Global()->Get(
@ -855,8 +858,7 @@ void V8ShellFeature::initMode(ShellFeature::RunMode runMode,
TRI_AddGlobalVariableVocbase(
_isolate, context, TRI_V8_ASCII_STRING2(_isolate, "IS_UNIT_TESTS"),
v8::Boolean::New(_isolate,
runMode == ShellFeature::RunMode::UNIT_TESTS));
v8::Boolean::New(_isolate, runMode == ShellFeature::RunMode::UNIT_TESTS));
TRI_AddGlobalVariableVocbase(
_isolate, context, TRI_V8_ASCII_STRING2(_isolate, "IS_JS_LINT"),

View File

@ -70,23 +70,43 @@ if(OPENSSL_USE_STATIC_LIBS)
endif()
if (WIN32)
# http://www.slproweb.com/products/Win32OpenSSL.html
set(_OPENSSL_ROOT_HINTS
${OPENSSL_ROOT_DIR}
"[HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Uninstall\\OpenSSL (32-bit)_is1;Inno Setup: App Path]"
"[HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Uninstall\\OpenSSL (64-bit)_is1;Inno Setup: App Path]"
ENV OPENSSL_ROOT_DIR
)
file(TO_CMAKE_PATH "$ENV{PROGRAMFILES}" _programfiles)
set(_OPENSSL_ROOT_PATHS
"${_programfiles}/OpenSSL"
"${_programfiles}/OpenSSL-Win32"
"${_programfiles}/OpenSSL-Win64"
"C:/OpenSSL/"
"C:/OpenSSL-Win32/"
"C:/OpenSSL-Win64/"
)
unset(_programfiles)
if (IS_DIRECTORY "${OPENSSL_ROOT_DIR}/build/native/")
set(SSL_NUGET TRUE)
else()
set(SSL_NUGET FALSE)
endif()
if (OPENSSL_ROOT_DIR AND SSL_NUGET)
message("Found nuGET installation of OpenSSL!")
set(SSL_BITS "x64")
# its an openssl downloaded via nuget!
set(OPENSSL_INCLUDE "${OPENSSL_ROOT_DIR}/build/native/include")
set(_OPENSSL_ROOT_HINTS "${OPENSSL_ROOT_DIR}/build/native/include")
set(OPENSSL_LIB_DIR "${OPENSSL_ROOT_DIR}/lib/native/v140/windesktop/msvcstl/dyn/rt-dyn/${SSL_BITS}")
set(_OPENSSL_ROOT_HINTS "${OPENSSL_ROOT_DIR}/build/native/include")
set(_OPENSSL_ROOT_PATHS
"${OPENSSL_ROOT_DIR}/build/native/include"
"${OPENSSL_ROOT_DIR}/lib/native/v140/windesktop/msvcstl/dyn/rt-dyn/${SSL_BITS}/")
else()
# http://www.slproweb.com/products/Win32OpenSSL.html
set(_OPENSSL_ROOT_HINTS
${OPENSSL_ROOT_DIR}
"[HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Uninstall\\OpenSSL (32-bit)_is1;Inno Setup: App Path]"
"[HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\Uninstall\\OpenSSL (64-bit)_is1;Inno Setup: App Path]"
ENV OPENSSL_ROOT_DIR
)
file(TO_CMAKE_PATH "$ENV{PROGRAMFILES}" _programfiles)
set(_OPENSSL_ROOT_PATHS
"${_programfiles}/OpenSSL"
"${_programfiles}/OpenSSL-Win32"
"${_programfiles}/OpenSSL-Win64"
"C:/OpenSSL/"
"C:/OpenSSL-Win32/"
"C:/OpenSSL-Win64/"
)
unset(_programfiles)
endif()
else ()
set(_OPENSSL_ROOT_HINTS
${OPENSSL_ROOT_DIR}
@ -110,7 +130,60 @@ find_path(OPENSSL_INCLUDE_DIR
)
if(WIN32 AND NOT CYGWIN)
if(MSVC)
if (SSL_NUGET)
# /MD and /MDd are the standard values - if someone wants to use
# others, the libnames have to change here too
# use also ssl and ssleay32 in debug as fallback for openssl < 0.9.8b
# enable OPENSSL_MSVC_STATIC_RT to get the libs build /MT (Multithreaded no-DLL)
# Implementation details:
# We are using the libraries located in the VC subdir instead of the parent directory eventhough :
# libeay32MD.lib is identical to ../libeay32.lib, and
# ssleay32MD.lib is identical to ../ssleay32.lib
# enable OPENSSL_USE_STATIC_LIBS to use the static libs located in lib/VC/static
#if (OPENSSL_MSVC_STATIC_RT)
# set(_OPENSSL_MSVC_RT_MODE "MT")
#else ()
# set(_OPENSSL_MSVC_RT_MODE "MD")
#endif ()
set(LIB_EAY_DEBUG LIB_EAY_DEBUG-NOTFOUND)
if (EXISTS "${OPENSSL_LIB_DIR}/debug/libeay32.lib")
set(LIB_EAY_DEBUG "${OPENSSL_LIB_DIR}/debug/libeay32.lib")
endif()
set(LIB_EAY_RELEASE LIB_EAY_RELEASE-NOTFOUND)
if (EXISTS "${OPENSSL_LIB_DIR}/release/libeay32.lib")
set(LIB_EAY_RELEASE "${OPENSSL_LIB_DIR}/release/libeay32.lib")
endif()
set(SSL_EAY_DEBUG SSL_EAY_DEBUG-NOTFOUND)
if (EXISTS "${OPENSSL_LIB_DIR}/debug/ssleay32.lib")
set(SSL_EAY_DEBUG "${OPENSSL_LIB_DIR}/debug/ssleay32.lib")
endif()
set(SSL_EAY_RELEASE SSL_EAY_RELEASE-NOTFOUND)
if (EXISTS "${OPENSSL_LIB_DIR}/release/ssleay32.lib")
set(SSL_EAY_RELEASE "${OPENSSL_LIB_DIR}/release/ssleay32.lib")
endif()
set(LIB_EAY_LIBRARY_DEBUG "${LIB_EAY_DEBUG}")
set(LIB_EAY_LIBRARY_RELEASE "${LIB_EAY_RELEASE}")
set(SSL_EAY_LIBRARY_DEBUG "${SSL_EAY_DEBUG}")
set(SSL_EAY_LIBRARY_RELEASE "${SSL_EAY_RELEASE}")
include(${CMAKE_CURRENT_LIST_DIR}/SelectLibraryConfigurations.cmake)
select_library_configurations(LIB_EAY)
select_library_configurations(SSL_EAY)
mark_as_advanced(LIB_EAY_LIBRARY_DEBUG LIB_EAY_LIBRARY_RELEASE
SSL_EAY_LIBRARY_DEBUG SSL_EAY_LIBRARY_RELEASE)
set(OPENSSL_SSL_LIBRARY ${SSL_EAY_LIBRARY} )
set(OPENSSL_CRYPTO_LIBRARY ${LIB_EAY_LIBRARY} )
set(OPENSSL_LIBRARIES ${SSL_EAY_LIBRARY} ${LIB_EAY_LIBRARY} )
elseif(MSVC)
# /MD and /MDd are the standard values - if someone wants to use
# others, the libnames have to change here too
# use also ssl and ssleay32 in debug as fallback for openssl < 0.9.8b
@ -374,7 +447,6 @@ else ()
endif ()
mark_as_advanced(OPENSSL_INCLUDE_DIR OPENSSL_LIBRARIES)
if(OPENSSL_FOUND)
if(NOT TARGET OpenSSL::Crypto AND
(EXISTS "${OPENSSL_CRYPTO_LIBRARY}" OR

View File

@ -34,6 +34,7 @@
var actions = require("@arangodb/actions");
var cluster = require("@arangodb/cluster");
var internal = require("internal");
var _ = require("lodash");
////////////////////////////////////////////////////////////////////////////////
@ -921,42 +922,34 @@ actions.defineHttp({
function changeAllShardReponsibilities (oldServer, newServer) {
// This is only called when we have the write lock and we "only" have to
// make sure that either all or none of the shards are moved.
var l = ArangoAgency.get("Plan/Collections", true, false);
var ll = Object.keys(l);
var i = 0;
var c;
var oldShards = [];
var shards;
var names;
var j;
var collections = ArangoAgency.get("Plan/Collections", true, false);
var done = {};
try {
while (i < ll.length) {
c = l[ll[i]]; // A collection entry
shards = c.shards;
names = Object.keys(shards);
// Poor man's deep copy:
oldShards.push(JSON.parse(JSON.stringify(shards)));
for (j = 0; j < names.length; j++) {
if (shards[names[j]] === oldServer) {
shards[names[j]] = newServer;
Object.keys(collections).forEach(function(collectionKey) {
var collection = collections[collectionKey];
var old = _.cloneDeep(collection);
Object.keys(collection.shards).forEach(function(shardKey) {
var servers = collection.shards[shardKey];
collection.shards[shardKey] = servers.map(function(server) {
if (server == oldServer) {
return newServer;
} else {
return server;
}
}
ArangoAgency.set(ll[i], c, 0);
i += 1;
}
}
catch (e) {
i -= 1;
while (i >= 0) {
c = l[ll[i]];
c.shards = oldShards[i];
try {
ArangoAgency.set(ll[i], c, 0);
}
catch (e2) {
}
i -= 1;
});
});
ArangoAgency.set(collectionKey, collection, 0);
done[collectionKey] = old;
});
} catch (e) {
// mop: rollback
try {
Object.keys(done).forEach(function(collectionKey) {
ArangoAgency.set(collectionKey, done[collectionKey], 0);
});
} catch (e2) {
console.error("Got error during rolback", e2);
}
throw e;
}

View File

@ -1,4 +1,4 @@
/*jshint strict: false, sub: true */
*jshint strict: false, sub: true */
/*global print, arango */
'use strict';
@ -122,7 +122,7 @@ const optionsDocumentation = [
const optionsDefaults = {
"build": "",
"buildType": "",
"buildType": "",
"cleanup": true,
"cluster": false,
"clusterNodes": 2,
@ -2275,15 +2275,18 @@ const benchTodos = [{
"requests": "500",
"concurrency": "3",
"test-case": "aqltrx",
"complexity": "1"
"complexity": "1",
"transaction": true
}, {
"requests": "100",
"concurrency": "3",
"test-case": "counttrx"
"test-case": "counttrx",
"transaction": true
}, {
"requests": "500",
"concurrency": "3",
"test-case": "multitrx"
"test-case": "multitrx",
"transaction": true
}];
testFuncs.arangob = function(options) {
@ -2329,9 +2332,7 @@ testFuncs.arangob = function(options) {
}
// On the cluster we do not yet have working transaction functionality:
if (!options.cluster ||
(benchTodo.test !== "counttrx" &&
benchTodo.test !== "multitrx")) {
if (!options.cluster || !benchTodo.transaction) {
if (!continueTesting) {
print("Skipping " + benchTodo + ", server is gone.");
@ -2346,7 +2347,8 @@ testFuncs.arangob = function(options) {
break;
}
let args = benchTodo;
let args = _.clone(benchTodo);
delete args.transaction;
if (options.hasOwnProperty('benchargs')) {
args = _.extend(args, options.benchargs);
@ -4168,11 +4170,11 @@ function unitTest(cases, options) {
BIN_DIR = fs.join(TOP_DIR, builddir, "bin");
UNITTESTS_DIR = fs.join(TOP_DIR, fs.join(builddir, "tests"));
if (options.buildType !== "") {
if (options.buildType !== "") {
BIN_DIR = fs.join(BIN_DIR, options.buildType);
UNITTESTS_DIR = fs.join(UNITTESTS_DIR, options.buildType);
}
CONFIG_DIR = fs.join(TOP_DIR, builddir, "etc", "arangodb");
ARANGOB_BIN = fs.join(BIN_DIR, "arangob");
ARANGODUMP_BIN = fs.join(BIN_DIR, "arangodump");

View File

@ -375,19 +375,25 @@ Module._resolveDbModule = function (request) {
if (request.charAt(0) !== '/') {
request = '/' + request;
}
var dbModule = Module._dbCache[request];
if (!dbModule && internal.db._modules !== undefined) {
if (!dbModule && internal.db !== undefined && internal.db._modules !== undefined) {
dbModule = internal.db._modules.firstExample({path: request});
if (!dbModule) {
// try again, but prefix module with '/db' as some modules seem
// to have been saved with that prefix...
dbModule = internal.db._modules.firstExample({path: '/db:' + request});
if (!dbModule) {
return null;
}
}
Module._dbCache[request] = dbModule;
}
return dbModule;
};

View File

@ -4989,7 +4989,11 @@ function AQL_DATE_ISO8601 () {
'use strict';
try {
return MAKE_DATE(arguments, "DATE_ISO8601").toISOString();
var dt = MAKE_DATE(arguments, "DATE_ISO8601");
if (dt === null) {
return dt;
}
return dt.toISOString();
}
catch (err) {
WARN("DATE_ISO8601", INTERNAL.errors.ERROR_QUERY_INVALID_DATE_VALUE);

View File

@ -541,9 +541,9 @@ function handleDatabaseChanges (plan) {
/// @brief create collections if they exist in the plan but not locally
////////////////////////////////////////////////////////////////////////////////
function createLocalCollections (plannedCollections, planVersion) {
function createLocalCollections (plannedCollections, planVersion, takeOverResponsibility) {
var ourselves = global.ArangoServerState.id();
var createCollectionAgency = function (database, shard, collInfo, error) {
var payload = { error: error.error,
errorNum: error.errorNum,
@ -551,10 +551,15 @@ function createLocalCollections (plannedCollections, planVersion) {
indexes: collInfo.indexes,
servers: [ ourselves ],
planVersion: planVersion };
global.ArangoAgency.set("Current/Collections/" + database + "/" +
collInfo.planId + "/" + shard,
payload);
};
// mop: just a function alias but this way one at least knows what it is supposed to do :S
var takeOver = createCollectionAgency;
var db = require("internal").db;
db._useDatabase("_system");
@ -579,198 +584,214 @@ function createLocalCollections (plannedCollections, planVersion) {
var collection;
// diff the collections
for (collection in collections) {
if (collections.hasOwnProperty(collection)) {
var collInfo = collections[collection];
var shards = collInfo.shards;
var shard;
Object.keys(collections).forEach(function(collection) {
var collInfo = collections[collection];
var shards = collInfo.shards;
var shard;
collInfo.planId = collInfo.id;
var save = [collInfo.id, collInfo.name];
delete collInfo.id; // must not actually set it here
delete collInfo.name; // name is now shard
collInfo.planId = collInfo.id;
var save = [collInfo.id, collInfo.name];
delete collInfo.id; // must not actually set it here
delete collInfo.name; // name is now shard
for (shard in shards) {
if (shards.hasOwnProperty(shard)) {
if (shards[shard][0] === ourselves) {
// found a shard we are responsible for
for (shard in shards) {
if (shards.hasOwnProperty(shard)) {
var didWrite = false;
if (shards[shard][0] === ourselves) {
// found a shard we are responsible for
var error = { error: false, errorNum: 0,
errorMessage: "no error" };
var error = { error: false, errorNum: 0,
errorMessage: "no error" };
if (! localCollections.hasOwnProperty(shard)) {
// must create this shard
console.info("creating local shard '%s/%s' for central '%s/%s'",
database,
shard,
database,
collInfo.planId);
if (! localCollections.hasOwnProperty(shard)) {
// must create this shard
console.info("creating local shard '%s/%s' for central '%s/%s'",
database,
shard,
database,
collInfo.planId);
try {
if (collInfo.type === ArangoCollection.TYPE_EDGE) {
db._createEdgeCollection(shard, collInfo);
}
else {
db._create(shard, collInfo);
}
try {
if (collInfo.type === ArangoCollection.TYPE_EDGE) {
db._createEdgeCollection(shard, collInfo);
}
catch (err2) {
error = { error: true, errorNum: err2.errorNum,
errorMessage: err2.errorMessage };
console.error("creating local shard '%s/%s' for central '%s/%s' failed: %s",
database,
shard,
database,
collInfo.planId,
JSON.stringify(err2));
else {
db._create(shard, collInfo);
}
}
catch (err2) {
error = { error: true, errorNum: err2.errorNum,
errorMessage: err2.errorMessage };
console.error("creating local shard '%s/%s' for central '%s/%s' failed: %s",
database,
shard,
database,
collInfo.planId,
JSON.stringify(err2));
}
writeLocked({ part: "Current" },
createCollectionAgency,
[ database, shard, collInfo, error ]);
didWrite = true;
}
else {
if (localCollections[shard].status !== collInfo.status) {
console.info("detected status change for local shard '%s/%s'",
database,
shard);
if (collInfo.status === ArangoCollection.STATUS_UNLOADED) {
console.info("unloading local shard '%s/%s'",
database,
shard);
db._collection(shard).unload();
}
else if (collInfo.status === ArangoCollection.STATUS_LOADED) {
console.info("loading local shard '%s/%s'",
database,
shard);
db._collection(shard).load();
}
writeLocked({ part: "Current" },
createCollectionAgency,
[ database, shard, collInfo, error ]);
didWrite = true;
}
else {
if (localCollections[shard].status !== collInfo.status) {
console.info("detected status change for local shard '%s/%s'",
database,
shard);
if (collInfo.status === ArangoCollection.STATUS_UNLOADED) {
console.info("unloading local shard '%s/%s'",
database,
shard);
db._collection(shard).unload();
}
else if (collInfo.status === ArangoCollection.STATUS_LOADED) {
console.info("loading local shard '%s/%s'",
database,
shard);
db._collection(shard).load();
}
writeLocked({ part: "Current" },
createCollectionAgency,
[ database, shard, collInfo, error ]);
// collection exists, now compare collection properties
var properties = { };
var cmp = [ "journalSize", "waitForSync", "doCompact",
"indexBuckets" ];
for (i = 0; i < cmp.length; ++i) {
var p = cmp[i];
if (localCollections[shard][p] !== collInfo[p]) {
// property change
properties[p] = collInfo[p];
}
}
// collection exists, now compare collection properties
var properties = { };
var cmp = [ "journalSize", "waitForSync", "doCompact",
"indexBuckets" ];
for (i = 0; i < cmp.length; ++i) {
var p = cmp[i];
if (localCollections[shard][p] !== collInfo[p]) {
// property change
properties[p] = collInfo[p];
}
if (Object.keys(properties).length > 0) {
console.info("updating properties for local shard '%s/%s'",
database,
shard);
try {
db._collection(shard).properties(properties);
}
catch (err3) {
error = { error: true, errorNum: err3.errorNum,
errorMessage: err3.errorMessage };
}
writeLocked({ part: "Current" },
createCollectionAgency,
[ database, shard, collInfo, error ]);
didWrite = true;
}
}
if (Object.keys(properties).length > 0) {
console.info("updating properties for local shard '%s/%s'",
if (error.error) {
if (takeOverResponsibility && !didWrite) {
writeLocked({ part: "Current" },
takeOver,
[ database, shard, collInfo, error ]);
}
continue; // No point to look for properties and
// indices, if the creation has not worked
}
var indexes = getIndexMap(shard);
var idx;
var index;
if (collInfo.hasOwnProperty("indexes")) {
for (i = 0; i < collInfo.indexes.length; ++i) {
index = collInfo.indexes[i];
var changed = false;
if (index.type !== "primary" && index.type !== "edge" &&
! indexes.hasOwnProperty(index.id)) {
console.info("creating index '%s/%s': %s",
database,
shard);
shard,
JSON.stringify(index));
try {
db._collection(shard).properties(properties);
arangodb.db._collection(shard).ensureIndex(index);
index.error = false;
index.errorNum = 0;
index.errorMessage = "";
}
catch (err3) {
error = { error: true, errorNum: err3.errorNum,
errorMessage: err3.errorMessage };
catch (err5) {
index.error = true;
index.errorNum = err5.errorNum;
index.errorMessage = err5.errorMessage;
}
changed = true;
}
if (changed) {
writeLocked({ part: "Current" },
createCollectionAgency,
[ database, shard, collInfo, error ]);
didWrite = true;
}
}
if (error.error) {
continue; // No point to look for properties and
// indices, if the creation has not worked
}
var changed2 = false;
for (idx in indexes) {
if (indexes.hasOwnProperty(idx)) {
// found an index in the index map, check if it must be deleted
var indexes = getIndexMap(shard);
var idx;
var index;
if (collInfo.hasOwnProperty("indexes")) {
for (i = 0; i < collInfo.indexes.length; ++i) {
index = collInfo.indexes[i];
var changed = false;
if (index.type !== "primary" && index.type !== "edge" &&
! indexes.hasOwnProperty(index.id)) {
console.info("creating index '%s/%s': %s",
database,
shard,
JSON.stringify(index));
try {
arangodb.db._collection(shard).ensureIndex(index);
index.error = false;
index.errorNum = 0;
index.errorMessage = "";
}
catch (err5) {
index.error = true;
index.errorNum = err5.errorNum;
index.errorMessage = err5.errorMessage;
}
changed = true;
}
if (changed) {
writeLocked({ part: "Current" },
createCollectionAgency,
[ database, shard, collInfo, error ]);
}
}
var changed2 = false;
for (idx in indexes) {
if (indexes.hasOwnProperty(idx)) {
// found an index in the index map, check if it must be deleted
if (indexes[idx].type !== "primary" && indexes[idx].type !== "edge") {
var found = false;
for (i = 0; i < collInfo.indexes.length; ++i) {
if (collInfo.indexes[i].id === idx) {
found = true;
break;
}
}
if (! found) {
// found an index to delete locally
changed2 = true;
index = indexes[idx];
console.info("dropping index '%s/%s': %s",
database,
shard,
JSON.stringify(index));
arangodb.db._collection(shard).dropIndex(index);
delete indexes[idx];
collInfo.indexes.splice(i, i);
if (indexes[idx].type !== "primary" && indexes[idx].type !== "edge") {
var found = false;
for (i = 0; i < collInfo.indexes.length; ++i) {
if (collInfo.indexes[i].id === idx) {
found = true;
break;
}
}
if (! found) {
// found an index to delete locally
changed2 = true;
index = indexes[idx];
console.info("dropping index '%s/%s': %s",
database,
shard,
JSON.stringify(index));
arangodb.db._collection(shard).dropIndex(index);
delete indexes[idx];
collInfo.indexes.splice(i, i);
}
}
}
if (changed2) {
writeLocked({ part: "Current" },
createCollectionAgency,
[ database, shard, collInfo, error ]);
}
}
if (changed2) {
writeLocked({ part: "Current" },
createCollectionAgency,
[ database, shard, collInfo, error ]);
didWrite = true;
}
}
if (takeOverResponsibility && !didWrite) {
console.info("HMMMM WRITE");
writeLocked({ part: "Current" },
takeOver,
[ database, shard, collInfo, error ]);
}
}
}
collInfo.id = save[0];
collInfo.name = save[1];
}
}
collInfo.id = save[0];
collInfo.name = save[1];
});
}
catch (err) {
// always return to previous database
@ -1062,20 +1083,20 @@ function synchronizeLocalFollowerCollections (plannedCollections) {
/// @brief handle collection changes
////////////////////////////////////////////////////////////////////////////////
function handleCollectionChanges (plan) {
function handleCollectionChanges (plan, takeOverResponsibility) {
var plannedCollections = getByPrefix3d(plan, "Plan/Collections/");
var ok = true;
try {
createLocalCollections(plannedCollections, plan["Plan/Version"]);
createLocalCollections(plannedCollections, plan["Plan/Version"], takeOverResponsibility);
dropLocalCollections(plannedCollections);
cleanupCurrentCollections(plannedCollections);
synchronizeLocalFollowerCollections(plannedCollections);
}
catch (err) {
console.error("Caught error in handleCollectionChanges: " +
JSON.stringify(err));
JSON.stringify(err), JSON.stringify(err.stack));
ok = false;
}
return ok;
@ -1183,8 +1204,11 @@ function handleChanges (plan, current) {
}
else { // role === "SECONDARY"
if (plan.hasOwnProperty("Plan/DBServers/"+myId)) {
// Ooops! We are now a primary!
changed = ArangoServerState.redetermineRole();
if (!changed) {
// mop: oops...changing role has failed. retry next time.
return false;
}
}
else {
var found = null;
@ -1219,7 +1243,7 @@ function handleChanges (plan, current) {
if (role === "PRIMARY" || role === "COORDINATOR") {
// Note: This is only ever called for DBservers (primary and secondary),
// we keep the coordinator case here just in case...
success = handleCollectionChanges(plan, current);
success = handleCollectionChanges(plan, changed);
}
else {
success = setupReplication();

View File

@ -50,6 +50,12 @@ LoggerFeature::LoggerFeature(application_features::ApplicationServer* server)
void LoggerFeature::collectOptions(std::shared_ptr<ProgramOptions> options) {
LOG_TOPIC(TRACE, Logger::STARTUP) << name() << "::collectOptions";
options->addSection(
Section("", "Global configuration", "global options", false, false));
options->addOption("--log", "the global or topic-specific log level",
new VectorParameter<StringParameter>(&_levels));
options->addSection("log", "Configure the logging");
options->addOption("--log.output,-o", "log destination(s)",
@ -69,9 +75,6 @@ void LoggerFeature::collectOptions(std::shared_ptr<ProgramOptions> options) {
"--log.prefix", "adds a prefix in case multiple instances are running",
new StringParameter(&_prefix));
options->addOption("--log", "the global or topic-specific log level",
new VectorParameter<StringParameter>(&_levels));
options->addHiddenOption("--log.file",
"shortcut for '--log.output file://<filename>'",
new StringParameter(&_file));

View File

@ -2121,6 +2121,15 @@ void TRI_InitializeErrorMessages ();
#define TRI_ERROR_CLUSTER_ONLY_ON_DBSERVER (1477)
////////////////////////////////////////////////////////////////////////////////
/// @brief 1478: ERROR_CLUSTER_BACKEND_UNAVAILABLE
///
/// A cluster backend which was required for the operation could not be reached
///
////////////////////////////////////////////////////////////////////////////////
#define TRI_ERROR_CLUSTER_BACKEND_UNAVAILABLE (1478)
////////////////////////////////////////////////////////////////////////////////
/// @brief 1500: ERROR_QUERY_KILLED
///

View File

@ -219,7 +219,11 @@ Endpoint* Endpoint::factory(const Endpoint::EndpointType type,
return nullptr;
}
#ifndef _WIN32
return new EndpointSrv(specification.substr(6));
#else
return nullptr;
#endif
}
else if (!StringUtils::isPrefix(domainType, "tcp://")) {

View File

@ -425,6 +425,9 @@ HttpResponse::HttpResponseCode HttpResponse::responseCode(int code) {
case TRI_ERROR_OUT_OF_MEMORY:
case TRI_ERROR_INTERNAL:
return SERVER_ERROR;
case TRI_ERROR_CLUSTER_BACKEND_UNAVAILABLE:
return SERVICE_UNAVAILABLE;
case TRI_ERROR_CLUSTER_UNSUPPORTED:
return NOT_IMPLEMENTED;

View File

@ -150,7 +150,7 @@ static void CreateErrorObject(v8::Isolate* isolate, int errorNumber,
////////////////////////////////////////////////////////////////////////////////
static bool LoadJavaScriptFile(v8::Isolate* isolate, char const* filename,
bool execute, bool useGlobalContext) {
bool stripShebang, bool execute, bool useGlobalContext) {
v8::HandleScope handleScope(isolate);
size_t length;
@ -162,17 +162,34 @@ static bool LoadJavaScriptFile(v8::Isolate* isolate, char const* filename,
return false;
}
// detect shebang
size_t bangOffset = 0;
if (stripShebang) {
if (strncmp(content, "#!", 2) == 0) {
// shebang
char const* endOfBang = strchr(content, '\n');
if (endOfBang != nullptr) {
bangOffset = size_t(endOfBang - content + 1);
TRI_ASSERT(bangOffset <= length);
length -= bangOffset;
}
}
}
if (useGlobalContext) {
char const* prologue = "(function() { ";
char const* epilogue = "/* end-of-file */ })()";
char* contentWrapper = TRI_Concatenate3String(TRI_UNKNOWN_MEM_ZONE,
prologue, content, epilogue);
prologue, content + bangOffset, epilogue);
TRI_FreeString(TRI_UNKNOWN_MEM_ZONE, content);
length += strlen(prologue) + strlen(epilogue);
content = contentWrapper;
// shebang already handled here
bangOffset = 0;
}
if (content == nullptr) {
@ -182,7 +199,7 @@ static bool LoadJavaScriptFile(v8::Isolate* isolate, char const* filename,
}
v8::Handle<v8::String> name = TRI_V8_STRING(filename);
v8::Handle<v8::String> source = TRI_V8_PAIR_STRING(content, (int)length);
v8::Handle<v8::String> source = TRI_V8_PAIR_STRING(content + bangOffset, (int)length);
TRI_FreeString(TRI_UNKNOWN_MEM_ZONE, content);
@ -225,7 +242,7 @@ static bool LoadJavaScriptFile(v8::Isolate* isolate, char const* filename,
////////////////////////////////////////////////////////////////////////////////
static bool LoadJavaScriptDirectory(v8::Isolate* isolate, char const* path,
bool execute, bool useGlobalContext) {
bool stripShebang, bool execute, bool useGlobalContext) {
v8::HandleScope scope(isolate);
bool result;
@ -246,7 +263,7 @@ static bool LoadJavaScriptDirectory(v8::Isolate* isolate, char const* path,
full = TRI_Concatenate2File(path, filename.c_str());
ok = LoadJavaScriptFile(isolate, full, execute, useGlobalContext);
ok = LoadJavaScriptFile(isolate, full, stripShebang, execute, useGlobalContext);
TRI_FreeString(TRI_CORE_MEM_ZONE, full);
result = result && ok;
@ -3828,8 +3845,8 @@ void TRI_LogV8Exception(v8::Isolate* isolate, v8::TryCatch* tryCatch) {
////////////////////////////////////////////////////////////////////////////////
bool TRI_ExecuteGlobalJavaScriptFile(v8::Isolate* isolate,
char const* filename) {
return LoadJavaScriptFile(isolate, filename, true, false);
char const* filename, bool stripShebang) {
return LoadJavaScriptFile(isolate, filename, stripShebang, true, false);
}
////////////////////////////////////////////////////////////////////////////////
@ -3838,7 +3855,7 @@ bool TRI_ExecuteGlobalJavaScriptFile(v8::Isolate* isolate,
bool TRI_ExecuteGlobalJavaScriptDirectory(v8::Isolate* isolate,
char const* path) {
return LoadJavaScriptDirectory(isolate, path, true, false);
return LoadJavaScriptDirectory(isolate, path, false, true, false);
}
////////////////////////////////////////////////////////////////////////////////
@ -3847,15 +3864,16 @@ bool TRI_ExecuteGlobalJavaScriptDirectory(v8::Isolate* isolate,
bool TRI_ExecuteLocalJavaScriptDirectory(v8::Isolate* isolate,
char const* path) {
return LoadJavaScriptDirectory(isolate, path, true, true);
return LoadJavaScriptDirectory(isolate, path, false, true, true);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief parses a file
////////////////////////////////////////////////////////////////////////////////
bool TRI_ParseJavaScriptFile(v8::Isolate* isolate, char const* filename) {
return LoadJavaScriptFile(isolate, filename, false, false);
bool TRI_ParseJavaScriptFile(v8::Isolate* isolate, char const* filename,
bool stripShebang) {
return LoadJavaScriptFile(isolate, filename, stripShebang, false, false);
}
////////////////////////////////////////////////////////////////////////////////

View File

@ -114,7 +114,7 @@ void TRI_LogV8Exception(v8::Isolate* isolate, v8::TryCatch*);
/// @brief reads a file into the current context
////////////////////////////////////////////////////////////////////////////////
bool TRI_ExecuteGlobalJavaScriptFile(v8::Isolate* isolate, char const*);
bool TRI_ExecuteGlobalJavaScriptFile(v8::Isolate* isolate, char const*, bool);
////////////////////////////////////////////////////////////////////////////////
/// @brief reads all files from a directory into the current context
@ -132,7 +132,7 @@ bool TRI_ExecuteLocalJavaScriptDirectory(v8::Isolate* isolate, char const*);
/// @brief parses a file
////////////////////////////////////////////////////////////////////////////////
bool TRI_ParseJavaScriptFile(v8::Isolate* isolate, char const*);
bool TRI_ParseJavaScriptFile(v8::Isolate* isolate, char const*, bool);
////////////////////////////////////////////////////////////////////////////////
/// @brief executes a string within a V8 context, optionally print the result

View File

@ -25,6 +25,8 @@ if [ ! -z "$3" ] ; then
fi
fi
SECONDARIES="$4"
if [ -z "$XTERMOPTIONS" ] ; then
XTERMOPTIONS="-fa Monospace-14 -bg white -fg black -geometry 80x43"
fi
@ -57,7 +59,6 @@ start() {
--log.requests-file cluster/$PORT.req \
--server.disable-statistics true \
--server.foxx-queues false \
--server.foxx-queues false \
--javascript.startup-directory ./js \
--server.disable-authentication true \
--javascript.app-path ./js/apps \
@ -163,6 +164,35 @@ for p in `seq 8530 $PORTTOPCO` ; do
testServer $p
done
if [ -n "$SECONDARIES" ]; then
let index=1
PORTTOPSE=`expr 8729 + $NRDBSERVERS - 1`
for PORT in `seq 8729 $PORTTOPSE` ; do
mkdir cluster/data$PORT
CLUSTER_ID="Secondary$index"
echo Registering secondary $CLUSTER_ID for "DBServer$index"
curl -f -X PUT --data "{\"primary\": \"DBServer$index\", \"oldSecondary\": \"none\", \"newSecondary\": \"$CLUSTER_ID\"}" -H "Content-Type: application/json" localhost:8530/_admin/cluster/replaceSecondary
echo Starting Secondary $CLUSTER_ID on port $PORT
build/bin/arangod --database.directory cluster/data$PORT \
--cluster.agency-endpoint tcp://127.0.0.1:4001 \
--cluster.my-address tcp://127.0.0.1:$PORT \
--server.endpoint tcp://127.0.0.1:$PORT \
--cluster.my-id $CLUSTER_ID \
--log.file cluster/$PORT.log \
--log.requests-file cluster/$PORT.req \
--server.disable-statistics true \
--server.foxx-queues false \
--javascript.startup-directory ./js \
--server.disable-authentication true \
--javascript.app-path ./js/apps \
> cluster/$PORT.stdout 2>&1 &
let index=$index+1
done
fi
echo Bootstrapping DBServers...
curl -s -X POST "http://127.0.0.1:8530/_admin/cluster/bootstrapDbServers" \
-d '{"isRelaunch":false}' >> cluster/DBServersUpgrade.log 2>&1

0
utils/generateErrorfile.py Executable file → Normal file
View File

View File

@ -1,60 +0,0 @@
#!/bin/bash
export PID=$$
if test -n "$ORIGINAL_PATH"; then
# running in cygwin...
PS='\'
export EXT=".exe"
else
export EXT=""
PS='/'
fi;
SCRIPT="utils${PS}generateExamples.js"
LOGFILE="out${PS}log-$PID"
DBDIR="out${PS}data-$PID"
mkdir -p ${DBDIR}
echo Database has its data in ${DBDIR}
echo Logfile is in ${LOGFILE}
if [ -z "${ARANGOD}" ]; then
if [ -x build/bin/arangod ]; then
ARANGOD=build/bin/arangod
elif [ -x bin/arangosh ]; then
ARANGOD=bin/arangod
else
echo "$0: cannot locate arangod"
fi
fi
${ARANGOD} \
--configuration none \
--cluster.agent-path bin${PS}etcd-arango${EXT} \
--cluster.arangod-path bin${PS}arangod \
--cluster.coordinator-config etc${PS}relative${PS}arangod-coordinator.conf \
--cluster.dbserver-config etc${PS}relative${PS}arangod-dbserver.conf \
--cluster.disable-dispatcher-frontend false \
--cluster.disable-dispatcher-kickstarter false \
--cluster.data-path cluster \
--cluster.log-path cluster \
--database.directory ${DBDIR} \
--log.file ${LOGFILE} \
--server.endpoint tcp://127.0.0.1:$PORT \
--javascript.startup-directory js \
--javascript.app-path js${PS}apps \
--javascript.script $SCRIPT \
--no-server \
--temp-path ${PS}var${PS}tmp \
"${ARGS[@]}" \
if test $? -eq 0; then
echo "removing ${LOGFILE} ${DBDIR}"
rm -rf ${LOGFILE} ${DBDIR}
else
echo "failed - don't remove ${LOGFILE} ${DBDIR} - here's the logfile:"
cat ${LOGFILE}
fi
echo Server has terminated.

View File

@ -1,58 +1,93 @@
/*jshint globalstrict:false, unused:false */
/*global start_pretty_print */
'use strict';
var fs = require("fs");
var internal = require("internal");
var executeExternal = require("internal").executeExternal;
var executeExternalAndWait = internal.executeExternalAndWait;
var download = require("internal").download;
var print = internal.print;
var wait = require("internal").wait;
var killExternal = require("internal").killExternal;
var toArgv = require("internal").toArgv;
var statusExternal = require("internal").statusExternal;
const fs = require("fs");
const internal = require("internal");
const executeExternal = internal.executeExternal;
const executeExternalAndWait = internal.executeExternalAndWait;
const download = internal.download;
const print = internal.print;
const wait = internal.wait;
const killExternal = internal.killExternal;
const toArgv = internal.toArgv;
const statusExternal = internal.statusExternal;
const testPort = internal.testPort;
var yaml = require("js-yaml");
var endpointToURL = require("@arangodb/cluster/planner").endpointToURL;
var PortFinder = require("@arangodb/cluster").PortFinder;
const yaml = require("js-yaml");
var documentationSourceDirs = [
const documentationSourceDirs = [
fs.join(fs.makeAbsolute(''), "Documentation/Examples/setup-arangosh.js"),
fs.join(fs.makeAbsolute(''), "Documentation/Books/Users"),
fs.join(fs.makeAbsolute(''), "js/actions"),
fs.join(fs.makeAbsolute(''), "js/client"),
fs.join(fs.makeAbsolute(''), "js/common"),
fs.join(fs.makeAbsolute(''), "js/server"),
fs.join(fs.makeAbsolute(''), "js/apps/system/_api/gharial/APP")];
fs.join(fs.makeAbsolute(''), "Documentation/DocuBlocks"),
fs.join(fs.makeAbsolute(''), "Documentation/Books/Users")
];
var theScript = 'Documentation/Scripts/generateExamples.py';
const theScript = 'utils/generateExamples.py';
var scriptArguments = {
const scriptArguments = {
'outputDir': fs.join(fs.makeAbsolute(''), "Documentation/Examples"),
'outputFile': '/tmp/arangosh.examples.js'
'outputFile': fs.join(fs.makeAbsolute(''), "arangosh.examples.js")
};
function main (argv) {
"use strict";
var thePython = 'python';
var test = argv[1];
var options = {};
var serverEndpoint = '';
var startServer = true;
var instanceInfo = {};
var serverCrashed = false;
var protocol = 'tcp';
var tmpDataDir = fs.getTempFile();
var count = 0;
let ARANGOD;
let ARANGOSH;
if (fs.exists("bin")) {
ARANGOD = fs.join(fs.join(fs.makeAbsolute('')), "bin/arangod");
ARANGOSH = fs.join(fs.join(fs.makeAbsolute('')), "bin/arangosh");
}
else {
ARANGOD = fs.join(fs.join(fs.makeAbsolute('')), "build/bin/arangod");
ARANGOSH = fs.join(fs.join(fs.makeAbsolute('')), "build/bin/arangosh");
}
function endpointToURL(endpoint) {
if (endpoint.substr(0, 6) === "ssl://") {
return "https://" + endpoint.substr(6);
}
const pos = endpoint.indexOf("://");
if (pos === -1) {
return "http://" + endpoint;
}
return "http" + endpoint.substr(pos);
}
function findFreePort() {
while (true) {
const port = Math.floor(Math.random() * (65536 - 1024)) + 1024;
const free = testPort("tcp://0.0.0.0:" + port);
if (free) {
return port;
}
}
return 8529;
}
function main(argv) {
let thePython = 'python';
let options = {};
let serverEndpoint = '';
let startServer = true;
let instanceInfo = {};
let serverCrashed = false;
let protocol = 'tcp';
let tmpDataDir = fs.getTempFile();
let count = 0;
try {
options = internal.parseArgv(argv, 1);
}
catch (x) {
options = internal.parseArgv(argv, 0);
} catch (x) {
print("failed to parse the options: " + x.message);
return -1;
}
print(options);
if (options.hasOwnProperty('withPython')) {
thePython = options.withPython;
}
@ -65,13 +100,13 @@ function main (argv) {
startServer = false;
serverEndpoint = options['server.endpoint'];
}
var args = [theScript].concat(internal.toArgv(scriptArguments));
let args = [theScript].concat(internal.toArgv(scriptArguments));
args = args.concat(['--arangoshSetup']);
args = args.concat(documentationSourceDirs);
// internal.print(JSON.stringify(args));
let res = executeExternalAndWait(thePython, args);
var res = executeExternalAndWait(thePython, args);
if (res.exit !== 0) {
print("parsing the examples failed - aborting!");
print(res);
@ -79,36 +114,46 @@ function main (argv) {
}
if (startServer) {
// We use the PortFinder to find a free port for our subinstance,
// to this end, we have to fake a dummy dispatcher:
var dispatcher = {endpoint: "tcp://127.0.0.1:", avoidPorts: {}, id: "me"};
var pf = new PortFinder([8529],dispatcher);
var port = pf.next();
let port = findFreePort();
instanceInfo.port = port;
serverEndpoint = protocol+"://127.0.0.1:"+port;
serverEndpoint = protocol + "://127.0.0.1:" + port;
instanceInfo.url = endpointToURL(serverEndpoint);
var serverArgs = {};
fs.makeDirectoryRecursive(fs.join(tmpDataDir, "data"));
let serverArgs = {};
serverArgs["configuration"] = "none";
serverArgs["database.directory"] = fs.join(tmpDataDir, "data");
serverArgs["javascript.app-path"] = fs.join(tmpDataDir, "apps");
serverArgs["javascript.startup-directory"] = "js";
serverArgs["log.file"] = fs.join(tmpDataDir, "log");
serverArgs["server.disable-authentication"] = "true";
serverArgs["server.endpoint"] = serverEndpoint;
serverArgs["database.directory"] = fs.join(tmpDataDir,"data");
fs.makeDirectoryRecursive(fs.join(tmpDataDir,"data"));
args["log.file"] = fs.join(tmpDataDir,"log");
instanceInfo.pid = executeExternal(fs.join("bin","arangod"), toArgv(serverArgs));
serverArgs["server.threads"] = "3";
print("================================================================================");
print(toArgv(serverArgs));
instanceInfo.pid = executeExternal(ARANGOD, toArgv(serverArgs));
// Wait until the server is up:
count = 0;
instanceInfo.endpoint = serverEndpoint;
while (true) {
wait(0.5, false);
var r = download(instanceInfo.url + "/_api/version", "");
let r = download(instanceInfo.url + "/_api/version", "");
if (! r.error && r.code === 200) {
if (!r.error && r.code === 200) {
break;
}
count ++;
count++;
if (count % 60 === 0) {
res = statusExternal(instanceInfo.pid, false);
if (res.status !== "RUNNING") {
print("start failed - process is gone: " + yaml.safeDump(res));
return 1;
@ -116,67 +161,71 @@ function main (argv) {
}
}
}
var arangoshArgs = {
let arangoshArgs = {
'configuration': fs.join(fs.makeAbsolute(''), 'etc', 'relative', 'arangosh.conf'),
'server.password': "",
'server.endpoint': serverEndpoint,
'javascript.execute': scriptArguments.outputFile
};
res = executeExternalAndWait('bin/arangosh', internal.toArgv(arangoshArgs));
res = executeExternalAndWait(ARANGOSH, internal.toArgv(arangoshArgs));
if (startServer) {
if (typeof(instanceInfo.exitStatus) === 'undefined') {
download(instanceInfo.url+"/_admin/shutdown","");
download(instanceInfo.url + "/_admin/shutdown", "");
print("Waiting for server shut down");
count = 0;
var bar = "[";
let bar = "[";
while (1) {
instanceInfo.exitStatus = statusExternal(instanceInfo.pid, false);
if (instanceInfo.exitStatus.status === "RUNNING") {
count ++;
count++;
if (typeof(options.valgrind) === 'string') {
wait(1);
continue;
}
if (count % 10 ===0) {
if (count % 10 === 0) {
bar = bar + "#";
}
if (count > 600) {
print("forcefully terminating " + yaml.safeDump(instanceInfo.pid) +
" after 600 s grace period; marking crashy.");
" after 600 s grace period; marking crashy.");
serverCrashed = true;
killExternal(instanceInfo.pid);
break;
}
else {
} else {
wait(1);
}
}
else if (instanceInfo.exitStatus.status !== "TERMINATED") {
} else if (instanceInfo.exitStatus.status !== "TERMINATED") {
if (instanceInfo.exitStatus.hasOwnProperty('signal')) {
print("Server shut down with : " +
yaml.safeDump(instanceInfo.exitStatus) +
" marking build as crashy.");
yaml.safeDump(instanceInfo.exitStatus) +
" marking build as crashy.");
serverCrashed = true;
break;
}
if (require("internal").platform.substr(0,3) === 'win') {
if (internal.platform.substr(0, 3) === 'win') {
// Windows: wait for procdump to do its job...
statusExternal(instanceInfo.monitor, true);
}
}
else {
} else {
print("Server shutdown: Success.");
break; // Success.
}
}
if (count > 10) {
print("long Server shutdown: " + bar + ']');
}
}
}
return 0;
}
main(ARGUMENTS);

View File

@ -1,8 +1,4 @@
################################################################################
### @brief creates examples from documentation files
###
### @file
###
### DISCLAIMER
###
### Copyright by triAGENS GmbH - All rights reserved.
@ -128,14 +124,13 @@ OPTION_OUTPUT_DIR = 2
OPTION_FILTER = 3
OPTION_OUTPUT_FILE = 4
fstate = OPTION_NORMAL
escapeBS = re.compile("\\\\")
doubleBS = "\\\\\\\\"
################################################################################
### @brief generate arangosh example headers with functions etc. needed later
################################################################################
def generateArangoshHeader():
headerF = open("./Documentation/Scripts/exampleHeader.js", "r")
print headerF.read()
@ -144,6 +139,7 @@ def generateArangoshHeader():
################################################################################
### @brief Try to match the start of a command section
################################################################################
regularStartLine = re.compile(r'^(/// )? *@EXAMPLE_ARANGOSH_OUTPUT{([^}]*)}')
runLine = re.compile(r'^(/// )? *@EXAMPLE_ARANGOSH_RUN{([^}]*)}')
@ -160,6 +156,7 @@ def matchStartLine(line, filename):
if name in ArangoshFiles:
print >> sys.stderr, "%s\nduplicate test name '%s' in file %s!\n%s\n" % ('#' * 80, name, filename, '#' * 80)
sys.exit(1)
# if we match for filters, only output these!
if ((FilterForTestcase != None) and not FilterForTestcase.match(name)):
filterTestList.append(name)
@ -184,6 +181,7 @@ def matchStartLine(line, filename):
ArangoshFiles[name] = True
return (name, STATE_ARANGOSH_RUN)
# Not found, remain in STATE_BEGIN
return ("", STATE_BEGIN)
@ -194,9 +192,11 @@ TESTLINES="testlines"
TYPE="type"
LINE_NO="lineNo"
STRING="string"
################################################################################
### @brief loop over the lines of one input file
################################################################################
def analyzeFile(f, filename):
global RunTests, TESTLINES, TYPE, LINE_NO, STRING
strip = None
@ -294,7 +294,6 @@ def generateSetupFunction():
print "(function () {\n%s}());" % ArangoshSetup
print
################################################################################
### @brief generate arangosh example
################################################################################
@ -302,6 +301,7 @@ def generateSetupFunction():
loopDetectRE = re.compile(r'^[ \n]*(while|if|var|throw|for) ')
expectErrorRE = re.compile(r'.*// *xpError\((.*)\).*')
#expectErrorRE = re.compile(r'.*//\s*xpError\(([^)]*)\)/')
def generateArangoshOutput(testName):
value = RunTests[testName]
#print value
@ -370,7 +370,6 @@ def generateArangoshOutput(testName):
}());
'''
################################################################################
### @brief generate arangosh run
################################################################################
@ -429,6 +428,7 @@ def generateArangoshRun(testName):
################################################################################
### @brief generate arangosh run
################################################################################
def generateArangoshShutdown():
print '''
if (allErrors.length > 0) {
@ -437,29 +437,35 @@ if (allErrors.length > 0) {
}
'''
################################################################################
### @brief get file names
################################################################################
def loopDirectories():
global ArangoshSetup, OutputDir, FilterForTestcase
argv = sys.argv
argv.pop(0)
filenames = []
fstate = OPTION_NORMAL
for filename in argv:
if filename == "--arangoshSetup":
fstate = OPTION_ARANGOSH_SETUP
continue
if filename == "--onlyThisOne":
fstate = OPTION_FILTER
continue
if filename == "--outputDir":
fstate = OPTION_OUTPUT_DIR
continue
if filename == "--outputFile":
fstate = OPTION_OUTPUT_FILE
continue
if fstate == OPTION_NORMAL:
if os.path.isdir(filename):
for root, dirs, files in os.walk(filename):
@ -468,11 +474,12 @@ def loopDirectories():
filenames.append(os.path.join(root, file))
else:
filenames.append(filename)
elif fstate == OPTION_FILTER:
fstate = OPTION_NORMAL
if (len(filename) > 0):
FilterForTestcase = re.compile(filename);
elif fstate == OPTION_ARANGOSH_SETUP:
fstate = OPTION_NORMAL
f = open(filename, "r")
@ -486,11 +493,11 @@ def loopDirectories():
elif fstate == OPTION_OUTPUT_DIR:
fstate = OPTION_NORMAL
OutputDir = filename
elif fstate == OPTION_OUTPUT_FILE:
fstate = OPTION_NORMAL
sys.stdout = open(filename, 'w')
print >> sys.stderr, repr(filenames)
for filename in filenames:
if (filename.find("#") < 0):
f = open(filename, "r")
@ -512,10 +519,10 @@ def generateTestCases():
elif RunTests[thisTest][TYPE] == STATE_ARANGOSH_RUN:
generateArangoshRun(thisTest)
################################################################################
### @brief main
################################################################################
loopDirectories()
print >> sys.stderr, "filtering test cases %s" %(filterTestList)

49
utils/generateExamples.sh Executable file
View File

@ -0,0 +1,49 @@
#!/bin/bash
# Wrapper that runs the documentation example generator
# (utils/generateExamples.js) inside arangosh, using throw-away database and
# log directories named after this shell's PID. On success the scratch files
# are removed; on failure the logfile is dumped and kept for inspection.

export PID=$$

if test -n "$ORIGINAL_PATH"; then
  # running in cygwin...
  PS='\'
  export EXT=".exe"
else
  export EXT=""
  PS='/'
fi;

SCRIPT="utils${PS}generateExamples.js"
LOGFILE="out${PS}log-$PID"
DBDIR="out${PS}data-$PID"

mkdir -p "${DBDIR}"

echo "Database has its data in ${DBDIR}"
echo "Logfile is in ${LOGFILE}"

# Locate arangosh: honor a pre-set ARANGOSH, otherwise prefer the build tree
# binary over an in-source one.
if [ -z "${ARANGOSH}" ]; then
  if [ -x build/bin/arangosh ]; then
    ARANGOSH=build/bin/arangosh
  elif [ -x bin/arangosh ]; then
    ARANGOSH=bin/arangosh
  else
    # BUG FIX: the original only echoed this message and fell through,
    # invoking an empty ${ARANGOSH} command below with a confusing
    # "command not found" error. Abort explicitly instead.
    echo "$0: cannot locate arangosh" >&2
    exit 1
  fi
fi

# NOTE(review): ARGS is never assigned in this script; it is presumably an
# array set by a sourcing caller (bash arrays cannot be exported). Confirm
# with callers — if plain script arguments were intended, this should be "$@".
${ARANGOSH} \
  --configuration none \
  --server.endpoint none \
  --log.file "${LOGFILE}" \
  --javascript.startup-directory js \
  --javascript.execute "$SCRIPT" \
  --server.password "" \
  "${ARGS[@]}"

if test $? -eq 0; then
  echo "removing ${LOGFILE} ${DBDIR}"
  rm -rf "${LOGFILE}" "${DBDIR}" arangosh.examples.js
else
  echo "failed - don't remove ${LOGFILE} ${DBDIR} - here's the logfile:"
  cat "${LOGFILE}"
fi

echo Server has terminated.

0
utils/generateMimetypes.py Executable file → Normal file
View File

View File

@ -1010,7 +1010,7 @@ def example_arangosh_run(cargo, r=Regexen()):
exampleHeader = brTrim(operation['x-examples'][currentExample]).strip()
# new examples code TODO should include for each example own object in json file
examplefile = open(os.path.join(os.path.dirname(__file__), '../Examples/' + parameters(last) + '.generated'))
examplefile = open(os.path.join(os.path.dirname(__file__), '../Documentation/Examples/' + parameters(last) + '.generated'))
operation['x-examples'][currentExample]= '<details><summary>Example: ' + exampleHeader.strip('\n ') + '</summary><br><br><pre><code class="json">'
for line in examplefile.readlines():

9
utils/generateSwagger.sh Executable file
View File

@ -0,0 +1,9 @@
#!/bin/bash
# Generate the Swagger (api-docs.json) description for the aardvark web UI
# from the REST DocuBlocks, by invoking utils/generateSwagger.py with the
# repository root as working base.
ROOTDIR="$(pwd)"
APPDIR="${ROOTDIR}/js/apps/system/_admin/aardvark/APP"
python \
  "${ROOTDIR}/utils/generateSwagger.py" \
  "${ROOTDIR}" \
  "${APPDIR}/api-docs" \
  api-docs \
  "${ROOTDIR}/Documentation/DocuBlocks/Rest" \
  > "${APPDIR}/api-docs.json"