1
0
Fork 0

Merge branch 'devel' into obi-velocystream-try-merge-devel

* devel: (24 commits)
  minor fixes
  fix effect that on second install we will complain about mismatching passwords
  only test for cleaning up 3rdparty if we're parametrized to do so.
  fix CMake so it finds the PythonInterpreter even with older versions
  hexify corrupted markers
  Fix comments
  fixed cluster start
  issue #2022: double max allowed request body size, cap --batch-size value in arangoimp
  fixed issue #2023: added replicationFactor to docs
  improve the perf script
  fix perfsetupscript
  clean up perf script
  add SYSTEM flag to boost includes to avoid warnings
  Adding Foxx access to agency
  Adding Foxx access to agency
  fix compile warning
  Add missing windows library.
  fix windows compile problems.
  Fix syntax error in windows client installer.
  If we have relative paths that are working, make them absolute so they still work after CWD's of arangod
  ...

Conflicts:
	arangod/Agency/AgentConfiguration.cpp
	arangod/GeneralServer/HttpCommTask.cpp
	arangod/RestServer/DatabaseFeature.cpp
	arangod/VocBase/datafile.cpp
This commit is contained in:
Jan Christoph Uhde 2016-08-30 22:04:31 +02:00
commit ed111a39a1
29 changed files with 526 additions and 251 deletions

View File

@ -321,7 +321,7 @@ if (MSVC)
#http://lists.boost.org/boost-users/2016/04/85968.php
add_definitions("-D_ENABLE_ATOMIC_ALIGNMENT_FIX")
set(MSVC_LIBS crypt32.lib;WINMM.LIB;Ws2_32.lib)
set(MSVC_LIBS Shlwapi.lib;crypt32.lib;WINMM.LIB;Ws2_32.lib)
set(CMAKE_EXE_LINKER_FLAGS
"${CMAKE_EXE_LINKER_FLAGS} /SUBSYSTEM:CONSOLE /SAFESEH:NO /MACHINE:x64 /ignore:4099 ${BASE_LD_FLAGS}"
@ -708,7 +708,7 @@ if (NOT USE_BOOST_UNITTESTS)
message(STATUS "BOOST unit-tests are disabled")
endif ()
include_directories(${Boost_INCLUDE_DIR})
include_directories(SYSTEM ${Boost_INCLUDE_DIR})
add_definitions(-DARANGODB_BOOST_VERSION=\"${Boost_VERSION}\")
################################################################################

View File

@ -94,8 +94,19 @@ and the hash value is used to determine the target shard.
**Note**: Values of shard key attributes cannot be changed once set.
This option is meaningless in a single server setup.
@RESTBODYPARAM{replicationFactor,integer,optional,int64}
(The default is *1*): in a cluster, this attribute determines how many copies
of each shard are kept on different DBServers. The value 1 means that only one
copy (no synchronous replication) is kept. A value of k means that k-1 replicas
are kept. Any two copies reside on different DBServers. Replication between them is
synchronous, that is, every write operation to the "leader" copy will be replicated
to all "follower" replicas, before the write operation is reported successful.
If a server fails, this is detected automatically and one of the servers holding
copies takes over, usually without an error being reported.
@RESTDESCRIPTION
Creates an new collection with a given name. The request must contain an
Creates a new collection with a given name. The request must contain an
object with the following attributes.

View File

@ -101,16 +101,6 @@ if test -f last_compiled_version.sha; then
fi
COMPILE_MATTERS="3rdParty"
CLEAN_IT=1
if test -n "$LASTREV"; then
lines=`git diff ${LASTREV}: ${COMPILE_MATTERS} | wc -l`
if test $lines -eq 0; then
echo "no relevant changes, no need for full recompile"
CLEAN_IT=0
fi
fi
# setup make options
if test -z "${CXX}"; then
@ -193,6 +183,8 @@ case "$1" in
;;
esac
CLEAN_IT=0
while [ $# -gt 0 ]; do
case "$1" in
@ -303,7 +295,11 @@ while [ $# -gt 0 ]; do
TARGET_DIR=$1
shift
;;
--checkCleanBuild)
CLEAN_IT=1
shift
;;
*)
echo "Unknown option: $1"
exit 1
@ -311,6 +307,18 @@ while [ $# -gt 0 ]; do
esac
done
if test -n "$LASTREV"; then
lines=`git diff ${LASTREV}: ${COMPILE_MATTERS} | wc -l`
if test $lines -eq 0; then
echo "no relevant changes, no need for full recompile"
CLEAN_IT=0
fi
fi
if [ "$GCC5" == 1 ]; then
CC=/usr/bin/gcc-5
CXX=/usr/bin/g++-5

View File

@ -68,10 +68,6 @@ Function disableBackButton
EnableWindow $0 0
FunctionEnd
SetShellVarContext all
FunctionEnd
!include Sections.nsh
;--- Component support macros: ---

View File

@ -15,8 +15,9 @@ if [ "$1" = "configure" -a -z "$2" ]; then
/usr/sbin/arango-init-database \
--uid arangodb --gid arangodb || true
fi
db_set arangodb3/password_again ""
db_set arangodb3/password ""
db_go
fi
# check if we should upgrade the database directory

View File

@ -180,7 +180,7 @@ bool config_t::activePushBack(std::string const& id) {
std::vector<std::string> config_t::gossipPeers() const {
READ_LOCKER(readLocker, _lock);
return _gossipPeers;
}
@ -195,7 +195,7 @@ void config_t::eraseFromGossipPeers(std::string const& endpoint) {
bool config_t::addToPool(std::pair<std::string,std::string> const& i) {
WRITE_LOCKER(readLocker, _lock);
WRITE_LOCKER(readLocker, _lock);
if (_pool.find(i.first) == _pool.end()) {
_pool[i.first] = i.second;
} else {
@ -287,14 +287,14 @@ void config_t::override(VPackSlice const& conf) {
LOG_TOPIC(ERR, Logger::AGENCY)
<< "Failed to override " << agencySizeStr << " from " << conf.toJson();
}
if (conf.hasKey(poolSizeStr) && conf.get(poolSizeStr).isUInt()) {
_poolSize = conf.get(poolSizeStr).getUInt();
} else {
LOG_TOPIC(ERR, Logger::AGENCY)
<< "Failed to override " << poolSizeStr << " from " << conf.toJson();
}
if (conf.hasKey(minPingStr) && conf.get(minPingStr).isDouble()) {
_minPing = conf.get(minPingStr).getDouble();
} else {
@ -313,7 +313,7 @@ void config_t::override(VPackSlice const& conf) {
_pool.clear();
for (auto const& peer : VPackArrayIterator(conf.get(poolStr))) {
auto key = peer.get(idStr).copyString();
auto value = peer.get(endpointStr).copyString();
auto value = peer.get(endpointStr).copyString();
_pool[key] = value;
}
} else {
@ -365,7 +365,7 @@ void config_t::override(VPackSlice const& conf) {
}
/// @brief vpack representation
query_t config_t::toBuilder() const {
query_t ret = std::make_shared<arangodb::velocypack::Builder>();
@ -409,8 +409,8 @@ bool config_t::setId(std::string const& i) {
/// @brief merge from persisted configuration
bool config_t::merge(VPackSlice const& conf) {
bool config_t::merge(VPackSlice const& conf) {
WRITE_LOCKER(writeLocker, _lock); // All must happen under the lock or else ...
_id = conf.get(idStr).copyString(); // I get my id
@ -476,7 +476,7 @@ bool config_t::merge(VPackSlice const& conf) {
ss << "Min RAFT interval: ";
if (_minPing == 0) { // Command line beats persistence
if (conf.hasKey(minPingStr)) {
_minPing = conf.get(minPingStr).getNumericValue<double>();
_minPing = conf.get(minPingStr).getDouble();
ss << _minPing << " (persisted)";
} else {
_minPing = 0.5;
@ -491,7 +491,7 @@ bool config_t::merge(VPackSlice const& conf) {
ss << "Max RAFT interval: ";
if (_maxPing == 0) { // Command line beats persistence
if (conf.hasKey(maxPingStr)) {
_maxPing = conf.get(maxPingStr).getNumericValue<double>();
_maxPing = conf.get(maxPingStr).getDouble();
ss << _maxPing << " (persisted)";
} else {
_maxPing = 2.5;
@ -548,6 +548,6 @@ bool config_t::merge(VPackSlice const& conf) {
LOG_TOPIC(DEBUG, Logger::AGENCY) << ss.str();
return true;
}

View File

@ -40,39 +40,149 @@ using namespace arangodb::application_features;
using namespace arangodb::basics;
using namespace arangodb::consensus;
static void JS_LeadingVulpes(v8::FunctionCallbackInfo<v8::Value> const& args) {
static void JS_EnabledAgent(v8::FunctionCallbackInfo<v8::Value> const& args) {
v8::Isolate* isolate = args.GetIsolate();
TRI_V8_TRY_CATCH_BEGIN(isolate);
v8::HandleScope scope(isolate);
try {
ApplicationServer::getEnabledFeature<AgencyFeature>("Agency");
TRI_V8_RETURN_TRUE();
} catch (std::exception const& e) {
TRI_V8_RETURN_FALSE();
}
TRI_V8_TRY_CATCH_END
}
static void JS_LeadingAgent(v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_TRY_CATCH_BEGIN(isolate);
v8::HandleScope scope(isolate);
Agent* agent = nullptr;
try {
AgencyFeature* feature =
ApplicationServer::getEnabledFeature<AgencyFeature>("AgencyFeature");
ApplicationServer::getEnabledFeature<AgencyFeature>("Agency");
agent = feature->agent();
} catch (std::exception const& e) {
TRI_V8_THROW_EXCEPTION_MESSAGE(
TRI_ERROR_INTERNAL,
std::string("couldn't access agency feature: ") + e.what());
}
v8::Handle<v8::Object> r = v8::Object::New(isolate);
r->Set(TRI_V8_ASCII_STRING("leading"),
v8::Boolean::New(isolate, agent->leading()));
TRI_V8_RETURN(r);
TRI_V8_TRY_CATCH_END
}
static void JS_ReadAgent(v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_TRY_CATCH_BEGIN(isolate);
v8::HandleScope scope(isolate);
Agent* agent = nullptr;
try {
AgencyFeature* feature =
ApplicationServer::getEnabledFeature<AgencyFeature>("Agency");
agent = feature->agent();
} catch (std::exception const& e) {
TRI_V8_THROW_EXCEPTION_MESSAGE(
TRI_ERROR_INTERNAL,
std::string("couldn't access agency feature: ") + e.what());
}
query_t query = std::make_shared<Builder>();
int res = TRI_V8ToVPack(isolate, *query, args[0], false);
if (res != TRI_ERROR_NO_ERROR) {
TRI_V8_THROW_EXCEPTION(res);
}
read_ret_t ret = agent->read(query);
if (ret.accepted) { // Leading
TRI_V8_RETURN(TRI_VPackToV8(isolate, ret.result->slice()));
} else { // Not leading
TRI_V8_RETURN_FALSE();
}
TRI_V8_TRY_CATCH_END
}
static void JS_WriteAgent(v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_TRY_CATCH_BEGIN(isolate);
v8::HandleScope scope(isolate);
Agent* agent = nullptr;
try {
AgencyFeature* feature =
ApplicationServer::getEnabledFeature<AgencyFeature>("Agency");
agent = feature->agent();
} catch (std::exception const& e) {
TRI_V8_THROW_EXCEPTION_MESSAGE(
TRI_ERROR_INTERNAL,
std::string("couldn't access agency feature: ") + e.what());
}
v8::Handle<v8::Object> r = v8::Object::New(isolate);
query_t query = std::make_shared<Builder>();
int res = TRI_V8ToVPack(isolate, *query, args[0], false);
if (res != TRI_ERROR_NO_ERROR) {
TRI_V8_THROW_EXCEPTION(res);
}
r->Set(TRI_V8_ASCII_STRING("leading"),
v8::Boolean::New(isolate, agent->leading()));
TRI_V8_RETURN(r);
write_ret_t ret = agent->write(query);
if (ret.accepted) { // Leading
size_t errors = 0;
Builder body;
body.openObject();
body.add("results", VPackValue(VPackValueType::Array));
for (auto const& index : ret.indices) {
body.add(VPackValue(index));
if (index == 0) {
errors++;
}
}
body.close(); body.close();
// Wait for commit of highest except if it is 0?
arangodb::consensus::index_t max_index = 0;
try {
max_index =
*std::max_element(ret.indices.begin(), ret.indices.end());
} catch (std::exception const& e) {
LOG_TOPIC(WARN, Logger::AGENCY)
<< e.what() << " " << __FILE__ << __LINE__;
}
if (max_index > 0) {
agent->waitFor(max_index);
}
TRI_V8_RETURN(TRI_VPackToV8(isolate, body.slice()));
} else { // Not leading
TRI_V8_RETURN_FALSE();
}
}
static void JS_ReadVulpes(v8::FunctionCallbackInfo<v8::Value> const& args) {
}
static void JS_WriteVulpes(v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_TRY_CATCH_END
}
@ -88,30 +198,32 @@ void TRI_InitV8Agency(v8::Isolate* isolate, v8::Handle<v8::Context> context) {
// ...........................................................................
ft = v8::FunctionTemplate::New(isolate);
ft->SetClassName(TRI_V8_ASCII_STRING("ArangoVulpes"));
ft->SetClassName(TRI_V8_ASCII_STRING("ArangoAgent"));
rt = ft->InstanceTemplate();
rt->SetInternalFieldCount(2);
TRI_AddMethodVocbase(
isolate, rt, TRI_V8_ASCII_STRING("leading"), JS_LeadingVulpes);
isolate, rt, TRI_V8_ASCII_STRING("enabled"), JS_EnabledAgent);
TRI_AddMethodVocbase(
isolate, rt, TRI_V8_ASCII_STRING("read"), JS_ReadVulpes);
isolate, rt, TRI_V8_ASCII_STRING("leading"), JS_LeadingAgent);
TRI_AddMethodVocbase(
isolate, rt, TRI_V8_ASCII_STRING("write"), JS_WriteVulpes);
isolate, rt, TRI_V8_ASCII_STRING("read"), JS_ReadAgent);
TRI_AddMethodVocbase(
isolate, rt, TRI_V8_ASCII_STRING("write"), JS_WriteAgent);
v8g->VulpesTempl.Reset(isolate, rt);
ft->SetClassName(TRI_V8_ASCII_STRING("ArangoVuplesCtor"));
v8g->AgentTempl.Reset(isolate, rt);
ft->SetClassName(TRI_V8_ASCII_STRING("ArangoAgentCtor"));
TRI_AddGlobalFunctionVocbase(
isolate, context, TRI_V8_ASCII_STRING("ArangoVuplesCtor"),
isolate, context, TRI_V8_ASCII_STRING("ArangoAgentCtor"),
ft->GetFunction(), true);
// register the global object
v8::Handle<v8::Object> aa = rt->NewInstance();
if (!aa.IsEmpty()) {
TRI_AddGlobalVariableVocbase(
isolate, context, TRI_V8_ASCII_STRING("ArangoVuples"), aa);
isolate, context, TRI_V8_ASCII_STRING("ArangoAgent"), aa);
}
}

View File

@ -36,9 +36,9 @@ using namespace arangodb;
using namespace arangodb::basics;
using namespace arangodb::rest;
size_t const HttpCommTask::MaximalHeaderSize = 1 * 1024 * 1024; // 1 MB
size_t const HttpCommTask::MaximalBodySize = 512 * 1024 * 1024; // 512 MB
size_t const HttpCommTask::MaximalPipelineSize = 512 * 1024 * 1024; // 512 MB
size_t const HttpCommTask::MaximalHeaderSize = 2 * 1024 * 1024; // 2 MB
size_t const HttpCommTask::MaximalBodySize = 1024 * 1024 * 1024; // 1024 MB
size_t const HttpCommTask::MaximalPipelineSize = 1024 * 1024 * 1024; // 1024 MB
size_t const HttpCommTask::RunCompactEvery = 500;
HttpCommTask::HttpCommTask(GeneralServer* server, TRI_socket_t sock,

View File

@ -136,7 +136,7 @@ void BootstrapFeature::start() {
auto vocbase = DatabaseFeature::DATABASE->systemDatabase();
auto ss = ServerState::instance();
if (!ss->isRunningInCluster() && !ss->isAgent()) {
if (!ss->isRunningInCluster()) {
LOG_TOPIC(DEBUG, Logger::STARTUP) << "Running server/server.js";
V8DealerFeature::DEALER->loadJavascript(vocbase, "server/server.js");
} else if (ss->isCoordinator()) {

View File

@ -23,8 +23,10 @@
#include "DatabaseFeature.h"
#include "ApplicationFeatures/ApplicationServer.h"
#include "Agency/v8-agency.h"
#include "Aql/QueryCache.h"
#include "Aql/QueryRegistry.h"
#include "Basics/ArangoGlobalContext.h"
#include "Basics/FileUtils.h"
#include "Basics/MutexLocker.h"
#include "Basics/StringUtils.h"
@ -77,7 +79,7 @@ void DatabaseManagerThread::run() {
auto databaseFeature = ApplicationServer::getFeature<DatabaseFeature>("Database");
auto dealer = ApplicationServer::getFeature<V8DealerFeature>("V8Dealer");
int cleanupCycles = 0;
StorageEngine* engine = EngineSelectorFeature::ENGINE;
while (true) {
@ -140,7 +142,7 @@ void DatabaseManagerThread::run() {
// delete persistent indexes for this database
RocksDBFeature::dropDatabase(database->id());
#endif
LOG(TRACE) << "physically removing database directory '"
<< engine->databasePath(database) << "' of database '" << database->name()
<< "'";
@ -176,7 +178,7 @@ void DatabaseManagerThread::run() {
break;
}
usleep(waitTime());
usleep(waitTime());
// The following is only necessary after a wait:
auto queryRegistry = QueryRegistryFeature::QUERY_REGISTRY;
@ -287,7 +289,7 @@ void DatabaseFeature::validateOptions(std::shared_ptr<ProgramOptions> options) {
<< TRI_JOURNAL_MINIMAL_SIZE;
FATAL_ERROR_EXIT();
}
// sanity check
if (_checkVersion && _upgrade) {
LOG(FATAL) << "cannot specify both '--database.check-version' and "
@ -331,19 +333,19 @@ void DatabaseFeature::start() {
// start database manager thread
_databaseManager.reset(new DatabaseManagerThread);
if (!_databaseManager->start()) {
LOG(FATAL) << "could not start database manager thread";
FATAL_ERROR_EXIT();
}
// TODO: handle _upgrade and _checkVersion here
// activate deadlock detection in case we're not running in cluster mode
if (!arangodb::ServerState::instance()->isRunningInCluster()) {
enableDeadlockDetection();
}
// update all v8 contexts
updateContexts();
}
@ -366,13 +368,13 @@ void DatabaseFeature::unprepare() {
usleep(5000);
}
}
try {
try {
closeDroppedDatabases();
} catch (...) {
// we're in the shutdown... simply ignore any errors produced here
}
_databaseManager.reset();
try {
@ -485,7 +487,7 @@ int DatabaseFeature::createDatabase(TRI_voc_tick_t id, std::string const& name,
if (!TRI_vocbase_t::IsAllowedName(false, name)) {
return TRI_ERROR_ARANGO_DATABASE_NAME_INVALID;
}
if (id == 0) {
id = TRI_NewTickServer();
}
@ -519,7 +521,7 @@ int DatabaseFeature::createDatabase(TRI_voc_tick_t id, std::string const& name,
// create database in storage engine
StorageEngine* engine = EngineSelectorFeature::ENGINE;
// createDatabase must return a valid database or throw
vocbase.reset(engine->createDatabase(id, builder.slice()));
vocbase.reset(engine->createDatabase(id, builder.slice()));
TRI_ASSERT(vocbase != nullptr);
try {
@ -529,8 +531,8 @@ int DatabaseFeature::createDatabase(TRI_voc_tick_t id, std::string const& name,
<< vocbase->name() << "' failed: " << ex.what();
FATAL_ERROR_EXIT();
}
// enable deadlock detection
// enable deadlock detection
vocbase->_deadlockDetector.enabled(!arangodb::ServerState::instance()->isRunningInCluster());
// create application directories
@ -662,7 +664,7 @@ int DatabaseFeature::dropDatabase(std::string const& name, bool writeMarker, boo
id = vocbase->id();
// mark as deleted
TRI_ASSERT(vocbase->type() == TRI_VOCBASE_TYPE_NORMAL);
if (!vocbase->markAsDropped()) {
// deleted by someone else?
return TRI_ERROR_ARANGO_DATABASE_NOT_FOUND;
@ -689,7 +691,7 @@ int DatabaseFeature::dropDatabase(std::string const& name, bool writeMarker, boo
arangodb::aql::QueryCache::instance()->invalidate(vocbase);
res = engine->prepareDropDatabase(vocbase);
if (res == TRI_ERROR_NO_ERROR) {
if (writeMarker) {
// TODO: what shall happen in case writeDropMarker() fails?
@ -697,7 +699,7 @@ int DatabaseFeature::dropDatabase(std::string const& name, bool writeMarker, boo
}
}
}
if (res == TRI_ERROR_NO_ERROR && waitForDeletion) {
engine->waitUntilDeletion(id, true);
}
@ -727,7 +729,7 @@ int DatabaseFeature::dropDatabase(TRI_voc_tick_t id, bool writeMarker, bool wait
// and call the regular drop function
return dropDatabase(name, writeMarker, waitForDeletion, removeAppsDirectory);
}
std::vector<TRI_voc_tick_t> DatabaseFeature::getDatabaseIdsCoordinator(bool includeSystem) {
std::vector<TRI_voc_tick_t> ids;
{
@ -749,7 +751,7 @@ std::vector<TRI_voc_tick_t> DatabaseFeature::getDatabaseIdsCoordinator(bool incl
std::vector<TRI_voc_tick_t> DatabaseFeature::getDatabaseIds(bool includeSystem) {
std::vector<TRI_voc_tick_t> ids;
{
auto unuser(_databasesProtector.use());
auto theLists = _databasesLists.load();
@ -846,7 +848,7 @@ TRI_vocbase_t* DatabaseFeature::useDatabaseCoordinator(TRI_voc_tick_t id) {
TRI_vocbase_t* DatabaseFeature::useDatabaseCoordinator(std::string const& name) {
auto unuser(_databasesProtector.use());
auto theLists = _databasesLists.load();
auto it = theLists->_coordinatorDatabases.find(name);
if (it != theLists->_coordinatorDatabases.end()) {
@ -854,7 +856,7 @@ TRI_vocbase_t* DatabaseFeature::useDatabaseCoordinator(std::string const& name)
vocbase->use();
return vocbase;
}
return nullptr;
}
@ -876,7 +878,7 @@ TRI_vocbase_t* DatabaseFeature::useDatabase(std::string const& name) {
TRI_vocbase_t* DatabaseFeature::useDatabase(TRI_voc_tick_t id) {
auto unuser(_databasesProtector.use());
auto theLists = _databasesLists.load();
for (auto& p : theLists->_databases) {
TRI_vocbase_t* vocbase = p.second;
@ -901,7 +903,7 @@ TRI_vocbase_t* DatabaseFeature::lookupDatabaseCoordinator(
TRI_vocbase_t* vocbase = it->second;
return vocbase;
}
return nullptr;
}
@ -942,6 +944,7 @@ void DatabaseFeature::updateContexts() {
TRI_InitV8VocBridge(isolate, context, queryRegistry, vocbase, i);
TRI_InitV8Queries(isolate, context);
TRI_InitV8Cluster(isolate, context);
TRI_InitV8Agency(isolate, context);
},
vocbase);
}
@ -1012,7 +1015,7 @@ int DatabaseFeature::createBaseApplicationDirectory(std::string const& appPath,
std::string const& type) {
int res = TRI_ERROR_NO_ERROR;
std::string path = arangodb::basics::FileUtils::buildFilename(appPath, type);
if (!TRI_IsDirectory(path.c_str())) {
std::string errorMessage;
long systemError;
@ -1066,7 +1069,7 @@ int DatabaseFeature::createApplicationDirectory(std::string const& name, std::st
<< "' for database '" << name << "': " << errorMessage;
}
}
return res;
}
@ -1074,7 +1077,7 @@ int DatabaseFeature::createApplicationDirectory(std::string const& name, std::st
int DatabaseFeature::iterateDatabases(VPackSlice const& databases) {
V8DealerFeature* dealer = ApplicationServer::getFeature<V8DealerFeature>("V8Dealer");
std::string const appPath = dealer->appPath();
StorageEngine* engine = EngineSelectorFeature::ENGINE;
int res = TRI_ERROR_NO_ERROR;
@ -1088,7 +1091,7 @@ int DatabaseFeature::iterateDatabases(VPackSlice const& databases) {
for (auto const& it : VPackArrayIterator(databases)) {
TRI_ASSERT(it.isObject());
VPackSlice deleted = it.get("deleted");
if (deleted.isBoolean() && deleted.getBoolean()) {
// ignore deleted databases here
@ -1110,7 +1113,7 @@ int DatabaseFeature::iterateDatabases(VPackSlice const& databases) {
TRI_vocbase_t* vocbase = engine->openDatabase(it, _upgrade);
// we found a valid database
TRI_ASSERT(vocbase != nullptr);
try {
vocbase->addReplicationApplier(TRI_CreateReplicationApplier(vocbase));
} catch (std::exception const& ex) {
@ -1131,7 +1134,7 @@ int DatabaseFeature::iterateDatabases(VPackSlice const& databases) {
_databasesLists = newLists;
_databasesProtector.scan();
delete oldLists;
return res;
}
@ -1179,7 +1182,7 @@ void DatabaseFeature::closeDroppedDatabases() {
delete oldList; // Note that this does not delete the TRI_vocbase_t pointers!
}
void DatabaseFeature::verifyAppPaths() {
// create shared application directory js/apps
V8DealerFeature* dealer = ApplicationServer::getFeature<V8DealerFeature>("V8Dealer");
@ -1199,7 +1202,7 @@ void DatabaseFeature::verifyAppPaths() {
THROW_ARANGO_EXCEPTION(res);
}
}
// create subdirectory js/apps/_db if not yet present
int res = createBaseApplicationDirectory(appPath, "_db");

View File

@ -35,6 +35,7 @@
#include "VocBase/ticks.h"
#include <sstream>
#include <iomanip>
// #define DEBUG_DATAFILE 1
@ -45,8 +46,8 @@ namespace {
/// @brief check if a marker appears to be created by ArangoDB 28
static TRI_voc_crc_t Crc28(TRI_voc_crc_t crc, void const* data, size_t length) {
static TRI_voc_crc_t const CrcPolynomial = 0xEDB88320;
unsigned char* current = (unsigned char*) data;
static TRI_voc_crc_t const CrcPolynomial = 0xEDB88320;
unsigned char* current = (unsigned char*) data;
while (length--) {
crc ^= *current++;
@ -54,22 +55,22 @@ static TRI_voc_crc_t Crc28(TRI_voc_crc_t crc, void const* data, size_t length) {
if (crc & 1) {
crc = (crc >> 1) ^ CrcPolynomial;
} else {
crc = crc >> 1;
crc = crc >> 1;
}
}
}
}
return crc;
}
static bool IsMarker28 (void const* marker) {
struct Marker28 {
TRI_voc_size_t _size;
TRI_voc_crc_t _crc;
uint32_t _type;
TRI_voc_size_t _size;
TRI_voc_crc_t _crc;
uint32_t _type;
#ifdef TRI_PADDING_32
char _padding_df_marker[4];
#endif
TRI_voc_tick_t _tick;
TRI_voc_tick_t _tick;
};
TRI_voc_size_t zero = 0;
@ -555,7 +556,7 @@ static bool CheckDatafile(TRI_datafile_t* datafile, bool ignoreFailures) {
if (ignoreFailures) {
return FixDatafile(datafile, currentSize);
}
datafile->_lastError =
TRI_set_errno(TRI_ERROR_ARANGO_CORRUPTED_DATAFILE);
datafile->_currentSize = currentSize;
@ -574,7 +575,7 @@ static bool CheckDatafile(TRI_datafile_t* datafile, bool ignoreFailures) {
if (ignoreFailures) {
return FixDatafile(datafile, currentSize);
}
datafile->_lastError =
TRI_set_errno(TRI_ERROR_ARANGO_CORRUPTED_DATAFILE);
datafile->_currentSize = currentSize;
@ -599,7 +600,7 @@ static bool CheckDatafile(TRI_datafile_t* datafile, bool ignoreFailures) {
if (ignoreFailures) {
return FixDatafile(datafile, currentSize);
}
datafile->_lastError =
TRI_set_errno(TRI_ERROR_ARANGO_CORRUPTED_DATAFILE);
datafile->_currentSize = currentSize;
@ -622,7 +623,7 @@ static bool CheckDatafile(TRI_datafile_t* datafile, bool ignoreFailures) {
bool nextMarkerOk = false;
if (size > 0) {
auto next = reinterpret_cast<char const*>(marker) + size;
auto next = reinterpret_cast<char const*>(marker) + DatafileHelper::AlignedSize<size_t>(size);
auto p = next;
if (p < end) {
@ -669,17 +670,76 @@ static bool CheckDatafile(TRI_datafile_t* datafile, bool ignoreFailures) {
if (ignoreFailures) {
return FixDatafile(datafile, currentSize);
}
datafile->_lastError =
TRI_set_errno(TRI_ERROR_ARANGO_CORRUPTED_DATAFILE);
datafile->_currentSize = currentSize;
datafile->_next = datafile->_data + datafile->_currentSize;
datafile->_state = TRI_DF_STATE_OPEN_ERROR;
LOG(WARN) << "crc mismatch found in datafile '" << datafile->getName() << "' at position " << currentSize << ". expected crc: " << CalculateCrcValue(marker) << ", actual crc: " << marker->getCrc();
LOG(WARN) << "crc mismatch found in datafile '" << datafile->getName()
<< "' at position " << currentSize
<< ", of size " << datafile->_maximalSize;
LOG(WARN) << "crc mismatch found inside marker of type '" << TRI_NameMarkerDatafile(marker)
<< "' and size " << size
<< ". expected crc: " << CalculateCrcValue(marker)
<< ", actual crc: " << marker->getCrc();
{
LOG(INFO) << "raw marker data following:";
char const* p = reinterpret_cast<char const*>(marker);
char const* e = reinterpret_cast<char const*>(marker) + DatafileHelper::AlignedSize<size_t>(size);
std::string line;
std::string raw;
size_t printed = 0;
while (p < e) {
// print offset
line.append("0x");
uintptr_t offset = static_cast<uintptr_t>(p - datafile->_data);
for (size_t i = 0; i < 8; ++i) {
uint8_t c = static_cast<uint8_t>((offset & (0xFFULL << 8 * (7 - i))) >> 8 * (7 - i));
uint8_t n1 = c >> 4;
uint8_t n2 = c & 0x0F;
line.push_back((n1 < 10) ? ('0' + n1) : 'A' + n1 - 10);
line.push_back((n2 < 10) ? ('0' + n2) : 'A' + n2 - 10);
}
// print data
line.append(": ");
for (size_t i = 0; i < 16; ++i) {
if (p >= e) {
line.append(" ");
} else {
uint8_t c = static_cast<uint8_t>(*p++);
uint8_t n1 = c >> 4;
uint8_t n2 = c & 0x0F;
line.push_back((n1 < 10) ? ('0' + n1) : 'A' + n1 - 10);
line.push_back((n2 < 10) ? ('0' + n2) : 'A' + n2 - 10);
line.push_back(' ');
raw.push_back((c < 32 || c >= 127) ? '.' : static_cast<unsigned char>(c));
++printed;
}
}
LOG(INFO) << line << " " << raw;
line.clear();
raw.clear();
if (printed >= 2048) {
LOG(INFO) << "(output truncated due to excessive length)";
break;
}
}
}
if (nextMarkerOk) {
LOG(INFO) << "data directly following this marker looks ok so repairing the marker may recover it";
LOG(INFO) << "please restart the server with the parameter '--wal.ignore-logfile-errors true' to repair the marker";
} else {
LOG(WARN) << "data directly following this marker cannot be analyzed";
}
@ -920,7 +980,7 @@ static TRI_datafile_t* OpenDatafile(std::string const& filename, bool ignoreErro
// read header from file
char buffer[128];
memset(&buffer[0], 0, sizeof(buffer));
memset(&buffer[0], 0, sizeof(buffer));
ssize_t len = sizeof(TRI_df_header_marker_t);
@ -1666,7 +1726,7 @@ static std::string DiagnoseMarker(TRI_df_marker_t const* marker,
if (marker->getCrc() == crc) {
return "crc checksum is correct";
}
result << "crc checksum (hex " << std::hex << marker->getCrc()
<< ") is wrong. expecting (hex " << std::hex << crc << ")";
@ -1818,8 +1878,8 @@ TRI_datafile_t::TRI_datafile_t(std::string const& filename, int fd, void* mmHand
_state(TRI_DF_STATE_READ),
_fd(fd),
_mmHandle(mmHandle),
_initSize(maximalSize),
_maximalSize(maximalSize),
_initSize(maximalSize),
_maximalSize(maximalSize),
_currentSize(currentSize),
_footerSize(sizeof(TRI_df_footer_marker_t)),
_data(data),
@ -1845,7 +1905,7 @@ TRI_datafile_t::TRI_datafile_t(std::string const& filename, int fd, void* mmHand
TRI_MMFileAdvise(_data, _maximalSize, TRI_MADVISE_SEQUENTIAL);
}
}
TRI_datafile_t::~TRI_datafile_t() {
try {
this->close();
@ -1892,13 +1952,13 @@ int TRI_datafile_t::close() {
_fd = -1;
return TRI_ERROR_NO_ERROR;
}
}
if (_state == TRI_DF_STATE_CLOSED) {
LOG(WARN) << "closing an already closed datafile '" << getName() << "'";
return TRI_ERROR_NO_ERROR;
}
}
return TRI_ERROR_ARANGO_ILLEGAL_STATE;
}
@ -1918,4 +1978,4 @@ bool TRI_datafile_t::sync(char const* begin, char const* end) {
return TRI_MSync(_fd, begin, end);
}

View File

@ -161,6 +161,16 @@ void ImportFeature::validateOptions(
StringUtils::join(positionals, ", ");
FATAL_ERROR_EXIT();
}
static unsigned const MaxBatchSize = 768 * 1024 * 1024;
if (_chunkSize > MaxBatchSize) {
// it's not sensible to raise the batch size beyond this value
// because the server has a built-in limit for the batch size too
// and will reject bigger HTTP request bodies
LOG(WARN) << "capping --batch-size value to " << MaxBatchSize;
_chunkSize = MaxBatchSize;
}
}
void ImportFeature::start() {

View File

@ -3,32 +3,32 @@
# we can't use RUNTIME DESTINATION here.
install(
FILES ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOBENCH}${CMAKE_EXECUTABLE_SUFFIX}
PROGRAMS ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOBENCH}${CMAKE_EXECUTABLE_SUFFIX}
DESTINATION ${CMAKE_INSTALL_BINDIR})
install_config(arangobench)
install(
FILES ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGODUMP}${CMAKE_EXECUTABLE_SUFFIX}
PROGRAMS ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGODUMP}${CMAKE_EXECUTABLE_SUFFIX}
DESTINATION ${CMAKE_INSTALL_BINDIR})
install_config(arangodump)
install(
FILES ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOIMP}${CMAKE_EXECUTABLE_SUFFIX}
PROGRAMS ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOIMP}${CMAKE_EXECUTABLE_SUFFIX}
DESTINATION ${CMAKE_INSTALL_BINDIR})
install_config(arangoimp)
install(
FILES ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGORESTORE}${CMAKE_EXECUTABLE_SUFFIX}
PROGRAMS ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGORESTORE}${CMAKE_EXECUTABLE_SUFFIX}
DESTINATION ${CMAKE_INSTALL_BINDIR})
install_config(arangorestore)
install(
FILES ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOSH}${CMAKE_EXECUTABLE_SUFFIX}
PROGRAMS ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOSH}${CMAKE_EXECUTABLE_SUFFIX}
DESTINATION ${CMAKE_INSTALL_BINDIR})
install_config(arangosh)
install(
FILES ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOVPACK}${CMAKE_EXECUTABLE_SUFFIX}
PROGRAMS ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOVPACK}${CMAKE_EXECUTABLE_SUFFIX}
DESTINATION ${CMAKE_INSTALL_BINDIR})
install_command_alias(${BIN_ARANGOSH}

View File

@ -16,8 +16,6 @@ endif ()
set(CMAKE_INSTALL_SYSCONFDIR_ARANGO "${CMAKE_INSTALL_SYSCONFDIR}/arangodb3")
set(CMAKE_INSTALL_FULL_SYSCONFDIR_ARANGO "${CMAKE_INSTALL_FULL_SYSCONFDIR}/arangodb3")
file(TO_NATIVE_PATH "${CMAKE_INSTALL_FULL_SYSCONFDIR_ARANGO}" ETCDIR_NATIVE)
# database directory
FILE(MAKE_DIRECTORY "${PROJECT_BINARY_DIR}/var/lib/arangodb3")

View File

@ -8,9 +8,23 @@ cmake_minimum_required(VERSION 2.8)
# variables from the main build have to be explicitely forwarded:
################################################################################
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY "@PROJECT_BINARY_DIR@/bin/")
set(CMAKE_INSTALL_BINDIR ${CMAKE_RUNTIME_OUTPUT_DIRECTORY})
set(CMAKE_RUNTIME_OUTPUT_DIRECTORY_X ${CMAKE_RUNTIME_OUTPUT_DIRECTORY})
set(CMAKE_INSTALL_BINDIR @CMAKE_INSTALL_BINDIR@)
set(CMAKE_INSTALL_FULL_BINDIR @CMAKE_INSTALL_FULL_BINDIR@)
set(CMAKE_INSTALL_DATAROOTDIR @CMAKE_INSTALL_DATAROOTDIR@)
set(CMAKE_INSTALL_DATAROOTDIR_ARANGO @CMAKE_INSTALL_DATAROOTDIR_ARANGO@)
set(CMAKE_INSTALL_FULL_DATAROOTDIR_ARANGO @CMAKE_INSTALL_FULL_DATAROOTDIR_ARANGO@)
set(CMAKE_INSTALL_DIR @CMAKE_INSTALL_DIR@)
set(CMAKE_INSTALL_PREFIX @CMAKE_INSTALL_PREFIX@)
set(CMAKE_INSTALL_SYSCONFDIR @CMAKE_INSTALL_SYSCONFDIR@)
set(CMAKE_INSTALL_SYSCONFDIR_ARANGO @CMAKE_INSTALL_SYSCONFDIR_ARANGO@)
set(CMAKE_INSTALL_FULL_SYSCONFDIR_ARANGO @CMAKE_INSTALL_FULL_SYSCONFDIR_ARANGO@)
################################################################################
# Substitute the install binaries:
################################################################################
@ -29,21 +43,20 @@ set(ARANGODB_SOURCE_DIR @ARANGODB_SOURCE_DIR@)
set(ARANGODB_VERSION @ARANGODB_VERSION@)
set(ARANGODB_PACKAGE_CONTACT @ARANGODB_PACKAGE_CONTACT@)
set(ARANGODB_PACKAGE_REVISION @ARANGODB_PACKAGE_REVISION@)
set(CMAKE_INSTALL_FULL_BINDIR @CMAKE_INSTALL_FULL_BINDIR@)
set(ARANGODB_PACKAGE_VENDOR @ARANGODB_PACKAGE_VENDOR@)
set(CMAKE_TARGET_ARCHITECTURES @CMAKE_TARGET_ARCHITECTURES@)
set(CMAKE_INSTALL_SYSCONFDIR_ARANGO @CMAKE_INSTALL_SYSCONFDIR_ARANGO@)
set(CMAKE_INSTALL_FULL_SYSCONFDIR_ARANGO @CMAKE_INSTALL_FULL_SYSCONFDIR_ARANGO@)
set(ORIGINAL_SOURCE_DIR @PROJECT_SOURCE_DIR@)
set(PROJECT_SOURCE_DIR @PROJECT_SOURCE_DIR@)
################################################################################
# Get the final values for cpack:
################################################################################
set(CPACK_PACKAGE_VERSION "${ARANGODB_VERSION}")
set(CPACK_PACKAGE_NAME "arangodb3-client")
set(CPACK_DEBIAN_PACKAGE_SECTION "shell")
set(CPACK_PACKAGE_VENDOR ${ARANGODB_PACKAGE_VENDOR})
set(CPACK_PACKAGE_CONTACT ${ARANGODB_PACKAGE_CONTACT})
set(CPACK_RESOURCE_FILE_LICENSE "${PROJECT_SOURCE_DIR}/LICENSE")
set(CPACK_DEBIAN_PACKAGE_HOMEPAGE ${ARANGODB_URL_INFO_ABOUT})
@ -51,6 +64,10 @@ set(CPACK_DEBIAN_PACKAGE_SHLIBDEPS ON)
set(CPACK_DEBIAN_PACKAGE_CONFLICTS "arangodb, arangodb3")
set(CPACK_DEBIAN_COMPRESSION_TYPE "xz")
set(CPACK_COMPONENTS_ALL debian-extras)
set(CPACK_GENERATOR "DEB")
set(CPACK_SET_DESTDIR ON)
set(CPACK_PACKAGING_INSTALL_PREFIX ${CMAKE_INSTALL_PREFIX})
file(READ "${PROJECT_SOURCE_DIR}/Installation/debian/client_packagedesc.txt"
CPACK_DEBIAN_PACKAGE_DESCRIPTION)
@ -73,12 +90,7 @@ set(CPACK_PACKAGE_FILE_NAME "${CPACK_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION}-${AR
################################################################################
# Install the external files into the package directory:
################################################################################
include(${PROJECT_SOURCE_DIR}/cmake/GNUInstallDirs.cmake)
set(CMAKE_INSTALL_SYSCONFDIR_ARANGO "${CMAKE_INSTALL_SYSCONFDIR}/arangodb3" CACHE PATH "read-only single-machine data (etc)")
set(CMAKE_INSTALL_FULL_SYSCONFDIR_ARANGO "${CMAKE_INSTALL_FULL_SYSCONFDIR}/arangodb3" CACHE PATH "read-only single-machine data (etc)")
set(CMAKE_INSTALL_DATAROOTDIR_ARANGO "${CMAKE_INSTALL_DATAROOTDIR}/arangodb3" CACHE PATH "read-only data (share)")
set(CMAKE_INSTALL_FULL_DATAROOTDIR_ARANGO "${CMAKE_INSTALL_FULL_DATAROOTDIR}/arangodb3" CACHE PATH "read-only data (share)")
set(INSTALL_MACROS_NO_TARGET_INSTALL TRUE)
include(${ORIGINAL_SOURCE_DIR}/cmake/InstallMacros.cmake)

View File

@ -24,6 +24,7 @@
// //////////////////////////////////////////////////////////////////////////////
const isCluster = require("@arangodb/cluster").isCluster();
const isAgent = global.ArangoAgent.enabled();
var _ = require('lodash');
var flatten = require('internal').flatten;
@ -231,7 +232,7 @@ function asNumber (num) {
}
function updateQueueDelayClusterAware() {
if (isCluster) {
if (isCluster && !isAgent) {
global.ArangoAgency.set('Current/FoxxmasterQueueupdate', true);
}
updateQueueDelay();

View File

@ -284,6 +284,12 @@ void ArangoGlobalContext::getCheckPath(std::string &path, const char *whichPath,
LOG(ERR) << "failed to locate " << whichPath << " directory, its neither available in '" << path << "' nor in '" << directory << "'";
FATAL_ERROR_EXIT();
}
arangodb::basics::FileUtils::normalizePath(directory);
path = directory;
}
else {
if (!TRI_PathIsAbsolute(path)) {
arangodb::basics::FileUtils::makePathAbsolute(path);
}
}
}

View File

@ -616,6 +616,16 @@ std::string dirname(std::string const& name) {
return base;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief makes a path absolute, in-place, by resolving it against the
/// current working directory
///
/// @param path the (possibly relative) path; replaced with the absolute
///             version on success, left untouched on failure
////////////////////////////////////////////////////////////////////////////////

void makePathAbsolute(std::string &path) {
  int err = 0;  // error code from currentDirectory(); on failure the cwd may
                // be empty and resolution below simply falls through
  std::string cwd = FileUtils::currentDirectory(&err);

  // TRI_GetAbsolutePath can return nullptr (e.g. allocation failure);
  // assigning a nullptr to std::string is undefined behavior, so guard it
  // and keep the original path in that case
  char* p = TRI_GetAbsolutePath(path.c_str(), cwd.c_str());

  if (p != nullptr) {
    path = p;
    TRI_FreeString(TRI_CORE_MEM_ZONE, p);
  }
}
}
}
}

View File

@ -53,6 +53,14 @@ std::string removeTrailingSeparator(std::string const& name);
void normalizePath(std::string& name);
////////////////////////////////////////////////////////////////////////////////
/// @brief makes a path absolute
///
/// path will be modified in-place
////////////////////////////////////////////////////////////////////////////////
void makePathAbsolute(std::string &path);
////////////////////////////////////////////////////////////////////////////////
/// @brief creates a filename
////////////////////////////////////////////////////////////////////////////////

View File

@ -1,5 +1,5 @@
#define LOCCAL_STATE_DIR "@CMAKE_INSTALL_FULL_LOCALSTATEDIR@"
#define _SYSCONFDIR_ "@ETCDIR_NATIVE@"
#define _SYSCONFDIR_ "@CMAKE_INSTALL_FULL_SYSCONFDIR_ARANGO@"
#define STARTUP_DIRECTORY "@PKGDATADIR@/js"
#define DESTINATION_DIR "@CMAKE_INSTALL_DATAROOTDIR_ARANGO@/js"
#define ICU_DESTINATION_DIRECTORY "@ICU_DT_DEST@"

View File

@ -25,6 +25,7 @@
#ifdef _WIN32
#include <tchar.h>
#include <Shlwapi.h>
#endif
#include "Basics/directories.h"
@ -1698,25 +1699,25 @@ std::string TRI_LocateBinaryPath(char const* argv0) {
std::string TRI_GetInstallRoot(std::string const& binaryPath,
char const *installBinaryPath) {
// First lets remove trailing (back) slashes from the bill:
long ibpLength = strlen(installBinaryPath);
size_t ibpLength = strlen(installBinaryPath);
if (installBinaryPath[ibpLength - 1] == TRI_DIR_SEPARATOR_CHAR) {
ibpLength --;
}
long bpLength = binaryPath.length();
const char *pbPath = binaryPath.c_str();
size_t bpLength = binaryPath.length();
char const* pbPath = binaryPath.c_str();
if (pbPath[bpLength - 1] == TRI_DIR_SEPARATOR_CHAR) {
bpLength --;
--bpLength;
}
if (ibpLength > bpLength) {
return TRI_DIR_SEPARATOR_STR;
}
for (int i = 1; i < ibpLength; i ++) {
if (pbPath[bpLength -i] != installBinaryPath[ibpLength - i]) {
for (size_t i = 1; i < ibpLength; ++i) {
if (pbPath[bpLength - i] != installBinaryPath[ibpLength - i]) {
return TRI_DIR_SEPARATOR_STR;
}
}
@ -2422,3 +2423,16 @@ void TRI_InitializeFiles() {
////////////////////////////////////////////////////////////////////////////////
void TRI_ShutdownFiles() {}
#if _WIN32

////////////////////////////////////////////////////////////////////////////////
/// @brief checks whether a path is fully qualified (Windows variant):
/// delegates the decision to the Shlwapi helper PathIsRelative
////////////////////////////////////////////////////////////////////////////////

bool TRI_PathIsAbsolute(const std::string &path) {
  // PathIsRelative returns TRUE for relative paths, so invert its verdict
  bool isRelative = (PathIsRelative(path.c_str()) != FALSE);
  return !isRelative;
}

#else

////////////////////////////////////////////////////////////////////////////////
/// @brief checks whether a path is fully qualified (POSIX variant):
/// an absolute path is exactly one that begins with the directory separator
////////////////////////////////////////////////////////////////////////////////

bool TRI_PathIsAbsolute(const std::string &path) {
  // empty strings are relative by definition; otherwise inspect the first char
  return !path.empty() && path[0] == '/';
}

#endif

View File

@ -369,4 +369,10 @@ void TRI_InitializeFiles();
void TRI_ShutdownFiles();
////////////////////////////////////////////////////////////////////////////////
/// @brief checks whether path is full qualified or relative
////////////////////////////////////////////////////////////////////////////////
bool TRI_PathIsAbsolute(const std::string &path);
#endif

View File

@ -325,7 +325,9 @@ void TRI_FixIcuDataEnv() {
putenv(e.c_str());
} else {
#ifdef _SYSCONFDIR_
std::string e = "ICU_DATA=" + std::string(_SYSCONFDIR_) + "..\\..\\bin";
std::string SCDIR(_SYSCONFDIR_);
SCDIR = StringUtils::replace(SCDIR, "/", "\\\\");
std::string e = "ICU_DATA=" + SCDIR + "..\\..\\bin";
e = StringUtils::replace(e, "\\", "\\\\");
putenv(e.c_str());
#else

View File

@ -162,7 +162,7 @@ void SimpleHttpResult::addHeaderField(char const* key, size_t keyLength,
if (_returnCode == 204) {
// HTTP 204 = No content. Assume we will have a content-length of 0.
// note that will value can be overridden later if the response has the content-length
// note that the value can be overridden later if the response has the content-length
// header set to some other value
setContentLength(0);
}

View File

@ -28,6 +28,7 @@ TRI_v8_global_s::TRI_v8_global_s(v8::Isolate* isolate)
JSVPack(),
AgencyTempl(),
AgentTempl(),
ClusterInfoTempl(),
ServerStateTempl(),
ClusterCommTempl(),
@ -35,7 +36,6 @@ TRI_v8_global_s::TRI_v8_global_s(v8::Isolate* isolate)
VPackTempl(),
VocbaseColTempl(),
VocbaseTempl(),
VulpesTempl(),
BufferTempl(),

View File

@ -475,6 +475,12 @@ typedef struct TRI_v8_global_s {
v8::Persistent<v8::ObjectTemplate> AgencyTempl;
//////////////////////////////////////////////////////////////////////////////
/// @brief local agent template
//////////////////////////////////////////////////////////////////////////////
v8::Persistent<v8::ObjectTemplate> AgentTempl;
//////////////////////////////////////////////////////////////////////////////
/// @brief clusterinfo template
//////////////////////////////////////////////////////////////////////////////
@ -517,12 +523,6 @@ typedef struct TRI_v8_global_s {
v8::Persistent<v8::ObjectTemplate> VocbaseTempl;
//////////////////////////////////////////////////////////////////////////////
/// @brief vulpes template
//////////////////////////////////////////////////////////////////////////////
v8::Persistent<v8::ObjectTemplate> VulpesTempl;
//////////////////////////////////////////////////////////////////////////////
/// @brief TRI_vocbase_t template
//////////////////////////////////////////////////////////////////////////////

View File

@ -1,17 +1,20 @@
// Compile with
// g++ perfanalysis.cpp -o perfanalyis -std=c++11 -Wall -O3
#include <iostream>
#include <string>
#include <vector>
#include <cstdlib>
#include <cstring>
#include <unordered_map>
#include <algorithm>
#include <iostream>
#include <regex>
#include <sstream>
#include <stdexcept>
#include <string>
#include <unordered_map>
#include <vector>
#include <memory>
using namespace std;
struct Event {
static const regex re;
string threadName;
int tid;
string cpu;
@ -22,45 +25,27 @@ struct Event {
bool isRet;
Event(string& line) : isRet(false) {
char* s = strdup(line.c_str());
char* p = strtok(s, " ");
char* q;
if (p != nullptr) {
threadName = p;
p = strtok(nullptr, " ");
tid = strtol(p, nullptr, 10);
p = strtok(nullptr, " ");
cpu = p;
p = strtok(nullptr, " ");
startTime = strtod(p, nullptr);
p = strtok(nullptr, " ");
q = strtok(nullptr, " ");
if (strcmp(q, "cs:") == 0) {
free(s);
return;
}
name = p;
name.pop_back();
auto l = name.size();
if (l >= 3 && name[l-1] == 't' && name[l-2] == 'e' &&
name[l-3] == 'R') {
isRet = true;
name.pop_back();
name.pop_back();
name.pop_back();
}
inbrackets = q;
std::smatch match_obj;
if(!std::regex_search(line, match_obj, re)){
throw std::logic_error("could not parse line");
}
threadName = match_obj[1];
tid = std::stoi(match_obj[2]);
cpu = match_obj[3];
startTime = std::stod(match_obj[4]);
duration = 0;
name = match_obj[6];
inbrackets = match_obj[7];
if (match_obj[9].length() > 0) {
isRet = true;
name.erase(name.end() - 3, name.end()); // remove Ret suffix form name
}
free(s);
}
bool empty() {
return name.empty();
}
bool empty() { return name.empty(); }
string id() {
return to_string(tid) + name;
}
string id() { return to_string(tid) + name; }
string pretty() {
return to_string(duration) + " " + name + " " + to_string(startTime);
@ -77,31 +62,46 @@ struct Event {
}
};
int main(int argc, char* argv[]) {
unordered_map<string, Event*> table;
vector<Event*> list;
// sample output:
// arangod 32636 [005] 16678249.324973: probe_arangod:insertLocalRet: (14a7f60 <- 14a78d6)
// process tid core timepoint scope:name frame
const regex Event::re(
R"_(\s*(\S+))_" // name 1
R"_(\s+(\d+))_" // tid 2
R"_(\s+\[(\d+)\])_" // cup 3
R"_(\s+(\d+\.\d+):)_" // time 4
R"_(\s+([^: ]+):([^: ]+):)_" // scope:func 5:6
R"_(\s+\(([0-9a-f]+)(\s+<-\s+([0-9a-f]+))?\))_" // (start -> stop) 7 -> 9
,
std::regex_constants::ECMAScript | std::regex_constants::optimize);
int main(int /*argc*/, char* /*argv*/ []) {
unordered_map<string, unique_ptr<Event>> table;
vector<unique_ptr<Event>> list;
string line;
while (getline(cin, line)) {
Event* e = new Event(line);
if (!e->empty()) {
string id = e->id();
if (!e->isRet) {
auto event = std::make_unique<Event>(line);
if (!event->empty()) {
string id = event->id();
// insert to table if it is not a function return
if (!event->isRet) {
auto it = table.find(id);
if (it != table.end()) {
cout << "Alarm, double event found:\n" << line << std::endl;
} else {
table.insert(make_pair(id, e));
table.insert(make_pair(id, std::move(event)));
}
// update duration in table
} else {
auto it = table.find(id);
if (it == table.end()) {
cout << "Return for unknown event found:\n" << line << std::endl;
} else {
Event* s = it->second;
unique_ptr<Event> ev = std::move(it->second);
table.erase(it);
s->duration = e->startTime - s->startTime;
list.push_back(s);
ev->duration = event->startTime - ev->startTime;
list.push_back(std::move(ev));
}
}
}
@ -109,13 +109,11 @@ int main(int argc, char* argv[]) {
cout << "Unreturned events:\n";
for (auto& p : table) {
cout << p.second->pretty() << "\n";
delete p.second;
}
sort(list.begin(), list.end(), [](Event* a, Event* b) -> bool {
return *a < *b;
});
sort(list.begin(), list.end(),
[](unique_ptr<Event>const& a, unique_ptr<Event>const& b) -> bool { return *a < *b; });
cout << "Events sorted by name and time:\n";
for (auto* e : list) {
for (auto& e : list) {
cout << e->pretty() << "\n";
}
return 0;

View File

@ -6,60 +6,78 @@
#
# This script sets up performance monitoring events to measure single
# document operations. Run this script with sudo when the ArangoDB
# process is already running. Then do
# process is already running:
#
# ./setupPerfEvents.sh
#
# Now you are able to record the event with:
#
# sudo perf record -e "probe_arangod:*" -aR sleep 60
# (to sample for 60 seconds). A file "perf.data" is written to the
# current directory.
# Dump the events in this file with
#
# The above command will get sample data for 60 seconds. A file "perf.data" is
# written to the current directory. Dump the events in this file with:
#
# sudo perf script > perf.history
#
# This logs the times when individual threads hit the events.
# Use the program perfanalysis.cpp in this directory in the following way:
#
# sudo ./perfanalyis < perf.history > perf.statistics
# This will group enter and exit events of functions together, compute
# the time spent and sort by function.
# Remove all events with
#
# This will group enter and exit events of functions together, compute the time
# spent and sort by function. When finished remove all events with:
#
# sudo perf probe -d "probe_arangod:*"
# List events with
#
# List events with:
#
# sudo perf probe -l
#
#
ARANGOD_EXECUTABLE=build/bin/arangod
perf probe -x $ARANGOD_EXECUTABLE -d "probe_arangod:*"
main(){
local ARANGOD_EXECUTABLE=${1-build/bin/arangod}
echo Adding events, this takes a few seconds...
#delete all existing events
perf probe -x $ARANGOD_EXECUTABLE -d "probe_arangod:*"
echo "Adding events, this takes a few seconds..."
echo "Single document operations..."
addEvent insertLocal
addEvent removeLocal
addEvent modifyLocal
addEvent documentLocal
echo "Single document operations on coordinator..."
addEvent insertCoordinator
addEvent removeCoordinator
addEvent updateCoordinator
addEvent replaceCoordinator
addEvent documentCoordinator
echo "work method in HttpServerJob"
addEvent workHttpServerJob work@HttpServerJob.cpp
echo "work method in RestDocumentHandler"
addEvent executeRestReadDocument readDocument@RestDocumentHandler.cpp
addEvent executeRestInsertDocument createDocument@RestDocumentHandler.cpp
addEvent handleRequest handleRequest@HttpServer.cpp
addEvent handleWrite handleWrite@SocketTask.cpp
addEvent tcp_sendmsg
addEvent tcp_recvmsg
echo Done.
}
addEvent() {
x=$1
y=$2
if [ "x$y" == "x" ] ; then
y=$x
fi
echo $x
perf probe -x $ARANGOD_EXECUTABLE -a $x=$y 2> /dev/null
perf probe -x $ARANGOD_EXECUTABLE -a ${x}Ret=$y%return 2> /dev/null
local name="$1"
local func="${2-"${name}"}"
echo "setting up $name for function: $func"
perf probe -x $ARANGOD_EXECUTABLE -a $name=$func 2> /dev/null #enter function
perf probe -x $ARANGOD_EXECUTABLE -a ${name}Ret=$func%return 2> /dev/null #return form function
}
echo Single document operations...
addEvent insertLocal
addEvent removeLocal
addEvent modifyLocal
addEvent documentLocal
echo Single document operations on coordinator...
addEvent insertCoordinator
addEvent removeCoordinator
addEvent updateCoordinator
addEvent replaceCoordinator
addEvent documentCoordinator
echo work method in HttpServerJob
addEvent workHttpServerJob work@HttpServerJob.cpp
echo work method in RestDocumentHandler
addEvent executeRestReadDocument readDocument@RestDocumentHandler.cpp
addEvent executeRestInsertDocument createDocument@RestDocumentHandler.cpp
addEvent handleRequest handleRequest@HttpServer.cpp
addEvent handleWrite handleWrite@SocketTask.cpp
addEvent tcp_sendmsg
addEvent tcp_recvmsg
echo Done.
main "$@"

View File

@ -96,6 +96,7 @@ start() {
PORT=$2
mkdir cluster/data$PORT
echo Starting $TYPE on port $PORT
mkdir -p cluster/apps$PORT
build/bin/arangod -c none \
--database.directory cluster/data$PORT \
--cluster.agency-endpoint tcp://127.0.0.1:$BASE \