1
0
Fork 0

Merge branch 'devel' of https://github.com/arangodb/arangodb into engine-api

This commit is contained in:
jsteemann 2017-02-13 08:37:41 +01:00
commit 8b273b5eaf
59 changed files with 963 additions and 1152 deletions

View File

@ -42,6 +42,7 @@ install:
before_script: "bash -c Installation/travisCI/before_script.sh"
script: "bash -c Installation/travisCI/build.sh"
after_failure: "bash -c Installation/travisCI/after_failure.sh"
after_script: "bash -c Installation/travisCI/after_script.sh"
notifications:
slack:

View File

@ -48,14 +48,26 @@ v3.2.alpha1 (2017-02-05)
v3.1.11 (XXXX-XX-XX)
--------------------
* ui: optimized smart graph creation usability
* ui: fixed #2308
* fixed a race in async task cancellation via `require("@arangodb/tasks").unregisterTask()`
* fixed spuriously hanging threads in cluster AQL that could sit idle for a few minutes
* fixed potential numeric overflow for big index ids in index deletion API
* fixed sort issue in cluster, occurring when one of the local sort buffers of a
GatherNode was empty
* reduce number of HTTP requests made for certain kinds of join queries in cluster,
leading to speedup of some join queries
* supervision deals with demised coordinators correctly again
v3.1.10 (2017-XX-XX)
v3.1.10 (2017-02-02)
--------------------
* updated versions of bundled node modules:

View File

@ -499,7 +499,7 @@ if (USE_MAINTAINER_MODE)
find_program(AWK_EXECUTABLE awk)
endif ()
include(ArangoDBInstall)
include(debugInformation)
find_program(FILE_EXECUTABLE file)
detect_binary_id_type(CMAKE_DEBUG_FILENAMES_SHA_SUM)
@ -838,6 +838,7 @@ if (MSVC)
include(generate_product_version)
endif()
include(ArangoDBInstall)
if (NOT(SKIP_PACKAGING))
include(packages/packages)
endif()

View File

@ -578,7 +578,7 @@ ${MAKE_CMD_PREFIX} ${MAKE} ${MAKE_PARAMS}
(cd ${SOURCE_DIR}; git rev-parse HEAD > last_compiled_version.sha)
if [ -n "$CPACK" -a -n "${TARGET_DIR}" ]; then
${PACKAGE_MAKE} packages
${PACKAGE_MAKE} packages || exit 1
fi
# and install
@ -588,8 +588,8 @@ if test -n "${TARGET_DIR}"; then
mkdir -p "${TARGET_DIR}"
dir="${TARGET_DIR}"
if [ -n "$CPACK" -a -n "${TARGET_DIR}" ]; then
${PACKAGE_MAKE} copy_packages
${PACKAGE_MAKE} clean_packages
${PACKAGE_MAKE} copy_packages || exit 1
${PACKAGE_MAKE} clean_packages || exit 1
else
# we re-use a generic cpack tarball:
${PACKAGE_MAKE} TGZ_package

View File

@ -0,0 +1,4 @@
echo "killing arango* binaries"
killall -9 "arangod" || true
killall -9 "arangosh" || true

View File

@ -221,21 +221,19 @@ bool AddFollower::start() {
pending.close();
// --- Plan changes
pending.add(_agencyPrefix + planPath, VPackValue(VPackValueType::Array));
for (auto const& srv : VPackArrayIterator(planned)) {
pending.add(srv);
}
for (auto const& i : _newFollower) {
pending.add(VPackValue(i));
pending.add(_agencyPrefix + planPath, VPackValue(VPackValueType::Object));
pending.add("op", VPackValue("push"));
pending.add("new", VPackValue(i));
pending.close();
}
pending.close();
// --- Increment Plan/Version
pending.add(_agencyPrefix + planVersion, VPackValue(VPackValueType::Object));
pending.add("op", VPackValue("increment"));
pending.close();
pending.close();
pending.close(); // Operations
// Preconditions

View File

@ -1000,8 +1000,8 @@ uint64_t AgencyComm::uniqid(uint64_t count, double timeout) {
return oldValue;
}
bool AgencyComm::registerCallback(std::string const& key,
std::string const& endpoint) {
AgencyCommResult AgencyComm::registerCallback(std::string const& key,
std::string const& endpoint) {
VPackBuilder builder;
builder.add(VPackValue(endpoint));
@ -1010,11 +1010,11 @@ bool AgencyComm::registerCallback(std::string const& key,
AgencyWriteTransaction transaction(operation);
auto result = sendTransactionWithFailover(transaction);
return result.successful();
return result;
}
bool AgencyComm::unregisterCallback(std::string const& key,
std::string const& endpoint) {
AgencyCommResult AgencyComm::unregisterCallback(std::string const& key,
std::string const& endpoint) {
VPackBuilder builder;
builder.add(VPackValue(endpoint));
@ -1023,7 +1023,7 @@ bool AgencyComm::unregisterCallback(std::string const& key,
AgencyWriteTransaction transaction(operation);
auto result = sendTransactionWithFailover(transaction);
return result.successful();
return result;
}
bool AgencyComm::lockRead(std::string const& key, double ttl, double timeout) {
@ -1533,7 +1533,8 @@ AgencyCommResult AgencyComm::send(
if (cc == nullptr) {
// nullptr only happens during controlled shutdown
result._message = "could not send request to agency because of shutdown";
LOG_TOPIC(TRACE, Logger::AGENCYCOMM) << "could not send request to agency";
LOG_TOPIC(TRACE, Logger::AGENCYCOMM)
<< "could not send request to agency because of shutdown";
return result;
}

View File

@ -621,9 +621,11 @@ class AgencyComm {
uint64_t uniqid(uint64_t, double);
bool registerCallback(std::string const& key, std::string const& endpoint);
AgencyCommResult registerCallback(std::string const& key,
std::string const& endpoint);
bool unregisterCallback(std::string const& key, std::string const& endpoint);
AgencyCommResult unregisterCallback(std::string const& key,
std::string const& endpoint);
void updateEndpoints(arangodb::velocypack::Slice const&);

View File

@ -217,6 +217,7 @@ void Agent::reportIn(std::string const& peerId, index_t index, size_t toLog) {
if (index > _confirmed[peerId]) { // progress this follower?
_confirmed[peerId] = index;
if (toLog > 0) { // We want to reset the wait time only if a package callback
LOG_TOPIC(TRACE, Logger::AGENCY) << "Got call back of " << toLog << " logs";
_earliestPackage[peerId] = system_clock::now();
}
}
@ -324,7 +325,7 @@ bool Agent::recvAppendEntriesRPC(
void Agent::sendAppendEntriesRPC() {
std::chrono::duration<int, std::ratio<1, 1000000>> const dt (
(_config.waitForSync() ? 10000 : 500));
(_config.waitForSync() ? 40000 : 2000));
auto cc = ClusterComm::instance();
if (cc == nullptr) {
// nullptr only happens during controlled shutdown

View File

@ -29,12 +29,12 @@ using namespace arangodb::consensus;
using namespace arangodb::velocypack;
AgentCallback::AgentCallback() :
_agent(0), _last(0), _startTime(0.0), _toLog(0) {}
_agent(0), _last(0), _toLog(0), _startTime(0.0) {}
AgentCallback::AgentCallback(Agent* agent, std::string const& slaveID,
index_t last, size_t toLog)
: _agent(agent), _last(last), _slaveID(slaveID),
_startTime(TRI_microtime()) {}
: _agent(agent), _last(last), _slaveID(slaveID), _toLog(toLog),
_startTime(TRI_microtime()) {}
void AgentCallback::shutdown() { _agent = 0; }

View File

@ -46,8 +46,9 @@ class AgentCallback : public arangodb::ClusterCommCallback {
Agent* _agent;
index_t _last;
std::string _slaveID;
double _startTime;
size_t _toLog;
double _startTime;
};
}
} // namespace

View File

@ -579,9 +579,9 @@ void Inception::run() {
// If command line RAFT timings have not been set explicitly
// Try good estimate of RAFT time limits
if (!config.cmdLineTimings()) {
/*if (!config.cmdLineTimings()) {
estimateRAFTInterval();
}
}*/
LOG_TOPIC(INFO, Logger::AGENCY) << "Activating agent.";
_agent->ready(true);

View File

@ -69,6 +69,15 @@ State::State(std::string const& endpoint)
/// Default dtor
State::~State() {}
inline static std::string timestamp() {
std::time_t t = std::time(nullptr);
char mbstr[100];
return
std::strftime(
mbstr, sizeof(mbstr), "%Y-%m-%d %H:%M:%S %Z", std::localtime(&t)) ?
std::string(mbstr) : std::string();
}
inline static std::string stringify(arangodb::consensus::index_t index) {
std::ostringstream i_str;
i_str << std::setw(20) << std::setfill('0') << index;
@ -79,31 +88,36 @@ inline static std::string stringify(arangodb::consensus::index_t index) {
bool State::persist(arangodb::consensus::index_t index, term_t term,
arangodb::velocypack::Slice const& entry,
std::string const& clientId) const {
Builder body;
body.add(VPackValue(VPackValueType::Object));
body.add("_key", Value(stringify(index)));
body.add("term", Value(term));
body.add("request", entry);
body.add("clientId", Value(clientId));
body.close();
{
VPackObjectBuilder b(&body);
body.add("_key", Value(stringify(index)));
body.add("term", Value(term));
body.add("request", entry);
body.add("clientId", Value(clientId));
body.add("timestamp", Value(timestamp()));
}
TRI_ASSERT(_vocbase != nullptr);
auto transactionContext =
std::make_shared<StandaloneTransactionContext>(_vocbase);
SingleCollectionTransaction trx(transactionContext, "log",
AccessMode::Type::WRITE);
SingleCollectionTransaction trx(
transactionContext, "log", AccessMode::Type::WRITE);
int res = trx.begin();
if (res != TRI_ERROR_NO_ERROR) {
THROW_ARANGO_EXCEPTION(res);
}
OperationResult result;
try {
result = trx.insert("log", body.slice(), _options);
} catch (std::exception const& e) {
LOG_TOPIC(ERR, Logger::AGENCY) << "Failed to persist log entry:"
<< e.what();
LOG_TOPIC(ERR, Logger::AGENCY)
<< "Failed to persist log entry:" << e.what();
}
res = trx.finish(result.code);
return (res == TRI_ERROR_NO_ERROR);

View File

@ -184,7 +184,7 @@ static bool checkPathVariableAccessFeasible(Ast* ast, AstNode* parent,
auto unusedWalker = [](AstNode const* n, void*) {};
bool isEdge = false;
// We define that depth == UINT64_MAX is "ALL depths"
size_t depth = UINT64_MAX;
uint64_t depth = UINT64_MAX;
void* unused = nullptr;
AstNode* parentOfReplace = nullptr;
size_t replaceIdx = 0;
@ -239,7 +239,7 @@ static bool checkPathVariableAccessFeasible(Ast* ast, AstNode* parent,
notSupported = true;
return node;
}
depth = static_cast<size_t>(node->value.value._int);
depth = static_cast<uint64_t>(node->value.value._int);
break;
}
case NODE_TYPE_ITERATOR:

View File

@ -1129,7 +1129,7 @@ void TraversalNode::setCondition(arangodb::aql::Condition* condition) {
}
void TraversalNode::registerCondition(bool isConditionOnEdge,
size_t conditionLevel,
uint64_t conditionLevel,
AstNode const* condition) {
Ast::getReferencedVariables(condition, _conditionVariables);
if (isConditionOnEdge) {

View File

@ -218,7 +218,7 @@ class TraversalNode : public ExecutionNode {
/// @brief register a filter condition on a given search depth.
/// If this condition is not fulfilled a traversal will abort.
/// The condition will contain the local variable for it's accesses.
void registerCondition(bool, size_t, AstNode const*);
void registerCondition(bool, uint64_t, AstNode const*);
/// @brief register a filter condition for all search depths
/// If this condition is not fulfilled a traversal will abort.
@ -328,11 +328,11 @@ class TraversalNode : public ExecutionNode {
std::vector<AstNode const*> _globalVertexConditions;
/// @brief List of all depth specific conditions for edges
std::unordered_map<size_t, std::unique_ptr<TraversalEdgeConditionBuilder>>
std::unordered_map<uint64_t, std::unique_ptr<TraversalEdgeConditionBuilder>>
_edgeConditions;
/// @brief List of all depth specific conditions for vertices
std::unordered_map<size_t, AstNode*> _vertexConditions;
std::unordered_map<uint64_t, AstNode*> _vertexConditions;
/// @brief Flag if options are already prepared. After
/// this flag was set the node cannot be cloned

View File

@ -308,8 +308,8 @@ SET(ARANGOD_SOURCES
Statistics/ServerStatistics.cpp
Statistics/StatisticsFeature.cpp
StorageEngine/EngineSelectorFeature.cpp
StorageEngine/RocksDBEngine.cpp
StorageEngine/RocksDBIndexFactory.cpp
#StorageEngine/RocksDBEngine.cpp
#StorageEngine/RocksDBIndexFactory.cpp
StorageEngine/TransactionCollection.cpp
StorageEngine/TransactionState.cpp
Utils/AqlTransaction.cpp

View File

@ -46,7 +46,9 @@ AgencyCallbackRegistry::AgencyCallbackRegistry(std::string const& callbackBasePa
AgencyCallbackRegistry::~AgencyCallbackRegistry() {
}
bool AgencyCallbackRegistry::registerCallback(std::shared_ptr<AgencyCallback> cb) {
AgencyCommResult AgencyCallbackRegistry::registerCallback(
std::shared_ptr<AgencyCallback> cb) {
uint32_t rand;
{
WRITE_LOCKER(locker, _lock);
@ -58,22 +60,28 @@ bool AgencyCallbackRegistry::registerCallback(std::shared_ptr<AgencyCallback> cb
}
}
bool ok = false;
AgencyCommResult result;
try {
ok = _agency.registerCallback(cb->key, getEndpointUrl(rand));
if (!ok) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "Registering callback failed";
result = _agency.registerCallback(cb->key, getEndpointUrl(rand));
if (!result.successful()) {
LOG_TOPIC(ERR, arangodb::Logger::AGENCY)
<< "Registering callback failed with " << result.errorCode() << ": "
<< result.errorMessage();
}
} catch (std::exception const& e) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "Couldn't register callback " << e.what();
LOG_TOPIC(ERR, arangodb::Logger::FIXME)
<< "Couldn't register callback " << e.what();
} catch (...) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "Couldn't register callback. Unknown exception";
LOG_TOPIC(ERR, arangodb::Logger::FIXME)
<< "Couldn't register callback. Unknown exception";
}
if (!ok) {
if (!result.successful()) {
WRITE_LOCKER(locker, _lock);
_endpoints.erase(rand);
}
return ok;
return result;
}
std::shared_ptr<AgencyCallback> AgencyCallbackRegistry::getCallback(uint32_t id) {

View File

@ -44,7 +44,7 @@ public:
//////////////////////////////////////////////////////////////////////////////
/// @brief register a callback
//////////////////////////////////////////////////////////////////////////////
bool registerCallback(std::shared_ptr<AgencyCallback>);
AgencyCommResult registerCallback(std::shared_ptr<AgencyCallback>);
//////////////////////////////////////////////////////////////////////////////
/// @brief unregister a callback

View File

@ -31,7 +31,7 @@
using ClusterEdgeCursor = arangodb::traverser::ClusterEdgeCursor;
ClusterEdgeCursor::ClusterEdgeCursor(VPackSlice v, size_t depth,
ClusterEdgeCursor::ClusterEdgeCursor(VPackSlice v, uint64_t depth,
arangodb::traverser::ClusterTraverser* traverser)
: _position(0) {
TransactionBuilderLeaser leased(traverser->_trx);

View File

@ -32,7 +32,7 @@ namespace traverser {
class ClusterEdgeCursor : public EdgeCursor {
public:
ClusterEdgeCursor(arangodb::velocypack::Slice, size_t, ClusterTraverser*);
ClusterEdgeCursor(arangodb::velocypack::Slice, uint64_t, ClusterTraverser*);
~ClusterEdgeCursor() {
}

View File

@ -282,12 +282,8 @@ void ClusterFeature::prepare() {
ServerState::instance()->setLocalInfo(_myLocalInfo);
if (!_myId.empty()) {
ServerState::instance()->setId(_myId);
}
if (!ServerState::instance()->registerWithRole(_requestedRole, _myAddress)) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "Couldn't register at agency.";
if (!ServerState::instance()->integrateIntoCluster(_requestedRole, _myAddress, _myId)) {
LOG_TOPIC(FATAL, Logger::STARTUP) << "Couldn't integrate into cluster.";
FATAL_ERROR_EXIT();
}

View File

@ -1774,6 +1774,7 @@ int ClusterInfo::ensureIndexCoordinator(
errorMsg += trx.toJson();
errorMsg += "ClientId: " + result._clientId + " ";
errorMsg += " ResultCode: " + std::to_string(result.errorCode()) + " ";
errorMsg += " Result: " + result.errorMessage() + " ";
errorMsg += std::string(__FILE__) + ":" + std::to_string(__LINE__);
resultBuilder = *resBuilder;
}

View File

@ -91,7 +91,7 @@ bool ClusterTraverser::getVertex(VPackSlice edge,
}
bool ClusterTraverser::getSingleVertex(VPackSlice edge, VPackSlice comp,
size_t depth, VPackSlice& result) {
uint64_t depth, VPackSlice& result) {
bool res = _vertexGetter->getSingleVertex(edge, comp, depth, result);
if (res) {
if (_vertices.find(result) == _vertices.end()) {

View File

@ -65,7 +65,7 @@ class ClusterTraverser final : public Traverser {
/// Returns true if the vertex passes filtering conditions
bool getSingleVertex(arangodb::velocypack::Slice, arangodb::velocypack::Slice,
size_t, arangodb::velocypack::Slice&) override;
uint64_t, arangodb::velocypack::Slice&) override;
//////////////////////////////////////////////////////////////////////////////
/// @brief Function to fetch the real data of a vertex into an AQLValue

View File

@ -240,7 +240,8 @@ void HeartbeatThread::runDBServer() {
bool registered = false;
while (!registered) {
registered = _agencyCallbackRegistry->registerCallback(planAgencyCallback);
registered =
_agencyCallbackRegistry->registerCallback(planAgencyCallback).successful();
if (!registered) {
LOG_TOPIC(ERR, Logger::HEARTBEAT)
<< "Couldn't register plan change in agency!";

View File

@ -245,69 +245,160 @@ bool ServerState::unregister() {
return result.successful();
}
bool ServerState::registerShortName(std::string const& id, ServerState::RoleEnum const& role) {
// secondaries will not be handled here and will get assigned an auto generated one
if (role == ROLE_SECONDARY) {
return false;
}
AgencyComm comm;
AgencyCommResult result;
std::string agencyIdKey;
std::string roleName;
if (role == ROLE_PRIMARY) {
agencyIdKey = "LatestDBServerId";
roleName = "DBServer";
} else {
agencyIdKey = "LatestCoordinatorId";
roleName = "Coordinator";
}
uint32_t shortNum(0);
try {
shortNum = std::stoul(id.substr(roleName.size(), 3));
} catch (...) {
LOG_TOPIC(DEBUG, Logger::CLUSTER) <<
"Old id cannot be parsed for number.";
return false;
}
const std::string idKey = "Target/" + agencyIdKey;
const std::string mapKey = "Target/MapUniqueToShortID/" + id;
size_t attempts {0};
while (attempts++ < 300) {
result = comm.getValues("Target/" + agencyIdKey);
if (!result.successful()) {
LOG_TOPIC(WARN, Logger::CLUSTER) << "Couldn't fetch Target/" + agencyIdKey;
sleep(1);
continue;
}
VPackSlice latestId = result.slice()[0].get(
std::vector<std::string>(
{AgencyCommManager::path(), "Target", agencyIdKey}));
auto num = latestId.getNumber<uint32_t>();
std::vector<AgencyOperation> operations;
std::vector<AgencyPrecondition> preconditions;
std::stringstream ss; // ShortName
ss << roleName
<< std::setw(4) << std::setfill('0')
<< shortNum;
std::string shortName = ss.str();
VPackBuilder shortNumBuilder;
shortNumBuilder.add(VPackValue(shortNum));
VPackBuilder numBuilder;
numBuilder.add(VPackValue(num));
VPackBuilder mapBuilder;
{
VPackObjectBuilder b(&mapBuilder);
mapBuilder.add("TransactionID", shortNumBuilder.slice());
mapBuilder.add("ShortName", VPackValue(shortName));
}
operations.push_back(AgencyOperation(mapKey, AgencyValueOperationType::SET, mapBuilder.slice()));
preconditions.push_back(
AgencyPrecondition(idKey, AgencyPrecondition::Type::VALUE, numBuilder.slice())
);
if (num > shortNum) {
// possible conflict! our shortname might already be taken!
result = comm.getValues("Target/MapUniqueToShortID");
if (!result.successful()) {
LOG_TOPIC(WARN, Logger::CLUSTER) << "Couldn't fetch Target/MapUniqueToShortID";
sleep(1);
continue;
}
VPackSlice shortIdMap = result.slice()[0].get(std::vector<std::string>(
{AgencyCommManager::path(), "Target", "MapUniqueToShortID"}));
if (shortIdMap.isObject()) {
for (auto const& s : VPackObjectIterator(shortIdMap)) {
if (s.key.copyString() == "ShortName") {
if (arangodb::basics::VelocyPackHelper::getStringValue(s.value, "") == shortName) {
// our short name is taken. total disaster! very sad!
return false;
}
}
}
}
} else {
// update the number so it the next auto generated number is out of our taken range
operations.push_back({idKey, AgencyValueOperationType::SET, shortNumBuilder.slice()});
}
AgencyWriteTransaction trx(operations, preconditions);
result = comm.sendTransactionWithFailover(trx);
if (result.successful()) {
return true;
}
sleep(1);
}
LOG_TOPIC(FATAL, Logger::STARTUP) << "Couldn't register shortname for " << id;
FATAL_ERROR_EXIT();
return false;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief try to register with a role
/// @brief try to integrate into a cluster
////////////////////////////////////////////////////////////////////////////////
bool ServerState::registerWithRole(ServerState::RoleEnum role,
std::string const& myAddress) {
bool ServerState::integrateIntoCluster(ServerState::RoleEnum const& role,
std::string const& myAddress,
std::string const& myId) {
// id supplied via command line this is deprecated
if (!myId.empty()) {
if (!hasPersistedId()) {
setId(myId);
ServerState::RoleEnum roleInAgency = getRole();
// we are known to the agency under our old id!
if (roleInAgency != ServerState::ROLE_UNDEFINED) {
registerShortName(myId, roleInAgency);
writePersistedId(myId);
} else {
LOG_TOPIC(FATAL, Logger::STARTUP) << "started with --cluster.my-id but id unknown in agency!";
FATAL_ERROR_EXIT();
}
} else {
LOG_TOPIC(WARN, Logger::STARTUP) << "--cluster.my-id is deprecated and will be deleted.";
}
}
AgencyComm comm;
AgencyCommResult result;
std::string localInfoEncoded = StringUtils::replace(
StringUtils::urlEncode(getLocalInfo()),"%2E",".");
std::string locinf = "Target/MapLocalToID/" +
(localInfoEncoded.empty() ? "bogus_hass_hund" : localInfoEncoded);
std::string dbidinf = "Plan/DBServers/" +
(_id.empty() ? "bogus_hass_hund" : _id);
std::string coidinf = "Plan/Coordinators/" +
(_id.empty() ? "bogus_hass_hund" : _id);
typedef std::pair<AgencyOperation,AgencyPrecondition> operationType;
AgencyGeneralTransaction reg;
reg.operations.push_back( // my-local-info
operationType(AgencyOperation(locinf), AgencyPrecondition()));
reg.operations.push_back( // db my-id
operationType(AgencyOperation(dbidinf), AgencyPrecondition()));
reg.operations.push_back( // cooord my-id
operationType(AgencyOperation(coidinf), AgencyPrecondition()));
result = comm.sendTransactionWithFailover(reg, 0.0);
std::string id;
if (result.slice().isArray()) {
VPackSlice targetSlice, planSlice;
if (!_id.empty()) {
try {
if (
result.slice()[1].get(
std::vector<std::string>({AgencyCommManager::path(), "Plan",
"DBServers", _id})).isString()) {
id = _id;
if (role == ServerState::ROLE_UNDEFINED) {
role = ServerState::ROLE_PRIMARY;
}
} else if (
result.slice()[2].get(
std::vector<std::string>({AgencyCommManager::path(), "Plan",
"Coordinators", _id})).isString()) {
id = _id;
if (role == ServerState::ROLE_UNDEFINED) {
role = ServerState::ROLE_COORDINATOR;
}
}
} catch (...) {}
} else if (!localInfoEncoded.empty()) {
try {
id = result.slice()[0].get(
std::vector<std::string>({AgencyCommManager::path(), "Target",
"MapLocalToID", localInfoEncoded})).copyString();
} catch (...) {}
}
if (!hasPersistedId()) {
id = generatePersistedId(role);
LOG_TOPIC(INFO, Logger::CLUSTER)
<< "Fresh start. Persisting new UUID " << id;
} else {
id = getPersistedId();
LOG_TOPIC(DEBUG, Logger::CLUSTER)
<< "Restarting with persisted UUID " << id;
}
id = createIdForRole(comm, role, id);
setId(id);
registerAtAgency(comm, role, id);
const std::string agencyKey = roleToAgencyKey(role);
const std::string planKey = "Plan/" + agencyKey + "/" + id;
const std::string currentKey = "Current/" + agencyKey + "/" + id;
@ -319,7 +410,7 @@ bool ServerState::registerWithRole(ServerState::RoleEnum role,
found = false;
} else {
VPackSlice plan = result.slice()[0].get(std::vector<std::string>(
{AgencyCommManager::path(), "Plan", agencyKey, id}));
{AgencyCommManager::path(), "Plan", agencyKey, id}));
if (!plan.isString()) {
found = false;
} else {
@ -336,7 +427,7 @@ bool ServerState::registerWithRole(ServerState::RoleEnum role,
comm.setValue(planKey, plan, 0.0);
if (!result.successful()) {
LOG_TOPIC(ERR, Logger::CLUSTER) << "Couldn't create plan "
<< result.errorMessage();
<< result.errorMessage();
return false;
}
}
@ -345,7 +436,7 @@ bool ServerState::registerWithRole(ServerState::RoleEnum role,
if (!result.successful()) {
LOG_TOPIC(ERR, Logger::CLUSTER) << "Could not talk to agency! "
<< result.errorMessage();
<< result.errorMessage();
return false;
}
@ -353,8 +444,8 @@ bool ServerState::registerWithRole(ServerState::RoleEnum role,
findAndSetRoleBlocking();
LOG_TOPIC(DEBUG, Logger::CLUSTER) << "We successfully announced ourselves as "
<< roleToString(role) << " and our id is "
<< id;
<< roleToString(role) << " and our id is "
<< id;
return true;
}
@ -387,130 +478,190 @@ void mkdir (std::string const& path) {
}
}
//////////////////////////////////////////////////////////////////////////////
/// @brief create an id for a specified role
/// TODO: why are the parameters passed by value here?
//////////////////////////////////////////////////////////////////////////////
std::string ServerState::createIdForRole(AgencyComm comm,
ServerState::RoleEnum role,
std::string id) {
typedef std::pair<AgencyOperation,AgencyPrecondition> operationType;
std::string const agencyKey = roleToAgencyKey(role);
std::string roleName = ((role == ROLE_COORDINATOR) ? "Coordinator":"DBServer");
size_t shortNum(0);
VPackBuilder builder;
builder.add(VPackValue("none"));
AgencyCommResult createResult;
std::string ServerState::getUuidFilename() {
auto dbpath =
application_features::ApplicationServer::getFeature<DatabasePathFeature>(
"DatabasePath");
TRI_ASSERT(dbpath != nullptr);
mkdir (dbpath->directory());
auto filePath = dbpath->directory() + "/UUID";
std::ifstream ifs(filePath);
return dbpath->directory() + "/UUID";
}
if (!id.empty()) {
if (id.compare(0, roleName.size(), roleName) == 0) {
try {
shortNum = std::stoul(id.substr(roleName.size(),3));
} catch(...) {
LOG_TOPIC(DEBUG, Logger::CLUSTER) <<
"Old id cannot be parsed for number.";
}
}
bool ServerState::hasPersistedId() {
std::string uuidFilename = getUuidFilename();
return FileUtils::exists(uuidFilename);
}
bool ServerState::writePersistedId(std::string const& id) {
std::string uuidFilename = getUuidFilename();
std::ofstream ofs(uuidFilename);
if (!ofs.is_open()) {
LOG_TOPIC(FATAL, Logger::CLUSTER)
<< "Couldn't write id file " << getUuidFilename();
FATAL_ERROR_EXIT();
return false;
}
ofs << id << std::endl;
ofs.close();
return true;
}
std::string ServerState::generatePersistedId(RoleEnum const& role) {
std::string id = RoleStr.at(role) + "-" +
to_string(boost::uuids::random_generator()());
writePersistedId(id);
return id;
}
std::string ServerState::getPersistedId() {
std::string uuidFilename = getUuidFilename();
std::ifstream ifs(uuidFilename);
std::string id;
if (ifs.is_open()) {
std::getline(ifs, id);
ifs.close();
LOG_TOPIC(DEBUG, Logger::CLUSTER)
<< "Restarting with persisted UUID " << id;
} else {
mkdir (dbpath->directory());
std::ofstream ofs(filePath);
if (id.empty()) {
id = RoleStr.at(role) + "-" +
to_string(boost::uuids::random_generator()());
LOG_TOPIC(FATAL, Logger::STARTUP) << "Couldn't open " << uuidFilename;
FATAL_ERROR_EXIT();
}
return id;
}
//////////////////////////////////////////////////////////////////////////////
/// @brief create an id for a specified role
//////////////////////////////////////////////////////////////////////////////
bool ServerState::registerAtAgency(AgencyComm& comm,
const ServerState::RoleEnum& role,
std::string const& id) {
typedef std::pair<AgencyOperation,AgencyPrecondition> operationType;
std::string agencyKey = role == ROLE_COORDINATOR ?
"Coordinators" : "DBServers";
std::string idKey = role == ROLE_COORDINATOR ?
"LatestCoordinatorId" : "LatestDBServerId";
if (role != ROLE_SECONDARY) {
VPackBuilder builder;
builder.add(VPackValue("none"));
AgencyCommResult createResult;
AgencyCommResult result = comm.getValues("Plan/" + agencyKey);
if (!result.successful()) {
LOG_TOPIC(FATAL, Logger::STARTUP) << "Couldn't fetch Plan/" << agencyKey
<< " from agency. Agency is not initialized?";
FATAL_ERROR_EXIT();
}
ofs << id << std::endl;
ofs.close();
LOG_TOPIC(INFO, Logger::CLUSTER)
<< "Fresh start. Persisting new UUID " << id;
VPackSlice servers = result.slice()[0].get(
std::vector<std::string>({AgencyCommManager::path(), "Plan", agencyKey}));
if (!servers.isObject()) {
LOG_TOPIC(FATAL, Logger::STARTUP) << "Plan/" << agencyKey << " in agency is no object. "
<< "Agency not initialized?";
FATAL_ERROR_EXIT();
}
VPackSlice entry = servers.get(id);
LOG_TOPIC(TRACE, Logger::STARTUP)
<< id << " found in existing keys: " << (!entry.isNone());
std::string planUrl = "Plan/" + agencyKey + "/" + id;
AgencyGeneralTransaction reg;
reg.operations.push_back( // Plan entry if not exists
operationType(
AgencyOperation(planUrl, AgencyValueOperationType::SET, builder.slice()),
AgencyPrecondition(planUrl, AgencyPrecondition::Type::EMPTY, true)));
// ok to fail (at least that was how it was before :S)
// XXX this should probably be sent as part of the transaction below
comm.sendTransactionWithFailover(reg, 0.0);
}
AgencyCommResult result = comm.getValues("Plan/" + agencyKey);
if (!result.successful()) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "Couldn't fetch Plan/" << agencyKey
<< " from agency. Agency is not initialized?";
FATAL_ERROR_EXIT();
}
VPackSlice servers = result.slice()[0].get(
std::vector<std::string>({AgencyCommManager::path(), "Plan", agencyKey}));
if (!servers.isObject()) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "Plan/" << agencyKey << " in agency is no object. "
<< "Agency not initialized?";
FATAL_ERROR_EXIT();
}
VPackSlice entry = servers.get(id);
LOG_TOPIC(TRACE, Logger::STARTUP)
<< id << " found in existing keys: " << (!entry.isNone());
std::string targetIdStr =
(role == ROLE_COORDINATOR) ?
"Target/LatestCoordinatorId" : "Target/LatestDBServerId";
std::string planUrl = "Plan/" + agencyKey + "/" + id;
std::string targetUrl = "Target/MapUniqueToShortID/" + id;
VPackBuilder empty;
{ VPackObjectBuilder preconditionDefinition(&empty); }
AgencyGeneralTransaction reg;
reg.operations.push_back( // Plan entry if not exists
operationType(
AgencyOperation(planUrl, AgencyValueOperationType::SET, builder.slice()),
AgencyPrecondition(planUrl, AgencyPrecondition::Type::EMPTY, true)));
reg.operations.push_back( // ShortID incrementif not already got one
operationType(
AgencyOperation(targetIdStr, AgencySimpleOperationType::INCREMENT_OP),
AgencyPrecondition(targetUrl, AgencyPrecondition::Type::EMPTY, true)));
reg.operations.push_back( // Get shortID
operationType(AgencyOperation(targetIdStr), AgencyPrecondition()));
result = comm.sendTransactionWithFailover(reg, 0.0);
VPackSlice latestId = result.slice()[2].get(
size_t attempts {0};
while (attempts++ < 300) {
AgencyReadTransaction readValueTrx(std::vector<std::string>{AgencyCommManager::path() + "/" + targetIdStr, AgencyCommManager::path() + "/" + targetUrl});
AgencyCommResult result = comm.sendTransactionWithFailover(readValueTrx, 0.0);
if (!result.successful()) {
LOG_TOPIC(WARN, Logger::CLUSTER) << "Couldn't fetch " << targetIdStr
<< " and " << targetUrl;
sleep(1);
continue;
}
VPackSlice mapSlice = result.slice()[0].get(
std::vector<std::string>(
{AgencyCommManager::path(), "Target",
(role == ROLE_COORDINATOR) ?
"LatestCoordinatorId" : "LatestDBServerId"}));
VPackBuilder localIdBuilder;
{
VPackObjectBuilder b(&localIdBuilder);
localIdBuilder.add("TransactionID", latestId);
std::stringstream ss; // ShortName
ss << ((role == ROLE_COORDINATOR) ? "Coordinator" : "DBServer")
<< std::setw(4) << std::setfill('0')
<< (shortNum ==0 ? latestId.getNumber<uint32_t>() : shortNum);
std::string shortName = ss.str();
localIdBuilder.add("ShortName", VPackValue(shortName));
}
AgencyWriteTransaction shortId( // Set new shortID if not got one already
{AgencyOperation(
targetUrl, AgencyValueOperationType::SET, localIdBuilder.slice())},
AgencyPrecondition(targetUrl, AgencyPrecondition::Type::EMPTY, true)
{AgencyCommManager::path(), "Target", "MapUniqueToShortID", id}));
// already registered
if (!mapSlice.isNone()) {
return true;
}
VPackSlice latestId = result.slice()[0].get(
std::vector<std::string>(
{AgencyCommManager::path(), "Target", idKey}));
uint32_t num = 0;
std::unique_ptr<AgencyPrecondition> latestIdPrecondition;
VPackBuilder latestIdBuilder;
if (latestId.isNumber()) {
num = latestId.getNumber<uint32_t>();
latestIdBuilder.add(VPackValue(num));
latestIdPrecondition.reset(new AgencyPrecondition(targetIdStr, AgencyPrecondition::Type::VALUE, latestIdBuilder.slice()));
} else {
latestIdPrecondition.reset(new AgencyPrecondition(targetIdStr, AgencyPrecondition::Type::EMPTY, true));
}
VPackBuilder localIdBuilder;
{
VPackObjectBuilder b(&localIdBuilder);
localIdBuilder.add("TransactionID", VPackValue(num + 1));
std::stringstream ss; // ShortName
ss << ((role == ROLE_COORDINATOR) ? "Coordinator" : "DBServer")
<< std::setw(4) << std::setfill('0')
<< num + 1;
std::string shortName = ss.str();
localIdBuilder.add("ShortName", VPackValue(shortName));
}
std::vector<AgencyOperation> operations;
std::vector<AgencyPrecondition> preconditions;
operations.push_back(
AgencyOperation(targetIdStr, AgencySimpleOperationType::INCREMENT_OP)
);
result = comm.sendTransactionWithFailover(shortId, 0.0);
return id;
operations.push_back(
AgencyOperation(targetUrl, AgencyValueOperationType::SET, localIdBuilder.slice())
);
preconditions.push_back(*(latestIdPrecondition.get()));
preconditions.push_back(
AgencyPrecondition(targetUrl, AgencyPrecondition::Type::EMPTY, true)
);
AgencyWriteTransaction trx(operations, preconditions);
result = comm.sendTransactionWithFailover(trx, 0.0);
if (result.successful()) {
return true;
}
sleep(1);
}
LOG_TOPIC(FATAL, Logger::STARTUP) << "Couldn't register shortname for " << id;
FATAL_ERROR_EXIT();
return false;
}
////////////////////////////////////////////////////////////////////////////////
@ -1096,7 +1247,7 @@ bool ServerState::storeRole(RoleEnum role) {
builder.slice());
AgencyOperation incrementVersion("Plan/Version",
AgencySimpleOperationType::INCREMENT_OP);
AgencyPrecondition precondition(myId, AgencyPrecondition::Type::EMPTY, true);
AgencyPrecondition precondition(myId, AgencyPrecondition::Type::EMPTY, false);
trx.reset(new AgencyWriteTransaction({addMe, incrementVersion}, precondition));
// mop: try again for secondaries
fatalError = false;

View File

@ -139,7 +139,7 @@ class ServerState {
/// @brief get the server role
RoleEnum getRole();
bool registerWithRole(RoleEnum, std::string const&);
bool integrateIntoCluster(RoleEnum const&, std::string const&, std::string const&);
bool unregister();
@ -269,13 +269,20 @@ class ServerState {
/// @brief validate a state transition for a coordinator server
bool checkCoordinatorState(StateEnum);
/// @brief create an id for a specified role
/// TODO: why are the parameters passed by value here?
std::string createIdForRole(AgencyComm, RoleEnum, std::string = std::string());
/// @brief register at agency
bool registerAtAgency(AgencyComm&, const RoleEnum&, std::string const&);
/// @brief register shortname for an id
bool registerShortName(std::string const& id, const RoleEnum&);
/// @brief get the key for a role in the agency
static std::string roleToAgencyKey(RoleEnum);
std::string getUuidFilename();
std::string getPersistedId();
bool hasPersistedId();
std::string generatePersistedId(RoleEnum const&);
bool writePersistedId(std::string const&);
private:
/// @brief the pointer to the singleton instance
static ServerState* _theinstance;

View File

@ -263,40 +263,6 @@ static void JS_GetAgency(v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_TRY_CATCH_END
}
////////////////////////////////////////////////////////////////////////////////
/// @brief acquires a read-lock in the agency
////////////////////////////////////////////////////////////////////////////////
static void JS_LockReadAgency(v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_TRY_CATCH_BEGIN(isolate);
v8::HandleScope scope(isolate);
if (args.Length() < 1) {
TRI_V8_THROW_EXCEPTION_USAGE("lockRead(<part>, <ttl>, <timeout>)");
}
std::string const part = TRI_ObjectToString(args[0]);
double ttl = 0.0;
if (args.Length() > 1) {
ttl = TRI_ObjectToDouble(args[1]);
}
double timeout = 0.0;
if (args.Length() > 2) {
timeout = TRI_ObjectToDouble(args[2]);
}
AgencyComm comm;
if (!comm.lockRead(part, ttl, timeout)) {
TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
"unable to acquire lock");
}
TRI_V8_RETURN_TRUE();
TRI_V8_TRY_CATCH_END
}
////////////////////////////////////////////////////////////////////////////////
/// @brief read transaction to the agency
////////////////////////////////////////////////////////////////////////////////
@ -362,97 +328,6 @@ static void JS_TransactAgency(v8::FunctionCallbackInfo<v8::Value> const& args) {
/// @brief acquires a write-lock in the agency
////////////////////////////////////////////////////////////////////////////////
static void JS_LockWriteAgency(
v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_TRY_CATCH_BEGIN(isolate);
v8::HandleScope scope(isolate);
if (args.Length() < 1) {
TRI_V8_THROW_EXCEPTION_USAGE("lockWrite(<part>, <ttl>, <timeout>)");
}
std::string const part = TRI_ObjectToString(args[0]);
double ttl = 0.0;
if (args.Length() > 1) {
ttl = TRI_ObjectToDouble(args[1]);
}
double timeout = 0.0;
if (args.Length() > 2) {
timeout = TRI_ObjectToDouble(args[2]);
}
AgencyComm comm;
if (!comm.lockWrite(part, ttl, timeout)) {
TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
"unable to acquire lock");
}
TRI_V8_RETURN_TRUE();
TRI_V8_TRY_CATCH_END
}
////////////////////////////////////////////////////////////////////////////////
/// @brief releases a read-lock in the agency
////////////////////////////////////////////////////////////////////////////////
static void JS_UnlockReadAgency(
v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_TRY_CATCH_BEGIN(isolate);
v8::HandleScope scope(isolate);
if (args.Length() > 2) {
TRI_V8_THROW_EXCEPTION_USAGE("unlockRead(<part>, <timeout>)");
}
std::string const part = TRI_ObjectToString(args[0]);
double timeout = 0.0;
if (args.Length() > 1) {
timeout = TRI_ObjectToDouble(args[1]);
}
AgencyComm comm;
if (!comm.unlockRead(part, timeout)) {
TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
"unable to release lock");
}
TRI_V8_RETURN_TRUE();
TRI_V8_TRY_CATCH_END
}
////////////////////////////////////////////////////////////////////////////////
/// @brief releases a write-lock in the agency
////////////////////////////////////////////////////////////////////////////////
static void JS_UnlockWriteAgency(
v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_TRY_CATCH_BEGIN(isolate);
v8::HandleScope scope(isolate);
if (args.Length() > 2) {
TRI_V8_THROW_EXCEPTION_USAGE("unlockWrite(<part>, <timeout>)");
}
std::string const part = TRI_ObjectToString(args[0]);
double timeout = 0.0;
if (args.Length() > 1) {
timeout = TRI_ObjectToDouble(args[1]);
}
AgencyComm comm;
if (!comm.unlockWrite(part, timeout)) {
TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
"unable to release lock");
}
TRI_V8_RETURN_TRUE();
TRI_V8_TRY_CATCH_END
}
////////////////////////////////////////////////////////////////////////////////
/// @brief removes a value from the agency
////////////////////////////////////////////////////////////////////////////////
@ -2162,10 +2037,6 @@ void TRI_InitV8Cluster(v8::Isolate* isolate, v8::Handle<v8::Context> context) {
JS_IsEnabledAgency);
TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("increaseVersion"),
JS_IncreaseVersionAgency);
TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("lockRead"),
JS_LockReadAgency);
TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("lockWrite"),
JS_LockWriteAgency);
TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("remove"),
JS_RemoveAgency);
TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("set"), JS_SetAgency);
@ -2175,10 +2046,6 @@ void TRI_InitV8Cluster(v8::Isolate* isolate, v8::Handle<v8::Context> context) {
JS_PrefixAgency);
TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("uniqid"),
JS_UniqidAgency);
TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("unlockRead"),
JS_UnlockReadAgency);
TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("unlockWrite"),
JS_UnlockWriteAgency);
TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("version"),
JS_VersionAgency);

View File

@ -19,6 +19,7 @@
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Jan Steemann
/// @author Jan Christoph Uhde
////////////////////////////////////////////////////////////////////////////////
#include "MMFilesEngine.h"
@ -544,11 +545,6 @@ TRI_vocbase_t* MMFilesEngine::createDatabaseMMFiles(TRI_voc_tick_t id, arangodb:
int res = 0;
waitUntilDeletion(id, true, res);
// //assert?!
// if (res != TRI_ERROR_NO_ERROR) {
// THROW_ARANGO_EXCEPTION(res);
// }
res = createDatabaseDirectory(id, name);
@ -842,6 +838,29 @@ void MMFilesEngine::createIndex(TRI_vocbase_t* vocbase, TRI_voc_cid_t collection
}
}
void MMFilesEngine::createIndexWalMarker(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId,
arangodb::velocypack::Slice const& data, bool writeMarker, int& status){
status = TRI_ERROR_NO_ERROR;
if (!writeMarker) {
return;
}
try {
MMFilesCollectionMarker marker(TRI_DF_MARKER_VPACK_CREATE_INDEX,
vocbase->id(), collectionId, data);
MMFilesWalSlotInfoCopy slotInfo =
MMFilesLogfileManager::instance()->allocateAndWrite(marker, false);
status=slotInfo.errorCode;
} catch (arangodb::basics::Exception const& ex) {
status = ex.code();
} catch (...) {
status = TRI_ERROR_INTERNAL;
}
};
// asks the storage engine to drop the specified index and persist the deletion
// info. Note that physical deletion of the index must not be carried out by this call,
// as there may still be users of the index. It is recommended that this operation
@ -860,6 +879,29 @@ void MMFilesEngine::dropIndex(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "cannot remove index definition in file '" << filename << "': " << TRI_errno_string(res);
}
}
void MMFilesEngine::dropIndexWalMarker(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId,
arangodb::velocypack::Slice const& data, bool writeMarker, int& error){
error = TRI_ERROR_NO_ERROR;
if (!writeMarker) {
return;
}
try {
MMFilesCollectionMarker marker(TRI_DF_MARKER_VPACK_DROP_INDEX,
vocbase->id(), collectionId, data);
MMFilesWalSlotInfoCopy slotInfo =
MMFilesLogfileManager::instance()->allocateAndWrite(marker, false);
error=slotInfo.errorCode;
} catch (arangodb::basics::Exception const& ex) {
error = ex.code();
} catch (...) {
error = TRI_ERROR_INTERNAL;
}
};
void MMFilesEngine::unloadCollection(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId) {
signalCleanup(vocbase);

View File

@ -19,6 +19,7 @@
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Jan Steemann
/// @author Jan Christoph Uhde
////////////////////////////////////////////////////////////////////////////////
#ifndef ARANGOD_STORAGE_ENGINE_MMFILES_ENGINE_H
@ -56,26 +57,22 @@ class MMFilesEngine final : public StorageEngine {
// inherited from ApplicationFeature
// ---------------------------------
// add the storage engine's specifc options to the global list of options
void collectOptions(std::shared_ptr<options::ProgramOptions>) override;
// validate the storage engine's specific options
void validateOptions(std::shared_ptr<options::ProgramOptions>) override;
// preparation phase for storage engine. can be used for internal setup.
// the storage engine must not start any threads here or write any files
void prepare() override;
// initialize engine
void start() override;
void stop() override;
bool inRecovery() override;
// called when recovery is finished
void recoveryDone(TRI_vocbase_t* vocbase) override;
// flush wal wait for collector
void stop() override;
TransactionState* createTransactionState(TRI_vocbase_t*) override;
TransactionCollection* createTransactionCollection(TransactionState* state, TRI_voc_cid_t cid, AccessMode::Type accessType, int nestingLevel) override;
@ -86,33 +83,6 @@ class MMFilesEngine final : public StorageEngine {
// inventory functionality
// -----------------------
Database* createDatabase(TRI_voc_tick_t id, arangodb::velocypack::Slice const& args, int& status) override {
status = TRI_ERROR_NO_ERROR;
return createDatabaseMMFiles(id,args);
}
void dropDatabase(Database* database, int& status) override;
std::string getName(Database* db) const override {
return db->name();
}
std::string getPath(Database* db) const override {
return databaseDirectory(db->id());
}
std::string getName(Database*, CollectionView*) const override {
throw std::logic_error("not implemented");
return "not implemented";
}
std::string getPath(Database*, CollectionView* coll) const override {
throw std::logic_error("not implemented");
return collectionDirectory(0, 0);
}
// fill the Builder object with an array of databases that were detected
// by the storage engine. this method must sort out databases that were not
// fully created (see "createDatabase" below). called at server start only
@ -129,52 +99,41 @@ class MMFilesEngine final : public StorageEngine {
int getCollectionsAndIndexes(TRI_vocbase_t* vocbase, arangodb::velocypack::Builder& result,
bool wasCleanShutdown, bool isUpgrade) override;
// return the path for a database
std::string databasePath(TRI_vocbase_t const* vocbase) const override {
return databaseDirectory(vocbase->id());
}
// return the path for a collection
std::string collectionPath(TRI_vocbase_t const* vocbase, TRI_voc_cid_t id) const override {
return collectionDirectory(vocbase->id(), id);
}
virtual TRI_vocbase_t* openDatabase(arangodb::velocypack::Slice const& parameters, bool isUpgrade, int&) override;
// database, collection and index management
// -----------------------------------------
// asks the storage engine to create a database as specified in the VPack
// Slice object and persist the creation info. It is guaranteed by the server that
// no other active database with the same name and id exists when this function
// is called. If this operation fails somewhere in the middle, the storage
// engine is required to fully clean up the creation and throw only then,
// so that subsequent database creation requests will not fail.
// the WAL entry for the database creation will be written *after* the call
// to "createDatabase" returns
// return the path for a database
std::string databasePath(TRI_vocbase_t const* vocbase) const override {
return databaseDirectory(vocbase->id());
}
virtual TRI_vocbase_t* openDatabase(arangodb::velocypack::Slice const& parameters, bool isUpgrade, int&) override;
Database* createDatabase(TRI_voc_tick_t id, arangodb::velocypack::Slice const& args, int& status) override {
status = TRI_ERROR_NO_ERROR;
return createDatabaseMMFiles(id,args);
}
int writeCreateMarker(TRI_voc_tick_t id, VPackSlice const& slice) override;
void prepareDropDatabase(TRI_vocbase_t* vocbase, bool useWriteMarker, int& status) override;
void dropDatabase(Database* database, int& status) override;
void waitUntilDeletion(TRI_voc_tick_t id, bool force, int& status) override;
// wal in recovery
bool inRecovery() override;
// start compactor thread and delete files form collections marked as deleted
void recoveryDone(TRI_vocbase_t* vocbase) override;
private:
int dropDatabaseMMFiles(TRI_vocbase_t* vocbase);
TRI_vocbase_t* createDatabaseMMFiles(TRI_voc_tick_t id, arangodb::velocypack::Slice const& data);
// asks the storage engine to drop the specified database and persist the
// deletion info. Note that physical deletion of the database data must not
// be carried out by this call, as there may still be readers of the database's data.
// It is recommended that this operation only sets a deletion flag for the database
// but let's an async task perform the actual deletion.
// the WAL entry for database deletion will be written *after* the call
// to "prepareDropDatabase" returns == TODO UPDATE
void prepareDropDatabase(TRI_vocbase_t* vocbase, bool useWriteMarker, int& status) override;
// perform a physical deletion of the database
int dropDatabaseMMFiles(TRI_vocbase_t* vocbase);
/// @brief wait until a database directory disappears
void waitUntilDeletion(TRI_voc_tick_t id, bool force, int& status) override;
/// @brief writes a create-database marker into the log
int writeCreateMarker(TRI_voc_tick_t id, VPackSlice const& slice);
public:
// asks the storage engine to create a collection as specified in the VPack
// Slice object and persist the creation info. It is guaranteed by the server
// that no other active collection with the same name and id exists in the same
@ -220,6 +179,9 @@ class MMFilesEngine final : public StorageEngine {
// to "createIndex" returns
void createIndex(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId,
TRI_idx_iid_t id, arangodb::velocypack::Slice const& data) override;
virtual void createIndexWalMarker(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId,
arangodb::velocypack::Slice const& data, bool useMarker, int&) override;
// asks the storage engine to drop the specified index and persist the deletion
// info. Note that physical deletion of the index must not be carried out by this call,
@ -231,6 +193,9 @@ class MMFilesEngine final : public StorageEngine {
void dropIndex(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId,
TRI_idx_iid_t id) override;
void dropIndexWalMarker(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId,
arangodb::velocypack::Slice const& data, bool writeMarker, int&) override;
void unloadCollection(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId) override;
void signalCleanup(TRI_vocbase_t* vocbase) override;

View File

@ -18,6 +18,7 @@
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Dr. Frank Celler
/// @author Jan Christoph Uhde
////////////////////////////////////////////////////////////////////////////////
#include "DatabaseFeature.h"

View File

@ -35,8 +35,7 @@ using namespace arangodb::options;
FileDescriptorsFeature::FileDescriptorsFeature(
application_features::ApplicationServer* server)
: ApplicationFeature(server, "FileDescriptors"),
_descriptorsMinimum(1024) {
: ApplicationFeature(server, "FileDescriptors"), _descriptorsMinimum(1024) {
setOptional(false);
requiresElevatedPrivileges(false);
startsAfter("Logger");
@ -53,9 +52,7 @@ void FileDescriptorsFeature::collectOptions(
#endif
}
void FileDescriptorsFeature::prepare() {
adjustFileDescriptors();
}
void FileDescriptorsFeature::prepare() { adjustFileDescriptors(); }
#ifdef TRI_HAVE_GETRLIMIT
template <typename T>
@ -74,9 +71,10 @@ void FileDescriptorsFeature::start() {
int res = getrlimit(RLIMIT_NOFILE, &rlim);
if (res == 0) {
LOG_TOPIC(INFO, arangodb::Logger::FIXME) << "file-descriptors (nofiles) hard limit is "
<< StringifyLimitValue(rlim.rlim_max) << ", soft limit is "
<< StringifyLimitValue(rlim.rlim_cur);
LOG_TOPIC(INFO, arangodb::Logger::SYSCALL)
<< "file-descriptors (nofiles) hard limit is "
<< StringifyLimitValue(rlim.rlim_max) << ", soft limit is "
<< StringifyLimitValue(rlim.rlim_cur);
}
#endif
}
@ -88,19 +86,21 @@ void FileDescriptorsFeature::adjustFileDescriptors() {
int res = getrlimit(RLIMIT_NOFILE, &rlim);
if (res != 0) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "cannot get the file descriptor limit: " << strerror(errno);
LOG_TOPIC(FATAL, arangodb::Logger::FIXME)
<< "cannot get the file descriptor limit: " << strerror(errno);
FATAL_ERROR_EXIT();
}
LOG_TOPIC(DEBUG, arangodb::Logger::FIXME) << "file-descriptors (nofiles) hard limit is "
<< StringifyLimitValue(rlim.rlim_max) << ", soft limit is "
<< StringifyLimitValue(rlim.rlim_cur);
LOG_TOPIC(DEBUG, arangodb::Logger::FIXME)
<< "file-descriptors (nofiles) hard limit is "
<< StringifyLimitValue(rlim.rlim_max) << ", soft limit is "
<< StringifyLimitValue(rlim.rlim_cur);
bool changed = false;
if (rlim.rlim_max < _descriptorsMinimum) {
LOG_TOPIC(DEBUG, arangodb::Logger::FIXME) << "hard limit " << rlim.rlim_max
<< " is too small, trying to raise";
LOG_TOPIC(DEBUG, arangodb::Logger::FIXME)
<< "hard limit " << rlim.rlim_max << " is too small, trying to raise";
rlim.rlim_max = _descriptorsMinimum;
rlim.rlim_cur = _descriptorsMinimum;
@ -108,23 +108,25 @@ void FileDescriptorsFeature::adjustFileDescriptors() {
res = setrlimit(RLIMIT_NOFILE, &rlim);
if (res < 0) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "cannot raise the file descriptor limit to "
<< _descriptorsMinimum << ": " << strerror(errno);
LOG_TOPIC(FATAL, arangodb::Logger::FIXME)
<< "cannot raise the file descriptor limit to "
<< _descriptorsMinimum << ": " << strerror(errno);
FATAL_ERROR_EXIT();
}
changed = true;
} else if (rlim.rlim_cur < _descriptorsMinimum) {
LOG_TOPIC(DEBUG, arangodb::Logger::FIXME) << "soft limit " << rlim.rlim_cur
<< " is too small, trying to raise";
LOG_TOPIC(DEBUG, arangodb::Logger::FIXME)
<< "soft limit " << rlim.rlim_cur << " is too small, trying to raise";
rlim.rlim_cur = _descriptorsMinimum;
res = setrlimit(RLIMIT_NOFILE, &rlim);
if (res < 0) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "cannot raise the file descriptor limit to "
<< _descriptorsMinimum << ": " << strerror(errno);
LOG_TOPIC(FATAL, arangodb::Logger::FIXME)
<< "cannot raise the file descriptor limit to "
<< _descriptorsMinimum << ": " << strerror(errno);
FATAL_ERROR_EXIT();
}
@ -135,14 +137,15 @@ void FileDescriptorsFeature::adjustFileDescriptors() {
res = getrlimit(RLIMIT_NOFILE, &rlim);
if (res != 0) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "cannot get the file descriptor limit: "
<< strerror(errno);
LOG_TOPIC(FATAL, arangodb::Logger::SYSCALL)
<< "cannot get the file descriptor limit: " << strerror(errno);
FATAL_ERROR_EXIT();
}
LOG_TOPIC(INFO, arangodb::Logger::FIXME) << "file-descriptors (nofiles) new hard limit is "
<< StringifyLimitValue(rlim.rlim_max) << ", new soft limit is "
<< StringifyLimitValue(rlim.rlim_cur);
LOG_TOPIC(INFO, arangodb::Logger::SYSCALL)
<< "file-descriptors (nofiles) new hard limit is "
<< StringifyLimitValue(rlim.rlim_max) << ", new soft limit is "
<< StringifyLimitValue(rlim.rlim_cur);
}
}
#endif

View File

@ -70,26 +70,6 @@ class RocksDBEngine final : public StorageEngine {
// inventory functionality
// -----------------------
//return empty string when not found
virtual std::string getName(Database*) const {
throw std::logic_error("not implemented");
return TRI_ERROR_NO_ERROR;
};
virtual std::string getPath(Database*) const {
throw std::logic_error("not implemented");
return TRI_ERROR_NO_ERROR;
};
virtual std::string getName(Database*, CollectionView*) const override {
throw std::logic_error("not implemented");
return TRI_ERROR_NO_ERROR;
};
virtual std::string getPath(Database*, CollectionView*) const override {
throw std::logic_error("not implemented");
return TRI_ERROR_NO_ERROR;
};
// fill the Builder object with an array of databases that were detected
// by the storage engine. this method must sort out databases that were not
// fully created (see "createDatabase" below). called at server start only

View File

@ -19,6 +19,7 @@
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Jan Steemann
/// @author Jan Christoph Uhde
////////////////////////////////////////////////////////////////////////////////
#ifndef ARANGOD_STORAGE_ENGINE_STORAGE_ENGINE_H
@ -70,9 +71,6 @@ class StorageEngine : public application_features::ApplicationFeature {
virtual void start() {}
virtual void stop() {}
virtual bool inRecovery() { return false; }
virtual void recoveryDone(TRI_vocbase_t* vocbase) {}
virtual int writeCreateMarker(TRI_voc_tick_t id, VPackSlice const& slice) = 0;
virtual TransactionState* createTransactionState(TRI_vocbase_t*) = 0;
virtual TransactionCollection* createTransactionCollection(TransactionState*, TRI_voc_cid_t, AccessMode::Type, int nestingLevel) = 0;
@ -112,18 +110,21 @@ class StorageEngine : public application_features::ApplicationFeature {
// return the path for a collection
virtual std::string collectionPath(TRI_vocbase_t const* vocbase, TRI_voc_cid_t id) const = 0;
//virtual TRI_vocbase_t* openDatabase(arangodb::velocypack::Slice const& parameters, bool isUpgrade) = 0;
// database, collection and index management
// -----------------------------------------
// if not stated other wise functions may throw and the caller has to take care of error handling
// the return values will be the usual TRI_ERROR_* codes.
// TODO add pre / post conditions for functions
// TODO add pre / post conditions
using Database = TRI_vocbase_t;
using CollectionView = LogicalCollection;
// if not stated other wise functions may throw and the caller has to take care of error handling
// the return values will be the usual TRI_ERROR_* codes
//// operations on databasea
/// @brief opens a database
virtual Database* openDatabase(arangodb::velocypack::Slice const& args, bool isUpgrade, int& status) = 0;
Database* openDatabase(arangodb::velocypack::Slice const& args, bool isUpgrade){
int status;
@ -133,12 +134,6 @@ class StorageEngine : public application_features::ApplicationFeature {
return rv;
}
//return empty string when not found
virtual std::string getName(Database*) const = 0;
virtual std::string getPath(Database*) const = 0;
virtual std::string getName(Database*, CollectionView*) const = 0;
virtual std::string getPath(Database*, CollectionView*) const = 0;
// asks the storage engine to create a database as specified in the VPack
// Slice object and persist the creation info. It is guaranteed by the server that
// no other active database with the same name and id exists when this function
@ -157,6 +152,9 @@ class StorageEngine : public application_features::ApplicationFeature {
return rv;
}
// @brief wirte create marker for database
virtual int writeCreateMarker(TRI_voc_tick_t id, VPackSlice const& slice) = 0;
// asks the storage engine to drop the specified database and persist the
// deletion info. Note that physical deletion of the database data must not
// be carried out by this call, as there may still be readers of the database's data.
@ -169,15 +167,10 @@ class StorageEngine : public application_features::ApplicationFeature {
virtual void prepareDropDatabase(TRI_vocbase_t* vocbase, bool useWriteMarker, int& status) = 0;
void prepareDropDatabase(Database* db, bool useWriteMarker){
int status = 0;
prepareDropDatabase(db, status);
prepareDropDatabase(db, useWriteMarker, status);
TRI_ASSERT(status == TRI_ERROR_NO_ERROR);
};
/// @brief wait until a database directory disappears
//
// should not require a lock
virtual void waitUntilDeletion(TRI_voc_tick_t id, bool force, int& status) = 0;
// perform a physical deletion of the database
virtual void dropDatabase(Database*, int& status) = 0;
void dropDatabase(Database* db){
@ -185,11 +178,18 @@ class StorageEngine : public application_features::ApplicationFeature {
dropDatabase(db, status);
TRI_ASSERT(status == TRI_ERROR_NO_ERROR);
};
/// @brief wait until a database directory disappears - not under lock in databaseFreature
virtual void waitUntilDeletion(TRI_voc_tick_t id, bool force, int& status) = 0;
/// @brief is database in recovery
virtual bool inRecovery() { return false; }
/// @brief function to be run when recovery is done
virtual void recoveryDone(TRI_vocbase_t* vocbase) {}
public:
//// Operations on Collections
// asks the storage engine to create a collection as specified in the VPack
// Slice object and persist the creation info. It is guaranteed by the server
// that no other active collection with the same name and id exists in the same
@ -236,6 +236,9 @@ public:
virtual void createIndex(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId,
TRI_idx_iid_t id, arangodb::velocypack::Slice const& data) = 0;
virtual void createIndexWalMarker(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId,
arangodb::velocypack::Slice const& data, bool useMarker, int&) = 0;
// asks the storage engine to drop the specified index and persist the deletion
// info. Note that physical deletion of the index must not be carried out by this call,
// as there may still be users of the index. It is recommended that this operation
@ -246,6 +249,8 @@ public:
virtual void dropIndex(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId,
TRI_idx_iid_t id) = 0;
virtual void dropIndexWalMarker(TRI_vocbase_t* vocbase, TRI_voc_cid_t collectionId,
arangodb::velocypack::Slice const& data, bool useMarker, int&) = 0;
// Returns the StorageEngine-specific implementation
// of the IndexFactory. This is used to validate
// information about indexes.

View File

@ -43,13 +43,13 @@
#include "Scheduler/SchedulerFeature.h"
#include "StorageEngine/EngineSelectorFeature.h"
#include "MMFiles/MMFilesDocumentOperation.h"
#include "MMFiles/MMFilesLogfileManager.h"
//#include "MMFiles/MMFilesLogfileManager.h"
#include "MMFiles/MMFilesPrimaryIndex.h"
#include "MMFiles/MMFilesIndexElement.h"
#include "MMFiles/MMFilesToken.h"
#include "MMFiles/MMFilesTransactionState.h"
#include "MMFiles/MMFilesWalMarker.h"
#include "MMFiles/MMFilesWalSlots.h"
#include "MMFiles/MMFilesWalMarker.h" //crud marker -- TODO remove
#include "MMFiles/MMFilesWalSlots.h" //TODO -- remove
#include "StorageEngine/StorageEngine.h"
#include "StorageEngine/TransactionState.h"
#include "Utils/CollectionNameResolver.h"
@ -1322,7 +1322,7 @@ void LogicalCollection::open(bool ignoreErrors) {
}
}
if (!MMFilesLogfileManager::instance()->isInRecovery()) {
if (!engine->inRecovery()) {
// build the index structures, and fill the indexes
fillIndexes(&trx, *(indexList()));
}
@ -1430,7 +1430,6 @@ std::shared_ptr<Index> LogicalCollection::createIndex(Transaction* trx,
VPackSlice const& info,
bool& created) {
// TODO Get LOCK for the vocbase
auto idx = lookupIndex(info);
if (idx != nullptr) {
created = false;
@ -1466,8 +1465,8 @@ std::shared_ptr<Index> LogicalCollection::createIndex(Transaction* trx,
THROW_ARANGO_EXCEPTION(res);
}
bool const writeMarker =
!MMFilesLogfileManager::instance()->isInRecovery();
bool const writeMarker = !engine->inRecovery();
// !MMFilesLogfileManager::instance()->isInRecovery();
res = saveIndex(idx.get(), writeMarker);
if (res != TRI_ERROR_NO_ERROR) {
@ -1537,6 +1536,8 @@ int LogicalCollection::saveIndex(arangodb::Index* idx, bool writeMarker) {
std::shared_ptr<VPackBuilder> builder;
try {
builder = idx->toVelocyPack(false);
} catch (arangodb::basics::Exception const& ex) {
return ex.code();
} catch (...) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "cannot save index definition";
return TRI_ERROR_INTERNAL;
@ -1548,33 +1549,9 @@ int LogicalCollection::saveIndex(arangodb::Index* idx, bool writeMarker) {
StorageEngine* engine = EngineSelectorFeature::ENGINE;
engine->createIndex(_vocbase, cid(), idx->id(), builder->slice());
if (!writeMarker) {
return TRI_ERROR_NO_ERROR;
}
int res = TRI_ERROR_NO_ERROR;
try {
MMFilesCollectionMarker marker(TRI_DF_MARKER_VPACK_CREATE_INDEX,
_vocbase->id(), cid(), builder->slice());
MMFilesWalSlotInfoCopy slotInfo =
MMFilesLogfileManager::instance()->allocateAndWrite(marker,
false);
if (slotInfo.errorCode != TRI_ERROR_NO_ERROR) {
THROW_ARANGO_EXCEPTION(slotInfo.errorCode);
}
return TRI_ERROR_NO_ERROR;
} catch (arangodb::basics::Exception const& ex) {
res = ex.code();
} catch (...) {
res = TRI_ERROR_INTERNAL;
}
// TODO: what to do here?
engine->createIndexWalMarker(_vocbase, cid(), builder->slice(), writeMarker,res);
return res;
}
@ -1644,38 +1621,20 @@ bool LogicalCollection::dropIndex(TRI_idx_iid_t iid, bool writeMarker) {
if (writeMarker) {
int res = TRI_ERROR_NO_ERROR;
try {
VPackBuilder markerBuilder;
markerBuilder.openObject();
markerBuilder.add("id", VPackValue(std::to_string(iid)));
markerBuilder.close();
MMFilesCollectionMarker marker(TRI_DF_MARKER_VPACK_DROP_INDEX,
_vocbase->id(), cid(),
markerBuilder.slice());
MMFilesWalSlotInfoCopy slotInfo =
MMFilesLogfileManager::instance()->allocateAndWrite(marker,
false);
if (slotInfo.errorCode != TRI_ERROR_NO_ERROR) {
THROW_ARANGO_EXCEPTION(slotInfo.errorCode);
}
VPackBuilder markerBuilder;
markerBuilder.openObject();
markerBuilder.add("id", VPackValue(std::to_string(iid)));
markerBuilder.close();
engine->dropIndexWalMarker(_vocbase, cid(), markerBuilder.slice(),writeMarker,res);
if(! res){
events::DropIndex("", std::to_string(iid), TRI_ERROR_NO_ERROR);
return true;
} catch (basics::Exception const& ex) {
res = ex.code();
} catch (...) {
res = TRI_ERROR_INTERNAL;
}
LOG_TOPIC(WARN, arangodb::Logger::FIXME) << "could not save index drop marker in log: "
} else {
LOG_TOPIC(WARN, arangodb::Logger::FIXME) << "could not save index drop marker in log: "
<< TRI_errno_string(res);
events::DropIndex("", std::to_string(iid), res);
// TODO: what to do here?
events::DropIndex("", std::to_string(iid), res);
}
}
return true;
}

View File

@ -222,10 +222,10 @@ class BreadthFirstEnumerator final : public PathEnumerator {
/// @brief Marker for the search depth. Used to abort searching.
//////////////////////////////////////////////////////////////////////////////
size_t _currentDepth;
uint64_t _currentDepth;
//////////////////////////////////////////////////////////////////////////////
/// @brief position in _toSerach. If this is >= _toSearch.size() we are done
/// @brief position in _toSearch. If this is >= _toSearch.size() we are done
/// with this depth.
//////////////////////////////////////////////////////////////////////////////
@ -288,7 +288,7 @@ class NeighborsEnumerator final : public PathEnumerator {
_lastDepth;
std::unordered_set<arangodb::basics::VPackHashedSlice, arangodb::basics::VelocyPackHelper::VPackHashedStringHash, arangodb::basics::VelocyPackHelper::VPackHashedStringEqual>::iterator _iterator;
size_t _searchDepth;
uint64_t _searchDepth;
//////////////////////////////////////////////////////////////////////////////
/// @brief Vector storing the position at current search depth

View File

@ -259,6 +259,6 @@ bool SingleServerTraverser::getVertex(VPackSlice edge,
}
bool SingleServerTraverser::getSingleVertex(VPackSlice edge, VPackSlice vertex,
size_t depth, VPackSlice& result) {
uint64_t depth, VPackSlice& result) {
return _vertexGetter->getSingleVertex(edge, vertex, depth, result);
}

View File

@ -95,7 +95,7 @@ class SingleServerTraverser final : public Traverser {
/// Returns true if the vertex passes filtering conditions
bool getSingleVertex(arangodb::velocypack::Slice, arangodb::velocypack::Slice,
size_t depth, arangodb::velocypack::Slice&) override;
uint64_t depth, arangodb::velocypack::Slice&) override;
//////////////////////////////////////////////////////////////////////////////
/// @brief Function to fetch the real data of a vertex into an AQLValue

View File

@ -89,7 +89,7 @@ bool Traverser::VertexGetter::getVertex(
bool Traverser::VertexGetter::getSingleVertex(VPackSlice edge,
VPackSlice cmp,
size_t depth,
uint64_t depth,
VPackSlice& result) {
VPackSlice from = Transaction::extractFromFromDocument(edge);
if (from != cmp) {
@ -132,7 +132,7 @@ bool Traverser::UniqueVertexGetter::getVertex(
}
bool Traverser::UniqueVertexGetter::getSingleVertex(
VPackSlice edge, VPackSlice cmp, size_t depth, VPackSlice& result) {
VPackSlice edge, VPackSlice cmp, uint64_t depth, VPackSlice& result) {
result = Transaction::extractFromFromDocument(edge);
if (cmp == result) {
@ -181,7 +181,7 @@ Traverser::Traverser(arangodb::traverser::TraverserOptions* opts, arangodb::Tran
bool arangodb::traverser::Traverser::edgeMatchesConditions(VPackSlice e,
VPackSlice vid,
size_t depth,
uint64_t depth,
size_t cursorId) {
if (!_opts->evaluateEdgeExpression(e, vid, depth, cursorId)) {
++_filteredPaths;
@ -190,7 +190,7 @@ bool arangodb::traverser::Traverser::edgeMatchesConditions(VPackSlice e,
return true;
}
bool arangodb::traverser::Traverser::vertexMatchesConditions(VPackSlice v, size_t depth) {
bool arangodb::traverser::Traverser::vertexMatchesConditions(VPackSlice v, uint64_t depth) {
TRI_ASSERT(v.isString());
if (_opts->vertexHasFilter(depth)) {
aql::AqlValue vertex = fetchVertexData(v);

View File

@ -184,7 +184,7 @@ class Traverser {
std::vector<arangodb::velocypack::Slice>&);
virtual bool getSingleVertex(arangodb::velocypack::Slice,
arangodb::velocypack::Slice, size_t,
arangodb::velocypack::Slice, uint64_t,
arangodb::velocypack::Slice&);
virtual void reset(arangodb::velocypack::Slice);
@ -208,7 +208,7 @@ class Traverser {
std::vector<arangodb::velocypack::Slice>&) override;
bool getSingleVertex(arangodb::velocypack::Slice,
arangodb::velocypack::Slice, size_t,
arangodb::velocypack::Slice, uint64_t,
arangodb::velocypack::Slice&) override;
void reset(arangodb::velocypack::Slice) override;
@ -268,7 +268,7 @@ class Traverser {
/// Returns true if the vertex passes filtering conditions
virtual bool getSingleVertex(arangodb::velocypack::Slice,
arangodb::velocypack::Slice, size_t,
arangodb::velocypack::Slice, uint64_t,
arangodb::velocypack::Slice&) = 0;
public:
@ -329,9 +329,9 @@ class Traverser {
bool hasMore() { return !_done; }
bool edgeMatchesConditions(arangodb::velocypack::Slice,
arangodb::velocypack::Slice, size_t, size_t);
arangodb::velocypack::Slice, uint64_t, size_t);
bool vertexMatchesConditions(arangodb::velocypack::Slice, size_t);
bool vertexMatchesConditions(arangodb::velocypack::Slice, uint64_t);
void allowOptimizedNeighbors();

View File

@ -296,7 +296,7 @@ arangodb::traverser::TraverserOptions::TraverserOptions(
}
_depthLookupInfo.reserve(read.length());
for (auto const& depth : VPackObjectIterator(read)) {
size_t d = basics::StringUtils::uint64(depth.key.copyString());
uint64_t d = basics::StringUtils::uint64(depth.key.copyString());
auto it = _depthLookupInfo.emplace(d, std::vector<LookupInfo>());
TRI_ASSERT(it.second);
VPackSlice list = depth.value;
@ -318,7 +318,7 @@ arangodb::traverser::TraverserOptions::TraverserOptions(
_vertexExpressions.reserve(read.length());
for (auto const& info : VPackObjectIterator(read)) {
size_t d = basics::StringUtils::uint64(info.key.copyString());
uint64_t d = basics::StringUtils::uint64(info.key.copyString());
#ifdef ARANGODB_ENABLE_MAINAINER_MODE
auto it = _vertexExpressions.emplace(
d, new aql::Expression(query->ast(), info.value));
@ -528,7 +528,7 @@ void arangodb::traverser::TraverserOptions::buildEngineInfo(VPackBuilder& result
}
bool arangodb::traverser::TraverserOptions::vertexHasFilter(
size_t depth) const {
uint64_t depth) const {
if (_baseVertexExpression != nullptr) {
return true;
}
@ -537,7 +537,7 @@ bool arangodb::traverser::TraverserOptions::vertexHasFilter(
bool arangodb::traverser::TraverserOptions::evaluateEdgeExpression(
arangodb::velocypack::Slice edge, arangodb::velocypack::Slice vertex,
size_t depth, size_t cursorId) const {
uint64_t depth, size_t cursorId) const {
if (_isCoordinator) {
// The Coordinator never checks conditions. The DBServer is responsible!
return true;
@ -589,7 +589,7 @@ bool arangodb::traverser::TraverserOptions::evaluateEdgeExpression(
}
bool arangodb::traverser::TraverserOptions::evaluateVertexExpression(
arangodb::velocypack::Slice vertex, size_t depth) const {
arangodb::velocypack::Slice vertex, uint64_t depth) const {
arangodb::aql::Expression* expression = nullptr;
auto specific = _vertexExpressions.find(depth);
@ -620,7 +620,7 @@ bool arangodb::traverser::TraverserOptions::evaluateVertexExpression(
arangodb::traverser::EdgeCursor*
arangodb::traverser::TraverserOptions::nextCursor(ManagedDocumentResult* mmdr,
VPackSlice vertex,
size_t depth) const {
uint64_t depth) const {
if (_isCoordinator) {
return nextCursorCoordinator(vertex, depth);
}
@ -637,7 +637,7 @@ arangodb::traverser::TraverserOptions::nextCursor(ManagedDocumentResult* mmdr,
arangodb::traverser::EdgeCursor*
arangodb::traverser::TraverserOptions::nextCursorLocal(ManagedDocumentResult* mmdr,
VPackSlice vertex, size_t depth, std::vector<LookupInfo>& list) const {
VPackSlice vertex, uint64_t depth, std::vector<LookupInfo>& list) const {
TRI_ASSERT(mmdr != nullptr);
auto allCursor = std::make_unique<SingleServerEdgeCursor>(mmdr, _trx, list.size());
auto& opCursors = allCursor->getCursors();
@ -670,7 +670,7 @@ arangodb::traverser::TraverserOptions::nextCursorLocal(ManagedDocumentResult* mm
arangodb::traverser::EdgeCursor*
arangodb::traverser::TraverserOptions::nextCursorCoordinator(
VPackSlice vertex, size_t depth) const {
VPackSlice vertex, uint64_t depth) const {
TRI_ASSERT(_traverser != nullptr);
auto cursor = std::make_unique<ClusterEdgeCursor>(vertex, depth, _traverser);
return cursor.release();

View File

@ -98,8 +98,8 @@ struct TraverserOptions {
arangodb::Transaction* _trx;
protected:
std::vector<LookupInfo> _baseLookupInfos;
std::unordered_map<size_t, std::vector<LookupInfo>> _depthLookupInfo;
std::unordered_map<size_t, aql::Expression*> _vertexExpressions;
std::unordered_map<uint64_t, std::vector<LookupInfo>> _depthLookupInfo;
std::unordered_map<uint64_t, aql::Expression*> _vertexExpressions;
aql::Expression* _baseVertexExpression;
aql::Variable const* _tmpVar;
aql::FixedVarExpressionContext* _ctx;
@ -151,15 +151,15 @@ struct TraverserOptions {
/// for DBServer traverser engines.
void buildEngineInfo(arangodb::velocypack::Builder&) const;
bool vertexHasFilter(size_t) const;
bool vertexHasFilter(uint64_t) const;
bool evaluateEdgeExpression(arangodb::velocypack::Slice,
arangodb::velocypack::Slice, size_t,
arangodb::velocypack::Slice, uint64_t,
size_t) const;
bool evaluateVertexExpression(arangodb::velocypack::Slice, size_t) const;
bool evaluateVertexExpression(arangodb::velocypack::Slice, uint64_t) const;
EdgeCursor* nextCursor(ManagedDocumentResult*, arangodb::velocypack::Slice, size_t) const;
EdgeCursor* nextCursor(ManagedDocumentResult*, arangodb::velocypack::Slice, uint64_t) const;
void clearVariableValues();
@ -171,10 +171,10 @@ struct TraverserOptions {
private:
EdgeCursor* nextCursorLocal(ManagedDocumentResult*,
arangodb::velocypack::Slice, size_t,
arangodb::velocypack::Slice, uint64_t,
std::vector<LookupInfo>&) const;
EdgeCursor* nextCursorCoordinator(arangodb::velocypack::Slice, size_t) const;
EdgeCursor* nextCursorCoordinator(arangodb::velocypack::Slice, uint64_t) const;
};
}

View File

@ -157,65 +157,3 @@ macro(to_native_path sourceVarName)
endif()
set("INC_${sourceVarName}" ${myVar})
endmacro()
macro(install_debinfo
STRIP_DIR
USER_SUB_DEBINFO_DIR
USER_FILE
USER_STRIP_FILE)
set(SUB_DEBINFO_DIR ${USER_SUB_DEBINFO_DIR})
set(FILE ${USER_FILE})
set(STRIP_FILE ${STRIP_DIR}/${USER_STRIP_FILE})
execute_process(COMMAND mkdir -p ${STRIP_DIR})
if (NOT MSVC AND CMAKE_STRIP AND FILE_EXECUTABLE)
execute_process(COMMAND "rm" -f ${STRIP_FILE})
execute_process(
COMMAND ${FILE_EXECUTABLE} ${FILE}
OUTPUT_VARIABLE FILE_RESULT)
string(REGEX
REPLACE ".*=([a-z0-9]*),.*" "\\1"
FILE_CHECKSUM
${FILE_RESULT}
)
string(LENGTH ${FILE_CHECKSUM} FILE_CHECKSUM_LEN)
if (FILE_CHECKSUM_LEN EQUAL 40)
string(SUBSTRING ${FILE_CHECKSUM} 0 2 SUB_DIR)
string(SUBSTRING ${FILE_CHECKSUM} 2 -1 STRIP_FILE)
set(SUB_DEBINFO_DIR .build-id/${SUB_DIR})
set(STRIP_FILE "${STRIP_FILE}.debug")
else ()
set(STRIP_FILE ${USER_STRIP_FILE})
endif()
execute_process(COMMAND ${CMAKE_OBJCOPY} --only-keep-debug ${FILE} ${STRIP_DIR}/${STRIP_FILE})
set(FILE ${STRIP_DIR}/${STRIP_FILE})
install(
PROGRAMS ${FILE}
DESTINATION ${CMAKE_INSTALL_DEBINFO_DIR}/${SUB_DEBINFO_DIR})
endif()
endmacro()
# Detect whether this system has SHA checksums
macro(detect_binary_id_type sourceVar)
set(${sourceVar} false)
if (NOT MSVC AND CMAKE_STRIP AND FILE_EXECUTABLE)
execute_process(
COMMAND ${FILE_EXECUTABLE} ${FILE_EXECUTABLE}
OUTPUT_VARIABLE FILE_RESULT)
string(REGEX
REPLACE ".*=([a-z0-9]*),.*" "\\1"
FILE_CHECKSUM
${FILE_RESULT}
)
string(LENGTH ${FILE_CHECKSUM} FILE_CHECKSUM_LEN)
if (FILE_CHECKSUM_LEN EQUAL 40)
set(${sourceVar} true)
endif()
endif()
endmacro()

View File

@ -0,0 +1,65 @@
################################################################################
## INSTALL debug information on *nux systems
################################################################################
macro(install_debinfo
STRIP_DIR
USER_SUB_DEBINFO_DIR
USER_FILE
USER_STRIP_FILE)
set(SUB_DEBINFO_DIR ${USER_SUB_DEBINFO_DIR})
set(FILE ${USER_FILE})
set(STRIP_FILE ${STRIP_DIR}/${USER_STRIP_FILE})
execute_process(COMMAND mkdir -p ${STRIP_DIR})
if (NOT MSVC AND CMAKE_STRIP AND FILE_EXECUTABLE)
execute_process(COMMAND "rm" -f ${STRIP_FILE})
execute_process(
COMMAND ${FILE_EXECUTABLE} ${FILE}
OUTPUT_VARIABLE FILE_RESULT)
string(REGEX
REPLACE ".*=([a-z0-9]*),.*" "\\1"
FILE_CHECKSUM
${FILE_RESULT}
)
string(LENGTH ${FILE_CHECKSUM} FILE_CHECKSUM_LEN)
if (FILE_CHECKSUM_LEN EQUAL 40)
string(SUBSTRING ${FILE_CHECKSUM} 0 2 SUB_DIR)
string(SUBSTRING ${FILE_CHECKSUM} 2 -1 STRIP_FILE)
set(SUB_DEBINFO_DIR .build-id/${SUB_DIR})
set(STRIP_FILE "${STRIP_FILE}.debug")
else ()
set(STRIP_FILE ${USER_STRIP_FILE})
endif()
execute_process(COMMAND ${CMAKE_OBJCOPY} --only-keep-debug ${FILE} ${STRIP_DIR}/${STRIP_FILE})
set(FILE ${STRIP_DIR}/${STRIP_FILE})
install(
PROGRAMS ${FILE}
DESTINATION ${CMAKE_INSTALL_DEBINFO_DIR}/${SUB_DEBINFO_DIR})
endif()
endmacro()
# Detect whether this system has SHA checksums
macro(detect_binary_id_type sourceVar)
set(${sourceVar} false)
if (NOT MSVC AND CMAKE_STRIP AND FILE_EXECUTABLE)
execute_process(
COMMAND ${FILE_EXECUTABLE} ${FILE_EXECUTABLE}
OUTPUT_VARIABLE FILE_RESULT)
string(REGEX
REPLACE ".*=([a-z0-9]*),.*" "\\1"
FILE_CHECKSUM
${FILE_RESULT}
)
string(LENGTH ${FILE_CHECKSUM} FILE_CHECKSUM_LEN)
if (FILE_CHECKSUM_LEN EQUAL 40)
set(${sourceVar} true)
endif()
endif()
endmacro()

View File

@ -115,6 +115,7 @@ set(CPACK_PACKAGE_FILE_NAME "${CPACK_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION}-${AR
set(INSTALL_MACROS_NO_TARGET_INSTALL TRUE)
include(${ORIGINAL_SOURCE_DIR}/cmake/debugInformation.cmake)
include(${ORIGINAL_SOURCE_DIR}/cmake/InstallMacros.cmake)
include(${ORIGINAL_SOURCE_DIR}/arangosh/dbg.cmake)
include(${ORIGINAL_SOURCE_DIR}/arangod/dbg.cmake)

View File

@ -111,11 +111,13 @@ add_custom_target(remove_packages
list(APPEND CLEAN_PACKAGES_LIST remove_packages)
set(SYMSRVDIR $ENV{SYMSRV})
if (NOT ${SYMSRVDIR} STREQUAL "")
message("Storing symbols:")
add_custom_command(TARGET ${BIN_ARANGOD} POST_BUILD
WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
COMMAND "find -name \\*pdb |grep -v Release |grep -v Debug |grep -v 3rdParty |grep -v vc120.pdb > pdbfiles_list.txt"
COMMAND "symstore.exe add /f '@${PROJECT_BINARY_DIR}/pdbfiles_list.txt' /s '${SYMSRVDIR}' /t ArangoDB /compress")
endif()
# set(SYMSRVDIR $ENV{SYMSRV})
# if (NOT ${SYMSRVDIR} STREQUAL "")
#
# message("Storing symbols:")
# include(../../arangod/CMakeList.txt)
# add_custom_command(TARGET ${BIN_ARANGOD} POST_BUILD
# WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
# COMMAND "find -name \\*pdb |grep -v Release |grep -v Debug |grep -v 3rdParty |grep -v vc120.pdb > pdbfiles_list.txt"
# COMMAND "symstore.exe add /f '@${PROJECT_BINARY_DIR}/pdbfiles_list.txt' /s '${SYMSRVDIR}' /t ArangoDB /compress")
# endif()

View File

@ -36,6 +36,8 @@ var cluster = require('@arangodb/cluster');
//var internal = require('internal');
var _ = require('lodash');
var fetchKey = cluster.fetchKey;
// //////////////////////////////////////////////////////////////////////////////
// / @brief was docuBlock JSF_cluster_test_GET
// //////////////////////////////////////////////////////////////////////////////
@ -449,40 +451,15 @@ actions.defineHttp({
}
var primary = req.parameters.primary;
var timeout = 60.0;
try {
if (req.parameters.hasOwnProperty('timeout')) {
timeout = Number(req.parameters.timeout);
}
} catch (e) {}
// Now get to work, first get the write lock on the Plan in the Agency:
var success = ArangoAgency.lockRead('Plan', timeout);
if (!success) {
actions.resultError(req, res, actions.HTTP_REQUEST_TIMEOUT, 0,
'could not get a read lock on Plan in Agency');
return;
var agency = ArangoAgency.get('Plan/DBServers/' + primary);
let secondary = fetchKey(agency, 'arango', 'Plan', 'DBServers', primary);
if (secondary === undefined) {
actions.resultError(req, res, actions.HTTP_NOT_FOUND, 0,
'Primary with the given ID is not configured in Agency.');
}
try {
var oldValue;
try {
oldValue = ArangoAgency.get('Plan/DBServers/' + primary, false, false);
oldValue = oldValue.arango.Plan.DBServers[primary];
} catch (e1) {
actions.resultError(req, res, actions.HTTP_NOT_FOUND, 0,
'Primary with the given ID is not configured in Agency.');
return;
}
oldValue = oldValue['Plan/DBServers/' + primary];
actions.resultOk(req, res, actions.HTTP_OK, { primary: primary,
secondary: oldValue });
} finally {
ArangoAgency.unlockRead('Plan', timeout);
}
actions.resultOk(req, res, actions.HTTP_OK, { primary: primary,
secondary: secondary });
}
});
@ -562,67 +539,32 @@ actions.defineHttp({
'"newSecondary" are given in body and are strings');
return;
}
let agency = ArangoAgency.get('Plan/DBServers/' + body.primary);
var ttl = 60.0;
var timeout = 60.0;
if (body.hasOwnProperty('ttl') && typeof body.ttl === 'number') {
ttl = body.ttl;
}
if (body.hasOwnProperty('timeout') && typeof body.timeout === 'number') {
timeout = body.timeout;
}
// Now get to work, first get the write lock on the Plan in the Agency:
var success = ArangoAgency.lockWrite('Plan', ttl, timeout);
if (!success) {
actions.resultError(req, res, actions.HTTP_REQUEST_TIMEOUT, 0,
'could not get a write lock on Plan in Agency');
if (fetchKey(agency, 'arango', 'Plan', 'DBServers', body.primary) === undefined) {
actions.resultError(req, res, actions.HTTP_NOT_FOUND, 0,
'Primary with the given ID is not configured in Agency.');
return;
}
try {
var oldValue;
try {
oldValue = ArangoAgency.get('Plan/DBServers/' + body.primary, false,
false);
if (oldValue.arango.Plan.DBServers.hasOwnProperty(body.primary)) {
oldValue = oldValue.arango.Plan.DBServers[body.primary];
} else {
throw true;
}
} catch (e1) {
actions.resultError(req, res, actions.HTTP_NOT_FOUND, 0,
'Primary with the given ID is not configured in Agency.');
return;
}
let operations = {};
operations['/arango/Plan/DBServers/' + body.primary] = body.newSecondary;
operations['/arango/Plan/Version'] = {'op': 'increment'};
if (oldValue !== body.oldSecondary) {
let preconditions = {};
preconditions['/arango/Plan/DBServers/' + body.primary] = {'old': body.oldSecondary};
try {
global.ArangoAgency.write([[operations, preconditions]]);
} catch (e) {
if (e.code === 412) {
let oldValue = ArangoAgency.get('Plan/DBServers/' + body.primary);
actions.resultError(req, res, actions.HTTP_PRECONDITION_FAILED, 0,
'Primary does not have the given oldSecondary as ' +
'its secondary, current value: ' + oldValue);
'its secondary, current value: ' + JSON.stringify(oldValue));
return;
}
try {
ArangoAgency.set('Plan/DBServers/' + body.primary, body.newSecondary,
0);
} catch (e2) {
actions.resultError(req, res, actions.HTTP_SERVER_ERROR, 0,
'Cannot change secondary of given primary.');
return;
}
try {
ArangoAgency.increaseVersion('Plan/Version');
} catch (e3) {
actions.resultError(req, res, actions.HTTP_SERVER_ERROR, 0,
'Cannot increase Plan/Version.');
return;
}
actions.resultOk(req, res, actions.HTTP_OK, body);
} finally {
ArangoAgency.unlockWrite('Plan', timeout);
throw e;
}
}
});
@ -635,38 +577,29 @@ actions.defineHttp({
function changeAllShardReponsibilities (oldServer, newServer) {
// This is only called when we have the write lock and we "only" have to
// make sure that either all or none of the shards are moved.
var collections = ArangoAgency.get('Plan/Collections', true, false);
var collections = ArangoAgency.get('Plan/Collections');
collections = collections.arango.Plan.Collections;
var done = {};
try {
Object.keys(collections).forEach(function (collectionKey) {
var collection = collections[collectionKey];
var old = _.cloneDeep(collection);
Object.keys(collection.shards).forEach(function (shardKey) {
var servers = collection.shards[shardKey];
collection.shards[shardKey] = servers.map(function (server) {
if (server === oldServer) {
return newServer;
} else {
return server;
}
});
let operations = {};
let preconditions = {};
Object.keys(collections).forEach(function (collectionKey) {
var collection = collections[collectionKey];
var old = _.cloneDeep(collection);
Object.keys(collection.shards).forEach(function (shardKey) {
var servers = collection.shards[shardKey];
collection.shards[shardKey] = servers.map(function (server) {
if (server === oldServer) {
return newServer;
} else {
return server;
}
});
ArangoAgency.set(collectionKey, collection, 0);
done[collectionKey] = old;
});
} catch (e) {
// mop: rollback
try {
Object.keys(done).forEach(function (collectionKey) {
ArangoAgency.set(collectionKey, done[collectionKey], 0);
});
} catch (e2) {
console.error('Got error during rollback', e2);
}
throw e;
}
operations[collectionKey] = collection;
preconditions[collectionKey] = old;
});
return {operations, preconditions};
}
// //////////////////////////////////////////////////////////////////////////////
@ -744,100 +677,33 @@ actions.defineHttp({
return;
}
var ttl = 60.0;
var timeout = 60.0;
let operations = {};
operations['Plan/DBServers/' + body.secondary] = body.primary;
operations['Plan/DBServers/' + body.primary] = {'op': 'delete'};
operations['Plan/Version'] = {'op': 'increment'};
if (body.hasOwnProperty('ttl') && typeof body.ttl === 'number') {
ttl = body.ttl;
}
if (body.hasOwnProperty('timeout') && typeof body.timeout === 'number') {
timeout = body.timeout;
}
let preconditions = {};
preconditions['Plan/DBServers/' + body.primary] = {'old': body.secondary};
// Now get to work, first get the write lock on the Plan in the Agency:
var success = ArangoAgency.lockWrite('Plan', ttl, timeout);
if (!success) {
actions.resultError(req, res, actions.HTTP_REQUEST_TIMEOUT, 0,
'could not get a write lock on Plan in Agency');
return;
}
let shardChanges = changeAllShardReponsibilities(body.primary, body.secondary);
operations = Object.assign(operations, shardChanges.operations);
preconditions = Object.assign(preconditions, shardChanges.preconditions);
try {
var oldValue;
try {
oldValue = ArangoAgency.get('Plan/DBServers/' + body.primary, false,
false);
oldValue = oldValue.arango.Plan.DBservers[body.primary];
} catch (e1) {
actions.resultError(req, res, actions.HTTP_NOT_FOUND, 0,
'Primary with the given ID is not configured in Agency.');
return;
}
oldValue = oldValue['Plan/DBServers/' + body.primary];
if (oldValue !== body.secondary) {
global.ArangoAgency.write([[operations, preconditions]]);
} catch (e) {
if (e.code === 412) {
let oldValue = ArangoAgency.get('Plan/DBServers/' + body.primary);
actions.resultError(req, res, actions.HTTP_PRECONDITION_FAILED, 0,
'Primary does not have the given secondary as ' +
'Primary does not have the given oldSecondary as ' +
'its secondary, current value: ' + oldValue);
return;
}
try {
ArangoAgency.remove('Plan/DBServers/' + body.primary, false);
} catch (e2) {
actions.resultError(req, res, actions.HTTP_SERVER_ERROR, 0,
'Cannot remove old primary entry.');
return;
}
try {
ArangoAgency.set('Plan/DBServers/' + body.secondary,
body.primary, 0);
} catch (e3) {
actions.resultError(req, res, actions.HTTP_SERVER_ERROR, 0,
'Cannot set secondary as primary.');
// Try to reset the old primary:
try {
ArangoAgency.set('Plan/DBServers/' + body.primary,
body.secondary, 0);
} catch (e4) {
actions.resultError(req, res, actions.HTTP_SERVER_ERROR, 0,
'Cannot set secondary as primary, could not ' +
'even reset the old value!');
}
return;
}
try {
// Now change all responsibilities for shards to the "new" primary
// body.secondary:
changeAllShardReponsibilities(body.primary, body.secondary);
} catch (e5) {
actions.resultError(req, res, actions.HTTP_SERVER_ERROR, 0,
'Could not change responsibilities for shards.');
// Try to reset the old primary:
try {
ArangoAgency.set('Plan/DBServers/' + body.primary,
body.secondary, 0);
ArangoAgency.remove('Plan/DBServers/' + body.secondary);
} catch (e4) {
actions.resultError(req, res, actions.HTTP_SERVER_ERROR, 0,
'Cannot change responsibility for shards and ' +
'could not even reset the old value!');
}
return;
}
try {
ArangoAgency.increaseVersion('Plan/Version');
} catch (e3) {
actions.resultError(req, res, actions.HTTP_SERVER_ERROR, 0,
'Cannot increase Plan/Version.');
return;
}
actions.resultOk(req, res, actions.HTTP_OK, {primary: body.secondary,
secondary: body.primary});
} finally {
ArangoAgency.unlockWrite('Plan', timeout);
throw e;
}
actions.resultOk(req, res, actions.HTTP_OK, {primary: body.secondary,
secondary: body.primary});
}
});

View File

@ -13,19 +13,19 @@
<tr>
<td>Knows Graph</td>
<td>
<button style="float: right" graph-id="knows_graph" class="button-success createExampleGraphs">Create</button>
<button style="float: right; margin-bottom: 10px;" graph-id="knows_graph" class="button-success createExampleGraphs">Create</button>
</td>
</tr>
<tr>
<td>Social Graph</td>
<td>
<button style="float: right" graph-id="social" class="button-success createExampleGraphs">Create</button>
<button style="float: right; margin-bottom: 10px;" graph-id="social" class="button-success createExampleGraphs">Create</button>
</td>
</tr>
<tr>
<td>Routeplanner Graph</td>
<td>
<button style="float: right" graph-id="routeplanner" class="button-success createExampleGraphs">Create</button>
<button style="float: right; margin-bottom: 10px;" graph-id="routeplanner" class="button-success createExampleGraphs">Create</button>
</td>
</tr>
</tbody>

View File

@ -326,7 +326,6 @@
},
setFromAndTo: function (e) {
console.log(e);
e.stopPropagation();
var map = this.calculateEdgeDefinitionMap();
var id;
@ -607,6 +606,7 @@
// if smart graph
if ($('#new-is_smart').is(':checked')) {
if ($('#new-numberOfShards').val() === '' || $('#new-smartGraphAttribute').val() === '') {
arangoHelper.arangoError('Smart Graph enabled', 'numberOfShards or smartGraphAttribute not set!');
return;
} else {
newCollectionObject.isSmart = true;
@ -810,7 +810,7 @@
window.modalView.createCheckboxEntry(
'new-is_smart',
'Smart Graph',
true,
false,
'Create a Smart Graph? Edge and vertex collections will be automatically generated. They are not allowed to be present before graph creation.',
false
)

View File

@ -2095,3 +2095,4 @@ exports.executePlanForDatabases = executePlanForDatabases;
exports.executePlanForCollections = executePlanForCollections;
exports.updateCurrentForDatabases = updateCurrentForDatabases;
exports.updateCurrentForCollections = updateCurrentForCollections;
exports.fetchKey = fetchKey;

View File

@ -40,8 +40,4 @@ exports.setup = function() {
}, keyValue);
return value;
};
global.ArangoAgency.lockRead = function() { return true; };
global.ArangoAgency.lockWrite = function() { return true; };
global.ArangoAgency.unlockRead = function() { return true; };
global.ArangoAgency.unlockWrite = function() { return true; };
};

View File

@ -24,10 +24,10 @@
#include <iostream>
#include "Basics/directories.h"
#include "Basics/ArangoGlobalContext.h"
#include "Basics/FileUtils.h"
#include "Basics/StringUtils.h"
#include "Basics/ArangoGlobalContext.h"
#include "Basics/directories.h"
#include "Logger/Logger.h"
#include "ProgramOptions/IniFileParser.h"
#include "ProgramOptions/ProgramOptions.h"
@ -58,7 +58,8 @@ void ConfigFeature::collectOptions(std::shared_ptr<ProgramOptions> options) {
options->addHiddenOption("--config", "the configuration file or 'none'",
new StringParameter(&_file));
options->addHiddenOption("--define,-D", "define key=value for a @key@ entry in config file",
options->addHiddenOption("--define,-D",
"define key=value for a @key@ entry in config file",
new VectorParameter<StringParameter>(&_defines));
options->addHiddenOption("--check-configuration",
@ -83,117 +84,112 @@ void ConfigFeature::loadConfigFile(std::shared_ptr<ProgramOptions> options,
std::string const& progname,
char const* binaryPath) {
if (StringUtils::tolower(_file) == "none") {
LOG_TOPIC(DEBUG, Logger::CONFIG) << "use no config file at all";
LOG_TOPIC(DEBUG, Logger::CONFIG) << "using no config file at all";
return;
}
std::vector<std::string> files;
std::set<std::string> seen;
// always prefer an explicitly given config file
if (_file.empty()) {
files.emplace_back(progname);
} else {
LOG_TOPIC(DEBUG, Logger::CONFIG) << "using user supplied config file '"
<< _file << "'";
IniFileParser parser(options.get());
if (!parser.parse(_file)) {
exit(EXIT_FAILURE);
}
auto includes = parser.includes();
files.insert(files.end(), includes.begin(), includes.end());
LOG_TOPIC(DEBUG, Logger::CONFIG) << "seen @includes: " << includes;
}
for (size_t i = 0; i < files.size(); ++i) {
auto name = files[i];
if (seen.find(name) != seen.end()) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "circular includes, seen '" << name << "' twice";
if (!_file.empty()) {
if (!FileUtils::exists(_file)) {
LOG_TOPIC(FATAL, Logger::CONFIG) << "cannot read config file '" << _file
<< "'";
FATAL_ERROR_EXIT();
}
seen.insert(name);
auto local = _file + ".local";
// clang-format off
//
// check in order:
//
// <PRGNAME>.conf
// ./etc/relative/<PRGNAME>.conf
// ${HOME}/.arangodb/<PRGNAME>.conf
// /etc/arangodb/<PRGNAME>.conf
//
// clang-format on
auto context = ArangoGlobalContext::CONTEXT;
std::string basename = name;
std::string filename;
if (!StringUtils::isSuffix(name, ".conf")) {
basename += ".conf";
}
if (context != nullptr) {
filename = FileUtils::buildFilename(FileUtils::buildFilename(context->runRoot(), _SYSCONFDIR_), basename);
LOG_TOPIC(DEBUG, Logger::CONFIG) << "checking '" << filename << "'";
}
if (filename.length() == 0 || !FileUtils::exists(filename)) {
filename = FileUtils::buildFilename(FileUtils::currentDirectory(), basename);
LOG_TOPIC(DEBUG, Logger::CONFIG) << "checking '" << filename << "'";
if (!FileUtils::exists(filename)) {
filename = FileUtils::buildFilename(FileUtils::currentDirectory(),
"etc/relative/" + basename);
LOG_TOPIC(DEBUG, Logger::CONFIG) << "checking '" << filename << "'";
if (!FileUtils::exists(filename)) {
filename =
FileUtils::buildFilename(FileUtils::homeDirectory(), basename);
LOG_TOPIC(DEBUG, Logger::CONFIG) << "checking '" << filename << "'";
if (!FileUtils::exists(filename)) {
filename =
FileUtils::buildFilename(FileUtils::configDirectory(binaryPath), basename);
LOG_TOPIC(DEBUG, Logger::CONFIG) << "checking '" << filename << "'";
if (!FileUtils::exists(filename)) {
LOG_TOPIC(DEBUG, Logger::CONFIG) << "cannot find any config file";
}
}
}
}
}
IniFileParser parser(options.get());
std::string local = filename + ".local";
LOG_TOPIC(DEBUG, Logger::CONFIG) << "checking override '" << local << "'";
if (FileUtils::exists(local)) {
LOG_TOPIC(DEBUG, Logger::CONFIG) << "loading '" << local << "'";
LOG_TOPIC(DEBUG, Logger::CONFIG) << "loading override '" << local << "'";
if (!parser.parse(local)) {
exit(EXIT_FAILURE);
FATAL_ERROR_EXIT();
}
}
LOG_TOPIC(DEBUG, Logger::CONFIG) << "loading '" << filename << "'";
LOG_TOPIC(DEBUG, Logger::CONFIG) << "using user supplied config file '"
<< _file << "'";
if (!parser.parse(filename)) {
exit(EXIT_FAILURE);
if (!parser.parse(_file)) {
FATAL_ERROR_EXIT();
}
auto includes = parser.includes();
files.insert(files.end(), includes.begin(), includes.end());
return;
}
// clang-format off
//
// check the following location in this order:
//
// <PRGNAME>.conf
// ./etc/relative/<PRGNAME>.conf
// ${HOME}/.arangodb/<PRGNAME>.conf
// /etc/arangodb/<PRGNAME>.conf
//
// clang-format on
auto context = ArangoGlobalContext::CONTEXT;
std::string basename = progname;
if (!StringUtils::isSuffix(basename, ".conf")) {
basename += ".conf";
}
std::vector<std::string> locations;
if (context != nullptr) {
auto root = context->runRoot();
auto location = FileUtils::buildFilename(root, _SYSCONFDIR_);
LOG_TOPIC(TRACE, Logger::CONFIG) << "checking root location '" << root
<< "'";
locations.emplace_back(location);
}
locations.emplace_back(FileUtils::currentDirectory());
locations.emplace_back(FileUtils::buildFilename(FileUtils::currentDirectory(),
"etc", "relative"));
locations.emplace_back(
FileUtils::buildFilename(FileUtils::homeDirectory(), ".arangodb"));
locations.emplace_back(FileUtils::configDirectory(binaryPath));
std::string filename;
for (auto const& location : locations) {
auto name = FileUtils::buildFilename(location, basename);
LOG_TOPIC(TRACE, Logger::CONFIG) << "checking config file '" << name << "'";
if (FileUtils::exists(name)) {
LOG_TOPIC(DEBUG, Logger::CONFIG) << "found config file '" << name << "'";
filename = name;
break;
}
}
if (filename.empty()) {
LOG_TOPIC(DEBUG, Logger::CONFIG) << "cannot find any config file";
}
IniFileParser parser(options.get());
std::string local = filename + ".local";
LOG_TOPIC(TRACE, Logger::CONFIG) << "checking override '" << local << "'";
if (FileUtils::exists(local)) {
LOG_TOPIC(DEBUG, Logger::CONFIG) << "loading override '" << local << "'";
if (!parser.parse(local)) {
FATAL_ERROR_EXIT();
}
} else {
LOG_TOPIC(TRACE, Logger::CONFIG) << "no override file found";
}
LOG_TOPIC(DEBUG, Logger::CONFIG) << "loading '" << filename << "'";
if (!parser.parse(filename)) {
exit(EXIT_FAILURE);
}
}

View File

@ -31,203 +31,94 @@ namespace arangodb {
namespace basics {
class StringBuffer;
////////////////////////////////////////////////////////////////////////////////
/// @brief collection of file functions
////////////////////////////////////////////////////////////////////////////////
namespace FileUtils {
////////////////////////////////////////////////////////////////////////////////
/// @brief removes trailing path separators from path
///
/// path will be modified in-place
////////////////////////////////////////////////////////////////////////////////
// removes trailing path separators from path, path will be modified in-place
std::string removeTrailingSeparator(std::string const& name);
////////////////////////////////////////////////////////////////////////////////
/// @brief normalizes path
///
/// path will be modified in-place
////////////////////////////////////////////////////////////////////////////////
// normalizes path, path will be modified in-place
void normalizePath(std::string& name);
////////////////////////////////////////////////////////////////////////////////
/// @brief makes a path absolute
///
/// path will be modified in-place
////////////////////////////////////////////////////////////////////////////////
void makePathAbsolute(std::string &path);
////////////////////////////////////////////////////////////////////////////////
/// @brief creates a filename
////////////////////////////////////////////////////////////////////////////////
// makes a path absolute, path will be modified in-place
void makePathAbsolute(std::string& path);
// creates a filename
std::string buildFilename(char const* path, char const* name);
////////////////////////////////////////////////////////////////////////////////
/// @brief creates a filename
////////////////////////////////////////////////////////////////////////////////
// creates a filename
std::string buildFilename(std::string const& path, std::string const& name);
////////////////////////////////////////////////////////////////////////////////
/// @brief throws a read error
////////////////////////////////////////////////////////////////////////////////
// creates a filename by joining an arbitrary number of path components;
// folds left over the two-argument buildFilename(path, name) overload.
// Components are taken by const reference to avoid one string copy per
// recursion level (the original took them by value).
template <typename... Args>
inline std::string buildFilename(std::string const& path,
                                 std::string const& name, Args... args) {
  return buildFilename(buildFilename(path, name), args...);
}
// throws a read error
void throwFileReadError(int fd, std::string const& filename);
////////////////////////////////////////////////////////////////////////////////
/// @brief throws a write error
////////////////////////////////////////////////////////////////////////////////
// throws a write error
void throwFileWriteError(int fd, std::string const& filename);
////////////////////////////////////////////////////////////////////////////////
/// @brief reads file into string
////////////////////////////////////////////////////////////////////////////////
// reads file into string
std::string slurp(std::string const& filename);
////////////////////////////////////////////////////////////////////////////////
/// @brief reads file into string buffer
////////////////////////////////////////////////////////////////////////////////
void slurp(std::string const& filename, StringBuffer&);
////////////////////////////////////////////////////////////////////////////////
/// @brief creates file and writes string to it
////////////////////////////////////////////////////////////////////////////////
// creates file and writes string to it
void spit(std::string const& filename, char const* ptr, size_t len);
////////////////////////////////////////////////////////////////////////////////
/// @brief creates file and writes string to it
////////////////////////////////////////////////////////////////////////////////
void spit(std::string const& filename, std::string const& content);
////////////////////////////////////////////////////////////////////////////////
/// @brief creates file and writes string to it
////////////////////////////////////////////////////////////////////////////////
void spit(std::string const& filename, StringBuffer const& content);
////////////////////////////////////////////////////////////////////////////////
/// @brief returns true if a file could be removed
////////////////////////////////////////////////////////////////////////////////
// returns true if a file could be removed
bool remove(std::string const& fileName, int* errorNumber = 0);
////////////////////////////////////////////////////////////////////////////////
/// @brief returns true if a file could be renamed
////////////////////////////////////////////////////////////////////////////////
bool rename(std::string const& oldName, std::string const& newName,
int* errorNumber = 0);
////////////////////////////////////////////////////////////////////////////////
/// @brief creates a new directory
////////////////////////////////////////////////////////////////////////////////
// creates a new directory
bool createDirectory(std::string const& name, int* errorNumber = 0);
////////////////////////////////////////////////////////////////////////////////
/// @brief creates a new directory with mask
////////////////////////////////////////////////////////////////////////////////
bool createDirectory(std::string const& name, int mask, int* errorNumber = 0);
////////////////////////////////////////////////////////////////////////////////
/// @brief copies directories / files recursive
////////////////////////////////////////////////////////////////////////////////
// copies directories / files recursively
bool copyRecursive(std::string const& source, std::string const& target,
std::string& error);
////////////////////////////////////////////////////////////////////////////////
/// @brief copies directories / files recursively; the source start node has
/// to be a directory.
////////////////////////////////////////////////////////////////////////////////
bool copyDirectoryRecursive(std::string const& source,
std::string const& target, std::string& error);
////////////////////////////////////////////////////////////////////////////////
/// @brief returns list of files
////////////////////////////////////////////////////////////////////////////////
// returns list of files
std::vector<std::string> listFiles(std::string const& directory);
////////////////////////////////////////////////////////////////////////////////
/// @brief checks if path is a directory
////////////////////////////////////////////////////////////////////////////////
// checks if path is a directory
bool isDirectory(std::string const& path);
////////////////////////////////////////////////////////////////////////////////
/// @brief checks if path is a symbolic link
////////////////////////////////////////////////////////////////////////////////
// checks if path is a symbolic link
bool isSymbolicLink(std::string const& path);
////////////////////////////////////////////////////////////////////////////////
/// @brief checks if path is a regular file
////////////////////////////////////////////////////////////////////////////////
// checks if path is a regular file
bool isRegularFile(std::string const& path);
////////////////////////////////////////////////////////////////////////////////
/// @brief checks if path exists
////////////////////////////////////////////////////////////////////////////////
// checks if path exists
bool exists(std::string const& path);
// returns the size of a file. will return 0 for non-existing files;
// the caller should check first if the file exists via the exists() method
off_t size(std::string const& path);
////////////////////////////////////////////////////////////////////////////////
/// @brief strip extension
////////////////////////////////////////////////////////////////////////////////
// strip extension
std::string stripExtension(std::string const& path,
std::string const& extension);
////////////////////////////////////////////////////////////////////////////////
/// @brief changes into directory
////////////////////////////////////////////////////////////////////////////////
// changes into directory
bool changeDirectory(std::string const& path);
////////////////////////////////////////////////////////////////////////////////
/// @brief returns the current directory
////////////////////////////////////////////////////////////////////////////////
// returns the current directory
std::string currentDirectory(int* errorNumber = 0);
////////////////////////////////////////////////////////////////////////////////
/// @brief returns the home directory
////////////////////////////////////////////////////////////////////////////////
// returns the home directory
std::string homeDirectory();
////////////////////////////////////////////////////////////////////////////////
/// @brief returns the config directory
////////////////////////////////////////////////////////////////////////////////
// returns the config directory
std::string configDirectory(char const* binaryPath);
////////////////////////////////////////////////////////////////////////////////
/// @brief returns the base name
////////////////////////////////////////////////////////////////////////////////
// returns the dir name of a path
std::string dirname(std::string const&);
}
}

View File

@ -57,6 +57,7 @@ LogTopic Logger::PERFORMANCE("performance", LogLevel::FATAL); // suppress
LogTopic Logger::QUERIES("queries", LogLevel::INFO);
LogTopic Logger::REPLICATION("replication", LogLevel::INFO);
LogTopic Logger::REQUESTS("requests", LogLevel::FATAL); // suppress
LogTopic Logger::SSL("ssl", LogLevel::WARN);
LogTopic Logger::STARTUP("startup", LogLevel::INFO);
LogTopic Logger::SUPERVISION("supervision", LogLevel::INFO);
LogTopic Logger::SYSCALL("syscall", LogLevel::WARN);

View File

@ -46,11 +46,15 @@ class LogTopic {
LogTopic(std::string const& name, LogLevel level);
LogTopic(LogTopic const& that) : _id(that._id), _name(that._name), _displayName(that._displayName) {
LogTopic(LogTopic const& that)
: _id(that._id), _name(that._name), _displayName(that._displayName) {
_level.store(that._level, std::memory_order_relaxed);
}
LogTopic(LogTopic&& that) noexcept : _id(that._id), _name(std::move(that._name)), _displayName(std::move(that._displayName)) {
LogTopic(LogTopic&& that) noexcept
: _id(that._id),
_name(std::move(that._name)),
_displayName(std::move(that._displayName)) {
_level.store(that._level, std::memory_order_relaxed);
}

View File

@ -143,6 +143,7 @@ class Logger {
static LogTopic QUERIES;
static LogTopic REPLICATION;
static LogTopic REQUESTS;
static LogTopic SSL;
static LogTopic STARTUP;
static LogTopic SUPERVISION;
static LogTopic SYSCALL;

View File

@ -56,8 +56,9 @@ class IniFileParser {
"^[ \t]*(([-_A-Za-z0-9]*\\.)?[-_A-Za-z0-9]*)[ \t]*=[ \t]*(.*?)?[ \t]*$",
std::regex::ECMAScript);
// an include line
_matchers.include = std::regex(
"^[ \t]*@include[ \t]*([-_A-Za-z0-9/\\.]*)[ \t]*$", std::regex::ECMAScript);
_matchers.include =
std::regex("^[ \t]*@include[ \t]*([-_A-Za-z0-9/\\.]*)[ \t]*$",
std::regex::ECMAScript);
}
// parse a config file. returns true if all is well, false otherwise
@ -108,10 +109,24 @@ class IniFileParser {
isEnterprise = true;
} else if (std::regex_match(line, match, _matchers.include)) {
// found include
std::string option;
std::string value(match[1].str());
std::string include(match[1].str());
_includes.emplace_back(value);
if (!basics::StringUtils::isSuffix(include, ".conf")) {
include += ".conf";
}
if (_seen.find(include) != _seen.end()) {
LOG_TOPIC(FATAL, Logger::CONFIG) << "recursive include of file '"
<< include << "'";
FATAL_ERROR_EXIT();
}
_seen.insert(include);
LOG_TOPIC(DEBUG, Logger::CONFIG) << "reading include file '" << include
<< "'";
parse(include);
} else if (std::regex_match(line, match, _matchers.assignment)) {
// found assignment
std::string option;
@ -151,12 +166,9 @@ class IniFileParser {
return true;
}
// seen includes
std::vector<std::string> const& includes() const { return _includes; }
private:
ProgramOptions* _options;
std::vector<std::string> _includes;
std::set<std::string> _seen;
struct {
std::regex comment;

View File

@ -91,10 +91,12 @@ void SslServerFeature::collectOptions(std::shared_ptr<ProgramOptions> options) {
}
void SslServerFeature::prepare() {
LOG_TOPIC(INFO, arangodb::Logger::FIXME) << "using SSL options: " << stringifySslOptions(_sslOptions);
LOG_TOPIC(INFO, arangodb::Logger::SSL) << "using SSL options: "
<< stringifySslOptions(_sslOptions);
if (!_cipherList.empty()) {
LOG_TOPIC(INFO, arangodb::Logger::FIXME) << "using SSL cipher-list '" << _cipherList << "'";
LOG_TOPIC(INFO, arangodb::Logger::SSL) << "using SSL cipher-list '"
<< _cipherList << "'";
}
UniformCharacter r(
@ -105,35 +107,40 @@ void SslServerFeature::prepare() {
}
void SslServerFeature::unprepare() {
LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "unpreparing ssl: " << stringifySslOptions(_sslOptions);
LOG_TOPIC(TRACE, arangodb::Logger::SSL) << "unpreparing ssl: "
<< stringifySslOptions(_sslOptions);
}
void SslServerFeature::verifySslOptions() {
// check keyfile
if (_keyfile.empty()) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "keyfile empty'" << _keyfile << "'";
LOG_TOPIC(FATAL, arangodb::Logger::SSL) << "keyfile empty'" << _keyfile
<< "'";
FATAL_ERROR_EXIT();
}
// validate protocol
if (_sslProtocol <= SSL_UNKNOWN || _sslProtocol >= SSL_LAST) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "invalid SSL protocol version specified. Please use a valid "
"value for '--ssl.protocol'.";
LOG_TOPIC(FATAL, arangodb::Logger::SSL)
<< "invalid SSL protocol version specified. Please use a valid "
"value for '--ssl.protocol'.";
FATAL_ERROR_EXIT();
}
LOG_TOPIC(DEBUG, arangodb::Logger::FIXME) << "using SSL protocol version '"
<< protocolName((protocol_e)_sslProtocol) << "'";
LOG_TOPIC(DEBUG, arangodb::Logger::SSL)
<< "using SSL protocol version '"
<< protocolName((protocol_e)_sslProtocol) << "'";
if (!FileUtils::exists(_keyfile)) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "unable to find SSL keyfile '" << _keyfile << "'";
LOG_TOPIC(FATAL, arangodb::Logger::SSL) << "unable to find SSL keyfile '"
<< _keyfile << "'";
FATAL_ERROR_EXIT();
}
try {
createSslContext();
} catch (...) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "cannot create SSL context";
LOG_TOPIC(FATAL, arangodb::Logger::SSL) << "cannot create SSL context";
FATAL_ERROR_EXIT();
}
}
@ -155,7 +162,8 @@ boost::asio::ssl::context SslServerFeature::createSslContext() const {
auto sslContextOpt = ::sslContext(protocol_e(_sslProtocol), _keyfile);
if (!sslContextOpt) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "failed to create SSL context, cannot create HTTPS server";
LOG_TOPIC(ERR, arangodb::Logger::SSL)
<< "failed to create SSL context, cannot create HTTPS server";
throw std::runtime_error("cannot create SSL context");
}
@ -175,7 +183,7 @@ boost::asio::ssl::context SslServerFeature::createSslContext() const {
: SSL_SESS_CACHE_OFF);
if (_sessionCache) {
LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "using SSL session caching";
LOG_TOPIC(TRACE, arangodb::Logger::SSL) << "using SSL session caching";
}
// set options
@ -183,8 +191,9 @@ boost::asio::ssl::context SslServerFeature::createSslContext() const {
if (!_cipherList.empty()) {
if (SSL_CTX_set_cipher_list(nativeContext, _cipherList.c_str()) != 1) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "cannot set SSL cipher list '" << _cipherList
<< "': " << lastSSLError();
LOG_TOPIC(ERR, arangodb::Logger::SSL) << "cannot set SSL cipher list '"
<< _cipherList
<< "': " << lastSSLError();
throw std::runtime_error("cannot create SSL context");
}
}
@ -195,16 +204,18 @@ boost::asio::ssl::context SslServerFeature::createSslContext() const {
sslEcdhNid = OBJ_sn2nid(_ecdhCurve.c_str());
if (sslEcdhNid == 0) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "SSL error: " << lastSSLError()
<< " Unknown curve name: " << _ecdhCurve;
LOG_TOPIC(ERR, arangodb::Logger::SSL)
<< "SSL error: " << lastSSLError()
<< " Unknown curve name: " << _ecdhCurve;
throw std::runtime_error("cannot create SSL context");
}
// https://www.openssl.org/docs/manmaster/apps/ecparam.html
ecdhKey = EC_KEY_new_by_curve_name(sslEcdhNid);
if (ecdhKey == nullptr) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "SSL error: " << lastSSLError()
<< " Unable to create curve by name: " << _ecdhCurve;
LOG_TOPIC(ERR, arangodb::Logger::SSL)
<< "SSL error: " << lastSSLError()
<< " Unable to create curve by name: " << _ecdhCurve;
throw std::runtime_error("cannot create SSL context");
}
@ -218,20 +229,23 @@ boost::asio::ssl::context SslServerFeature::createSslContext() const {
nativeContext, (unsigned char const*)_rctx.c_str(), (int)_rctx.size());
if (res != 1) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "cannot set SSL session id context '" << _rctx
<< "': " << lastSSLError();
LOG_TOPIC(ERR, arangodb::Logger::SSL)
<< "cannot set SSL session id context '" << _rctx
<< "': " << lastSSLError();
throw std::runtime_error("cannot create SSL context");
}
// check CA
if (!_cafile.empty()) {
LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "trying to load CA certificates from '" << _cafile << "'";
LOG_TOPIC(TRACE, arangodb::Logger::SSL)
<< "trying to load CA certificates from '" << _cafile << "'";
int res = SSL_CTX_load_verify_locations(nativeContext, _cafile.c_str(), 0);
if (res == 0) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "cannot load CA certificates from '" << _cafile
<< "': " << lastSSLError();
LOG_TOPIC(ERR, arangodb::Logger::SSL)
<< "cannot load CA certificates from '" << _cafile
<< "': " << lastSSLError();
throw std::runtime_error("cannot create SSL context");
}
@ -240,8 +254,9 @@ boost::asio::ssl::context SslServerFeature::createSslContext() const {
certNames = SSL_load_client_CA_file(_cafile.c_str());
if (certNames == nullptr) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "cannot load CA certificates from '" << _cafile
<< "': " << lastSSLError();
LOG_TOPIC(ERR, arangodb::Logger::SSL)
<< "cannot load CA certificates from '" << _cafile
<< "': " << lastSSLError();
throw std::runtime_error("cannot create SSL context");
}
@ -260,7 +275,8 @@ boost::asio::ssl::context SslServerFeature::createSslContext() const {
char* r;
long len = BIO_get_mem_data(bout._bio, &r);
LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "name: " << std::string(r, len);
LOG_TOPIC(TRACE, arangodb::Logger::SSL) << "name: "
<< std::string(r, len);
}
}
}