
Merge remote-tracking branch 'origin/devel' into feature/ldap-auth

baslr 2017-04-26 10:52:50 +02:00
commit f45a7f07b1
118 changed files with 1919 additions and 990 deletions


@@ -182,9 +182,12 @@ class ObjectIterator {
ObjectIterator() = delete;
explicit ObjectIterator(Slice const& slice, bool allowRandomIteration = false)
// The useSequentialIteration flag indicates whether the iteration
// simply jumps from key/value pair to key/value pair without using the
// index. With the default `false`, the index is used if it is present.
explicit ObjectIterator(Slice const& slice, bool useSequentialIteration = false)
: _slice(slice), _size(_slice.length()), _position(0), _current(nullptr),
_allowRandomIteration(allowRandomIteration) {
_useSequentialIteration(useSequentialIteration) {
if (!slice.isObject()) {
throw Exception(Exception::InvalidValueType, "Expecting Object slice");
}
@@ -193,7 +196,7 @@ class ObjectIterator {
auto h = slice.head();
if (h == 0x14) {
_current = slice.keyAt(0, false).start();
} else if (allowRandomIteration) {
} else if (useSequentialIteration) {
_current = slice.begin() + slice.findDataOffset(h);
}
}
@@ -204,7 +207,7 @@ class ObjectIterator {
_size(other._size),
_position(other._position),
_current(other._current),
_allowRandomIteration(other._allowRandomIteration) {}
_useSequentialIteration(other._useSequentialIteration) {}
ObjectIterator& operator=(ObjectIterator const& other) = delete;
@@ -306,7 +309,7 @@ class ObjectIterator {
ValueLength const _size;
ValueLength _position;
uint8_t const* _current;
bool const _allowRandomIteration;
bool const _useSequentialIteration;
};
} // namespace arangodb::velocypack
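A minimal usage sketch of the renamed flag (illustrative only, not part of this commit; assumes the usual velocypack Iterator.h/Slice.h includes):

void iterateObject(VPackSlice object) {
  // Default: consult the object's index if one is present.
  for (ObjectIterator it(object); it.valid(); it.next()) {
    // use it.key() / it.value()
  }
  // useSequentialIteration = true: hop from key/value pair to key/value
  // pair in stored order, without consulting the index.
  for (ObjectIterator it(object, /*useSequentialIteration*/ true); it.valid(); it.next()) {
    // ...
  }
}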


@@ -113,7 +113,7 @@ devel
when unused.
Waiting for an unused V8 context will now also abort if no V8 context can be
acquired/created after 120 seconds.
acquired/created after 60 seconds.
* improved diagnostic messages written to logfiles by supervisor process


@@ -340,8 +340,8 @@ if (CMAKE_COMPILER_IS_CLANG)
endif ()
# need c++11
# XXX this should really be set on a per target level using cmake compile_features capabilities
set(CMAKE_CXX_STANDARD 11)
include(CheckCXX11Features)
# need threads
find_package(Threads REQUIRED)


@@ -254,6 +254,7 @@ while [ $# -gt 0 ]; do
MAKE="cmake --build . --config ${BUILD_CONFIG}"
PACKAGE_MAKE="cmake --build . --config ${BUILD_CONFIG} --target"
CONFIGURE_OPTIONS="${CONFIGURE_OPTIONS} -DV8_TARGET_ARCHS=Release"
export _IsNativeEnvironment=true
;;
--symsrv)


@@ -208,6 +208,13 @@ void AgencyFeature::start() {
return;
}
// Find the agency prefix:
auto feature = ApplicationServer::getFeature<ClusterFeature>("Cluster");
arangodb::consensus::Supervision::setAgencyPrefix(
std::string("/") + feature->agencyPrefix());
arangodb::consensus::Job::agencyPrefix
= std::string("/") + feature->agencyPrefix();
// TODO: Port this to new options handling
std::string endpoint;


@@ -100,20 +100,8 @@ Agent::~Agent() {
FATAL_ERROR_EXIT();
}
}
if (!isStopping()) {
{
CONDITION_LOCKER(guardW, _waitForCV);
guardW.broadcast();
}
{
CONDITION_LOCKER(guardA, _appendCV);
guardA.broadcast();
}
shutdown();
}
shutdown();
}


@@ -483,7 +483,11 @@ void Supervision::run() {
// that running the supervision does not make sense and will indeed
// lead to horrible errors:
while (!this->isStopping()) {
std::this_thread::sleep_for(std::chrono::duration<double>(5.0));
{
CONDITION_LOCKER(guard, _cv);
_cv.wait(static_cast<uint64_t>(1000000 * _frequency));
}
MUTEX_LOCKER(locker, _lock);
try {
_snapshot = _agent->readDB().get(_agencyPrefix);
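The change above swaps a fixed five-second sleep_for for a wait on the supervision's condition variable, so shutdown can interrupt the pause immediately instead of waiting out the full period. A self-contained sketch of the pattern with plain std primitives (CONDITION_LOCKER wraps the same idea; names here are illustrative):

#include <chrono>
#include <condition_variable>
#include <mutex>

std::mutex mtx;
std::condition_variable cv;
bool stopping = false;

void superviseLoop(double frequencySeconds) {
  while (true) {
    {
      std::unique_lock<std::mutex> lock(mtx);
      // Returns true as soon as beginShutdown() notifies; otherwise times
      // out after one period, exactly as the old sleep_for did.
      if (cv.wait_for(lock, std::chrono::duration<double>(frequencySeconds),
                      [] { return stopping; })) {
        return;
      }
    }
    // ... one supervision iteration, taken under its own mutex ...
  }
}

void beginShutdown() {
  { std::lock_guard<std::mutex> g(mtx); stopping = true; }
  cv.notify_all();
}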


@@ -27,10 +27,14 @@
#include "Aql/ExecutionPlan.h"
#include "Aql/Query.h"
#include "Cluster/ClusterComm.h"
#include "Graph/AttributeWeightShortestPathFinder.h"
#include "Graph/ConstantWeightShortestPathFinder.h"
#include "Graph/ShortestPathFinder.h"
#include "Graph/ShortestPathResult.h"
#include "Graph/AttributeWeightShortestPathFinder.h"
#include "Graph/ConstantWeightShortestPathFinder.h"
#include "Transaction/Methods.h"
#include "Utils/OperationCursor.h"
#include "VocBase/EdgeCollectionInfo.h"
#include "VocBase/LogicalCollection.h"
#include "VocBase/ManagedDocumentResult.h"
#include "VocBase/ticks.h"
@@ -48,7 +52,7 @@ ShortestPathBlock::ShortestPathBlock(ExecutionEngine* engine,
_vertexReg(ExecutionNode::MaxRegisterId),
_edgeVar(nullptr),
_edgeReg(ExecutionNode::MaxRegisterId),
_opts(nullptr),
_opts(static_cast<ShortestPathOptions*>(ep->options())),
_posInPath(0),
_pathLength(0),
_path(nullptr),
@@ -58,8 +62,7 @@ ShortestPathBlock::ShortestPathBlock(ExecutionEngine* engine,
_useTargetRegister(false),
_usedConstant(false),
_engines(nullptr) {
_opts = static_cast<ShortestPathOptions*>(ep->options());
_mmdr.reset(new ManagedDocumentResult);
TRI_ASSERT(_opts != nullptr);
if (!ep->usesStartInVariable()) {
_startVertexId = ep->getStartVertex();
@@ -257,9 +260,7 @@ bool ShortestPathBlock::nextPath(AqlItemBlock const* items) {
VPackSlice start = _opts->getStart();
VPackSlice end = _opts->getEnd();
TRI_ASSERT(_finder != nullptr);
// We do not need this data anymore. Result has been processed.
// Save some memory.
_coordinatorCache.clear();
bool hasPath =
_finder->shortestPath(start, end, *_path, [this]() { throwIfKilled(); });


@@ -32,25 +32,15 @@ namespace arangodb {
class ManagedDocumentResult;
namespace graph {
class ConstantWeightShortestPathFinder;
class ShortestPathFinder;
class ShortestPathResult;
}
namespace traverser {
class EdgeCollectionInfo;
}
namespace aql {
class ShortestPathNode;
class ShortestPathBlock : public ExecutionBlock {
friend struct EdgeWeightExpanderLocal;
friend struct EdgeWeightExpanderCluster;
// TODO ONLY TEMPORARY
friend class graph::ConstantWeightShortestPathFinder;
public:
ShortestPathBlock(ExecutionEngine* engine, ShortestPathNode const* ep);
@@ -100,8 +90,6 @@ class ShortestPathBlock : public ExecutionBlock {
/// @brief Register for the edge output
RegisterId _edgeReg;
std::unique_ptr<ManagedDocumentResult> _mmdr;
/// @brief options to compute the shortest path
graph::ShortestPathOptions* _opts;
@@ -146,9 +134,6 @@ class ShortestPathBlock : public ExecutionBlock {
/// We use it to check if we are done with enumerating.
bool _usedConstant;
/// @brief Cache for edges send over the network
std::vector<std::shared_ptr<VPackBuffer<uint8_t>>> _coordinatorCache;
/// @brief Traverser Engines
std::unordered_map<ServerID, traverser::TraverserEngineID> const* _engines;


@@ -428,6 +428,8 @@ target_link_libraries(${BIN_ARANGOD}
arangoserver
)
target_compile_features(${BIN_ARANGOD} PRIVATE cxx_constexpr)
install(
TARGETS ${BIN_ARANGOD}
RUNTIME DESTINATION ${CMAKE_INSTALL_SBINDIR}


@@ -577,7 +577,7 @@ bool ClusterComm::match(ClientTransactionID const& clientTransactionID,
/// from deleting `result` and `answer`.
////////////////////////////////////////////////////////////////////////////////
ClusterCommResult const ClusterComm::enquire(Ticket const ticketId) {
ClusterCommResult const ClusterComm::enquire(communicator::Ticket const ticketId) {
ResponseIterator i;
AsyncResponse response;
@@ -614,7 +614,7 @@ ClusterCommResult const ClusterComm::enquire(Ticket const ticketId) {
ClusterCommResult const ClusterComm::wait(
ClientTransactionID const& clientTransactionID,
CoordTransactionID const coordTransactionID, Ticket const ticketId,
CoordTransactionID const coordTransactionID, communicator::Ticket const ticketId,
ShardID const& shardID, ClusterCommTimeout timeout) {
ResponseIterator i;
@@ -1123,6 +1123,19 @@ void ClusterComm::addAuthorization(std::unordered_map<std::string, std::string>*
}
}
std::vector<communicator::Ticket> ClusterComm::activeServerTickets(std::vector<std::string> const& servers) {
std::vector<communicator::Ticket> tickets;
CONDITION_LOCKER(locker, somethingReceived);
for (auto const& it: responses) {
for (auto const& server: servers) {
if (it.second.result && it.second.result->serverID == server) {
tickets.push_back(it.first);
}
}
}
return tickets;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief ClusterComm main loop
////////////////////////////////////////////////////////////////////////////////
@@ -1130,18 +1143,10 @@ void ClusterComm::addAuthorization(std::unordered_map<std::string, std::string>*
void ClusterCommThread::abortRequestsToFailedServers() {
ClusterInfo* ci = ClusterInfo::instance();
auto failedServers = ci->getFailedServers();
std::vector<std::string> failedServerEndpoints;
failedServerEndpoints.reserve(failedServers.size());
for (auto const& failedServer: failedServers) {
failedServerEndpoints.push_back(_cc->createCommunicatorDestination(ci->getServerEndpoint(failedServer), "/").url());
}
for (auto const& request: _cc->communicator()->requestsInProgress()) {
for (auto const& failedServerEndpoint: failedServerEndpoints) {
if (request->_destination.url().substr(0, failedServerEndpoint.length()) == failedServerEndpoint) {
_cc->communicator()->abortRequest(request->_ticketId);
}
if (failedServers.size() > 0) {
auto ticketIds = _cc->activeServerTickets(failedServers);
for (auto const& ticketId: ticketIds) {
_cc->communicator()->abortRequest(ticketId);
}
}
}


@@ -616,6 +616,12 @@ class ClusterComm {
void cleanupAllQueues();
//////////////////////////////////////////////////////////////////////////////
/// @brief activeServerTickets for a list of servers
//////////////////////////////////////////////////////////////////////////////
std::vector<communicator::Ticket> activeServerTickets(std::vector<std::string> const& servers);
//////////////////////////////////////////////////////////////////////////////
/// @brief our background communications thread
//////////////////////////////////////////////////////////////////////////////


@@ -133,6 +133,10 @@ void ClusterFeature::collectOptions(std::shared_ptr<ProgramOptions> options) {
options->addOption("--cluster.system-replication-factor",
"replication factor for system collections",
new UInt32Parameter(&_systemReplicationFactor));
options->addOption("--cluster.create-waits-for-sync-replication",
"active coordinator will wait for all replicas to create collection",
new BooleanParameter(&_createWaitsForSyncReplication));
}
void ClusterFeature::validateOptions(std::shared_ptr<ProgramOptions> options) {
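The new flag is consumed elsewhere in this commit through the usual feature lookup (see the call site in MMFilesRestReplicationHandler below); condensed:

bool createWaitsForSyncReplication =
    application_features::ApplicationServer::getFeature<ClusterFeature>(
        "Cluster")->createWaitsForSyncReplication();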


@@ -45,6 +45,10 @@ class ClusterFeature : public application_features::ApplicationFeature {
void start() override final;
void unprepare() override final;
std::string agencyPrefix() {
return _agencyPrefix;
}
private:
std::vector<std::string> _agencyEndpoints;
std::string _agencyPrefix;
@@ -58,6 +62,7 @@ class ClusterFeature : public application_features::ApplicationFeature {
std::string _dbserverConfig;
std::string _coordinatorConfig;
uint32_t _systemReplicationFactor = 2;
bool _createWaitsForSyncReplication = false;
private:
void reportRole(ServerState::RoleEnum);
@@ -72,6 +77,7 @@ class ClusterFeature : public application_features::ApplicationFeature {
};
void setUnregisterOnShutdown(bool);
bool createWaitsForSyncReplication() { return _createWaitsForSyncReplication; };
void stop() override final;


@@ -1042,6 +1042,7 @@ int ClusterInfo::createCollectionCoordinator(std::string const& databaseName,
std::string const& collectionID,
uint64_t numberOfShards,
uint64_t replicationFactor,
bool waitForReplication,
VPackSlice const& json,
std::string& errorMsg,
double timeout) {
@@ -1100,19 +1101,10 @@ int ClusterInfo::createCollectionCoordinator(std::string const& databaseName,
[=](VPackSlice const& result) {
if (result.isObject() && result.length() == (size_t)numberOfShards) {
std::string tmpMsg = "";
bool tmpHaveError = false;
for (auto const& p : VPackObjectIterator(result)) {
if (replicationFactor == 0) {
VPackSlice servers = p.value.get("servers");
if (!servers.isArray() || servers.length() < dbServers.size()) {
return true;
}
}
if (arangodb::basics::VelocyPackHelper::getBooleanValue(
p.value, "error", false)) {
tmpHaveError = true;
tmpMsg += " shardID:" + p.key.copyString() + ":";
tmpMsg += arangodb::basics::VelocyPackHelper::getStringValue(
p.value, "errorMessage", "");
@@ -1125,13 +1117,24 @@ int ClusterInfo::createCollectionCoordinator(std::string const& databaseName,
tmpMsg += ")";
}
}
*errMsg = "Error in creation of collection:" + tmpMsg + " "
+ __FILE__ + std::to_string(__LINE__);
*dbServerResult = TRI_ERROR_CLUSTER_COULD_NOT_CREATE_COLLECTION;
return true;
}
// wait that all followers have created our new collection
if (waitForReplication) {
uint64_t mutableReplicationFactor = replicationFactor;
if (mutableReplicationFactor == 0) {
mutableReplicationFactor = dbServers.size();
}
VPackSlice servers = p.value.get("servers");
if (!servers.isArray() || servers.length() < mutableReplicationFactor) {
return true;
}
}
}
if (tmpHaveError) {
*errMsg = "Error in creation of collection:" + tmpMsg + " "
+ __FILE__ + std::to_string(__LINE__);
*dbServerResult = TRI_ERROR_CLUSTER_COULD_NOT_CREATE_COLLECTION;
return true;
}
*dbServerResult = setErrormsg(TRI_ERROR_NO_ERROR, *errMsg);
}
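The waitForReplication branch above amounts to a per-shard predicate; a condensed restatement (helper name is illustrative, not in the commit):

// replicationFactor == 0 means "replicate to all DB servers", as above.
bool shardFullyReplicated(VPackSlice shardEntry, uint64_t replicationFactor,
                          size_t numDbServers) {
  uint64_t needed = (replicationFactor == 0) ? numDbServers : replicationFactor;
  VPackSlice servers = shardEntry.get("servers");
  return servers.isArray() && servers.length() >= needed;
}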


@@ -349,6 +349,7 @@ class ClusterInfo {
std::string const& collectionID,
uint64_t numberOfShards,
uint64_t replicationFactor,
bool waitForReplication,
arangodb::velocypack::Slice const& json,
std::string& errorMsg, double timeout);


@@ -2264,12 +2264,13 @@ std::unique_ptr<LogicalCollection>
ClusterMethods::createCollectionOnCoordinator(TRI_col_type_e collectionType,
TRI_vocbase_t* vocbase,
VPackSlice parameters,
bool ignoreDistributeShardsLikeErrors) {
bool ignoreDistributeShardsLikeErrors,
bool waitForSyncReplication) {
auto col = std::make_unique<LogicalCollection>(vocbase, parameters);
// Collection is a temporary collection object that undergoes sanity checks etc.
// It is not used anywhere and will be cleaned up after this call.
// Persist collection will return the real object.
return persistCollectionInAgency(col.get(), ignoreDistributeShardsLikeErrors);
return persistCollectionInAgency(col.get(), ignoreDistributeShardsLikeErrors, waitForSyncReplication);
}
#endif
@@ -2279,7 +2280,7 @@ ClusterMethods::createCollectionOnCoordinator(TRI_col_type_e collectionType,
std::unique_ptr<LogicalCollection>
ClusterMethods::persistCollectionInAgency(
LogicalCollection* col, bool ignoreDistributeShardsLikeErrors) {
LogicalCollection* col, bool ignoreDistributeShardsLikeErrors, bool waitForSyncReplication) {
std::string distributeShardsLike = col->distributeShardsLike();
std::vector<std::string> dbServers;
std::vector<std::string> avoid = col->avoidServers();
@@ -2364,7 +2365,7 @@ ClusterMethods::persistCollectionInAgency(
std::string errorMsg;
int myerrno = ci->createCollectionCoordinator(
col->dbName(), col->cid_as_string(),
col->numberOfShards(), col->replicationFactor(), velocy.slice(), errorMsg, 240.0);
col->numberOfShards(), col->replicationFactor(), waitForSyncReplication, velocy.slice(), errorMsg, 240.0);
if (myerrno != TRI_ERROR_NO_ERROR) {
if (errorMsg.empty()) {


@@ -258,7 +258,8 @@ class ClusterMethods {
static std::unique_ptr<LogicalCollection> createCollectionOnCoordinator(
TRI_col_type_e collectionType, TRI_vocbase_t* vocbase,
arangodb::velocypack::Slice parameters,
bool ignoreDistributeShardsLikeErrors = false);
bool ignoreDistributeShardsLikeErrors,
bool waitForSyncReplication);
private:
@@ -267,7 +268,7 @@ class ClusterMethods {
////////////////////////////////////////////////////////////////////////////////
static std::unique_ptr<LogicalCollection> persistCollectionInAgency(
LogicalCollection* col, bool ignoreDistributeShardsLikeErrors = false);
LogicalCollection* col, bool ignoreDistributeShardsLikeErrors, bool waitForSyncReplication);
};
} // namespace arangodb


@@ -48,7 +48,6 @@ using namespace arangodb::rest;
GeneralServer::~GeneralServer() {
for (auto& task : _listenTasks) {
task->stop();
delete task;
}
}


@@ -107,12 +107,6 @@ GeneralServerFeature::GeneralServerFeature(
startsAfter("Upgrade");
}
GeneralServerFeature::~GeneralServerFeature() {
for (auto& server : _servers) {
delete server;
}
}
void GeneralServerFeature::collectOptions(
std::shared_ptr<ProgramOptions> options) {
options->addSection("server", "Server features");
@@ -273,6 +267,10 @@ void GeneralServerFeature::stop() {
}
void GeneralServerFeature::unprepare() {
for (auto& server : _servers) {
delete server;
}
GENERAL_SERVER = nullptr;
JOB_MANAGER = nullptr;
HANDLER_FACTORY = nullptr;


@@ -84,7 +84,6 @@ class GeneralServerFeature final
public:
explicit GeneralServerFeature(application_features::ApplicationServer*);
~GeneralServerFeature();
public:
void collectOptions(std::shared_ptr<options::ProgramOptions>) override final;


@@ -514,7 +514,7 @@ void Index::batchInsert(
transaction::Methods* trx,
std::vector<std::pair<TRI_voc_rid_t, arangodb::velocypack::Slice>> const&
documents,
arangodb::basics::LocalTaskQueue* queue) {
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) {
for (auto const& it : documents) {
int status = insert(trx, it.first, it.second, false);
if (status != TRI_ERROR_NO_ERROR) {


@@ -250,7 +250,7 @@ class Index {
virtual void batchInsert(
transaction::Methods*,
std::vector<std::pair<TRI_voc_rid_t, arangodb::velocypack::Slice>> const&,
arangodb::basics::LocalTaskQueue* queue = nullptr);
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue);
virtual int unload() = 0;
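Taking the queue as a std::shared_ptr (and dropping the old nullptr default) means every dispatched task co-owns the queue it reports its status to, so the queue can no longer be destroyed out from under a task. A toy stand-in, only to show the ownership shape, not the real LocalTaskQueue API:

#include <functional>
#include <memory>
#include <vector>

class TaskQueue {
 public:
  void enqueue(std::function<void()> fn) { _tasks.push_back(std::move(fn)); }
  void dispatchAndWait() {
    for (auto& t : _tasks) t();
    _tasks.clear();  // also breaks the task -> queue ownership cycle
  }
 private:
  std::vector<std::function<void()>> _tasks;
};

void batchInsertSketch(std::shared_ptr<TaskQueue> queue) {
  // Capturing the shared_ptr keeps the queue alive even if the caller
  // releases its reference before dispatchAndWait() runs.
  queue->enqueue([queue] { /* insert one batch into an index */ });
}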


@@ -73,7 +73,7 @@ namespace {
class MMFilesIndexFillerTask : public basics::LocalTask {
public:
MMFilesIndexFillerTask(
basics::LocalTaskQueue* queue, transaction::Methods* trx, Index* idx,
std::shared_ptr<basics::LocalTaskQueue> queue, transaction::Methods* trx, Index* idx,
std::vector<std::pair<TRI_voc_rid_t, VPackSlice>> const& documents)
: LocalTask(queue), _trx(trx), _idx(idx), _documents(documents) {}
@@ -1464,7 +1464,7 @@ bool MMFilesCollection::openIndex(VPackSlice const& description,
/// @brief initializes an index with a set of existing documents
void MMFilesCollection::fillIndex(
arangodb::basics::LocalTaskQueue* queue, transaction::Methods* trx,
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue, transaction::Methods* trx,
arangodb::Index* idx,
std::vector<std::pair<TRI_voc_rid_t, VPackSlice>> const& documents,
bool skipPersistent) {
@@ -1554,12 +1554,13 @@ int MMFilesCollection::fillIndexes(
TRI_ASSERT(SchedulerFeature::SCHEDULER != nullptr);
auto ioService = SchedulerFeature::SCHEDULER->ioService();
TRI_ASSERT(ioService != nullptr);
arangodb::basics::LocalTaskQueue queue(ioService);
PerformanceLogScope logScope(
std::string("fill-indexes-document-collection { collection: ") +
_logicalCollection->vocbase()->name() + "/" + _logicalCollection->name() +
" }, indexes: " + std::to_string(n - 1));
auto queue = std::make_shared<arangodb::basics::LocalTaskQueue>(ioService);
try {
TRI_ASSERT(!ServerState::instance()->isCoordinator());
@@ -1585,8 +1586,6 @@ int MMFilesCollection::fillIndexes(
blockSize = 1;
}
ManagedDocumentResult mmdr;
std::vector<std::pair<TRI_voc_rid_t, VPackSlice>> documents;
documents.reserve(blockSize);
@@ -1596,12 +1595,12 @@
if (idx->type() == Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX) {
continue;
}
fillIndex(&queue, trx, idx.get(), documents, skipPersistent);
fillIndex(queue, trx, idx.get(), documents, skipPersistent);
}
queue.dispatchAndWait();
queue->dispatchAndWait();
if (queue.status() != TRI_ERROR_NO_ERROR) {
if (queue->status() != TRI_ERROR_NO_ERROR) {
rollbackAll();
rolledBack = true;
}
@@ -1628,7 +1627,7 @@ int MMFilesCollection::fillIndexes(
if (documents.size() == blockSize) {
// now actually fill the secondary indexes
insertInAllIndexes();
if (queue.status() != TRI_ERROR_NO_ERROR) {
if (queue->status() != TRI_ERROR_NO_ERROR) {
break;
}
documents.clear();
@@ -1638,33 +1637,33 @@
}
// process the remainder of the documents
if (queue.status() == TRI_ERROR_NO_ERROR && !documents.empty()) {
if (queue->status() == TRI_ERROR_NO_ERROR && !documents.empty()) {
insertInAllIndexes();
}
} catch (arangodb::basics::Exception const& ex) {
queue.setStatus(ex.code());
queue->setStatus(ex.code());
LOG_TOPIC(WARN, arangodb::Logger::FIXME)
<< "caught exception while filling indexes: " << ex.what();
} catch (std::bad_alloc const&) {
queue.setStatus(TRI_ERROR_OUT_OF_MEMORY);
queue->setStatus(TRI_ERROR_OUT_OF_MEMORY);
} catch (std::exception const& ex) {
LOG_TOPIC(WARN, arangodb::Logger::FIXME)
<< "caught exception while filling indexes: " << ex.what();
queue.setStatus(TRI_ERROR_INTERNAL);
queue->setStatus(TRI_ERROR_INTERNAL);
} catch (...) {
LOG_TOPIC(WARN, arangodb::Logger::FIXME)
<< "caught unknown exception while filling indexes";
queue.setStatus(TRI_ERROR_INTERNAL);
queue->setStatus(TRI_ERROR_INTERNAL);
}
if (queue.status() != TRI_ERROR_NO_ERROR && !rolledBack) {
if (queue->status() != TRI_ERROR_NO_ERROR && !rolledBack) {
try {
rollbackAll();
} catch (...) {
}
}
return queue.status();
return queue->status();
}
/// @brief opens an existing collection


@@ -398,7 +398,7 @@ class MMFilesCollection final : public PhysicalCollection {
bool openIndex(VPackSlice const& description, transaction::Methods* trx);
/// @brief initializes an index with all existing documents
void fillIndex(basics::LocalTaskQueue*, transaction::Methods*, Index*,
void fillIndex(std::shared_ptr<basics::LocalTaskQueue>, transaction::Methods*, Index*,
std::vector<std::pair<TRI_voc_rid_t, VPackSlice>> const&,
bool);


@@ -329,7 +329,7 @@ int MMFilesEdgeIndex::remove(transaction::Methods* trx,
void MMFilesEdgeIndex::batchInsert(
transaction::Methods* trx,
std::vector<std::pair<TRI_voc_rid_t, VPackSlice>> const& documents,
arangodb::basics::LocalTaskQueue* queue) {
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) {
if (documents.empty()) {
return;
}


@@ -111,7 +111,7 @@ class MMFilesEdgeIndex final : public Index {
void batchInsert(transaction::Methods*,
std::vector<std::pair<TRI_voc_rid_t, VPackSlice>> const&,
arangodb::basics::LocalTaskQueue*) override;
std::shared_ptr<arangodb::basics::LocalTaskQueue>) override;
int unload() override;


@@ -152,7 +152,7 @@ MMFilesEngine::MMFilesEngine(application_features::ApplicationServer* server)
MMFilesEngine::~MMFilesEngine() {}
// perform a physical deletion of the database
Result MMFilesEngine::dropDatabase(Database* database) {
Result MMFilesEngine::dropDatabase(TRI_vocbase_t* database) {
// delete persistent indexes for this database
MMFilesPersistentIndexFeature::dropDatabase(database->id());


@@ -138,14 +138,14 @@ class MMFilesEngine final : public StorageEngine {
void waitForSync(TRI_voc_tick_t tick) override;
virtual TRI_vocbase_t* openDatabase(arangodb::velocypack::Slice const& parameters, bool isUpgrade, int&) override;
Database* createDatabase(TRI_voc_tick_t id, arangodb::velocypack::Slice const& args, int& status) override {
TRI_vocbase_t* createDatabase(TRI_voc_tick_t id, arangodb::velocypack::Slice const& args, int& status) override {
status = TRI_ERROR_NO_ERROR;
return createDatabaseMMFiles(id, args);
}
int writeCreateDatabaseMarker(TRI_voc_tick_t id, VPackSlice const& slice) override;
void prepareDropDatabase(TRI_vocbase_t* vocbase, bool useWriteMarker, int& status) override;
Result dropDatabase(Database* database) override;
Result dropDatabase(TRI_vocbase_t* database) override;
void waitUntilDeletion(TRI_voc_tick_t id, bool force, int& status) override;
// wal in recovery


@@ -644,7 +644,7 @@ int MMFilesHashIndex::remove(transaction::Methods* trx,
void MMFilesHashIndex::batchInsert(
transaction::Methods* trx,
std::vector<std::pair<TRI_voc_rid_t, VPackSlice>> const& documents,
arangodb::basics::LocalTaskQueue* queue) {
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) {
TRI_ASSERT(queue != nullptr);
if (_unique) {
batchInsertUnique(trx, documents, queue);
@@ -760,7 +760,7 @@ int MMFilesHashIndex::insertUnique(transaction::Methods* trx,
void MMFilesHashIndex::batchInsertUnique(
transaction::Methods* trx,
std::vector<std::pair<TRI_voc_rid_t, VPackSlice>> const& documents,
arangodb::basics::LocalTaskQueue* queue) {
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) {
TRI_ASSERT(queue != nullptr);
std::shared_ptr<std::vector<MMFilesHashIndexElement*>> elements;
elements.reset(new std::vector<MMFilesHashIndexElement*>());
@@ -880,7 +880,7 @@ int MMFilesHashIndex::insertMulti(transaction::Methods* trx,
void MMFilesHashIndex::batchInsertMulti(
transaction::Methods* trx,
std::vector<std::pair<TRI_voc_rid_t, VPackSlice>> const& documents,
arangodb::basics::LocalTaskQueue* queue) {
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) {
TRI_ASSERT(queue != nullptr);
std::shared_ptr<std::vector<MMFilesHashIndexElement*>> elements;
elements.reset(new std::vector<MMFilesHashIndexElement*>());


@@ -173,7 +173,7 @@ class MMFilesHashIndex final : public MMFilesPathBasedIndex {
void batchInsert(
transaction::Methods*,
std::vector<std::pair<TRI_voc_rid_t, arangodb::velocypack::Slice>> const&,
arangodb::basics::LocalTaskQueue* queue = nullptr) override;
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) override;
int unload() override;
@@ -205,7 +205,7 @@ class MMFilesHashIndex final : public MMFilesPathBasedIndex {
void batchInsertUnique(
transaction::Methods*,
std::vector<std::pair<TRI_voc_rid_t, arangodb::velocypack::Slice>> const&,
arangodb::basics::LocalTaskQueue* queue = nullptr);
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue);
int insertMulti(transaction::Methods*, TRI_voc_rid_t,
arangodb::velocypack::Slice const&, bool isRollback);
@@ -213,7 +213,7 @@ class MMFilesHashIndex final : public MMFilesPathBasedIndex {
void batchInsertMulti(
transaction::Methods*,
std::vector<std::pair<TRI_voc_rid_t, arangodb::velocypack::Slice>> const&,
arangodb::basics::LocalTaskQueue* queue = nullptr);
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue);
int removeUniqueElement(transaction::Methods*, MMFilesHashIndexElement*,
bool);


@@ -29,6 +29,7 @@
#include "Basics/conversions.h"
#include "Basics/files.h"
#include "Cluster/ClusterComm.h"
#include "Cluster/ClusterFeature.h"
#include "Cluster/ClusterMethods.h"
#include "Cluster/FollowerInfo.h"
#include "GeneralServer/GeneralServer.h"
@@ -1592,14 +1593,15 @@ int MMFilesRestReplicationHandler::processRestoreCollectionCoordinator(
if (dropExisting) {
int res = ci->dropCollectionCoordinator(dbName, col->cid_as_string(),
errorMsg, 0.0);
if (res == TRI_ERROR_FORBIDDEN) {
if (res == TRI_ERROR_FORBIDDEN ||
res == TRI_ERROR_CLUSTER_MUST_NOT_DROP_COLL_OTHER_DISTRIBUTESHARDSLIKE) {
// some collections must not be dropped
res = truncateCollectionOnCoordinator(dbName, name);
if (res != TRI_ERROR_NO_ERROR) {
errorMsg =
"unable to truncate collection (dropping is forbidden): " + name;
return res;
}
return res;
}
if (res != TRI_ERROR_NO_ERROR) {
@@ -1679,8 +1681,9 @@ int MMFilesRestReplicationHandler::processRestoreCollectionCoordinator(
VPackSlice const merged = mergedBuilder.slice();
try {
bool createWaitsForSyncReplication = application_features::ApplicationServer::getFeature<ClusterFeature>("Cluster")->createWaitsForSyncReplication();
auto col = ClusterMethods::createCollectionOnCoordinator(
collectionType, _vocbase, merged, ignoreDistributeShardsLikeErrors);
collectionType, _vocbase, merged, ignoreDistributeShardsLikeErrors, createWaitsForSyncReplication);
TRI_ASSERT(col != nullptr);
} catch (basics::Exception const& e) {
// Error, report it.


@@ -75,7 +75,7 @@ class Edge {
// EdgeEntry() : _nextEntryOffset(0), _dataSize(0), _vertexIDSize(0) {}
Edge() : _targetShard(InvalidPregelShard) {}
Edge(PregelShard target, PregelKey const& key)
: _targetShard(target), _toKey(key) {}
: _targetShard(target), _toKey(key), _data(0) {}
// size_t getSize() { return sizeof(EdgeEntry) + _vertexIDSize + _dataSize; }
PregelKey const& toKey() const { return _toKey; }


@@ -62,7 +62,8 @@ Worker<V, E, M>::Worker(TRI_vocbase_t* vocbase, Algorithm<V, E, M>* algo,
: _state(WorkerState::IDLE),
_config(vocbase, initConfig),
_algorithm(algo),
_nextGSSSendMessageCount(0) {
_nextGSSSendMessageCount(0),
_requestedNextGSS(false) {
MUTEX_LOCKER(guard, _commandMutex);
VPackSlice userParams = initConfig.get(Utils::userParametersKey);


@@ -32,15 +32,20 @@
#include "Basics/StringUtils.h"
#include "Basics/VelocyPackHelper.h"
#include "Indexes/Index.h"
#include "Indexes/IndexIterator.h"
#include "Logger/Logger.h"
#include "MMFiles/MMFilesCollection.h" //TODO -- Remove -- ditches
#include "MMFiles/MMFilesDatafileHelper.h"
#include "MMFiles/MMFilesDitch.h"
#include "MMFiles/MMFilesIndexElement.h"
#include "MMFiles/MMFilesPrimaryIndex.h"
#include "RestServer/DatabaseFeature.h"
#include "RocksDBEngine/RocksDBCommon.h"
#include "SimpleHttpClient/SimpleHttpClient.h"
#include "SimpleHttpClient/SimpleHttpResult.h"
#include "StorageEngine/EngineSelectorFeature.h"
#include "StorageEngine/StorageEngine.h"
#include "Transaction/Helpers.h"
#include "Utils/CollectionGuard.h"
#include "Utils/OperationOptions.h"
#include "VocBase/LogicalCollection.h"
@@ -53,11 +58,13 @@
#include <velocypack/Iterator.h>
#include <velocypack/Slice.h>
#include <velocypack/velocypack-aliases.h>
#include <cstring>
using namespace arangodb;
using namespace arangodb::basics;
using namespace arangodb::httpclient;
using namespace arangodb::rest;
using namespace arangodb::rocksutils;
////////////////////////////////////////////////////////////////////////////////
/// @brief performs a binary search for the given key in the markers vector
@@ -267,7 +274,9 @@ int InitialSyncer::run(std::string& errorMsg, bool incremental) {
errorMsg = "got invalid response from master at " +
_masterInfo._endpoint + ": invalid JSON";
} else {
res = handleInventoryResponse(slice, incremental, errorMsg);
auto pair = stripObjectIds(slice);
res = handleInventoryResponse(pair.first, incremental,
errorMsg);
}
}
@@ -1020,8 +1029,16 @@ int InitialSyncer::handleCollectionSync(arangodb::LogicalCollection* col,
// now we can fetch the complete chunk information from the master
try {
res = handleSyncKeys(col, id.copyString(), cid, collectionName, maxTick,
errorMsg);
if (std::strcmp("mmfiles", EngineSelectorFeature::engineName()) == 0) {
res = handleSyncKeysMMFiles(col, id.copyString(), cid, collectionName,
maxTick, errorMsg);
} else if (std::strcmp("rocksdb", EngineSelectorFeature::engineName()) ==
0) {
res = handleSyncKeysRocksDB(col, id.copyString(), cid, collectionName,
maxTick, errorMsg);
} else {
THROW_ARANGO_EXCEPTION(TRI_ERROR_NOT_IMPLEMENTED);
}
} catch (arangodb::basics::Exception const& ex) {
res = ex.code();
} catch (std::exception const& ex) {
@@ -1038,18 +1055,563 @@ int InitialSyncer::handleCollectionSync(arangodb::LogicalCollection* col,
/// @brief incrementally fetch data from a collection
////////////////////////////////////////////////////////////////////////////////
int InitialSyncer::handleSyncKeys(arangodb::LogicalCollection* col,
std::string const& keysId,
std::string const& cid,
std::string const& collectionName,
TRI_voc_tick_t maxTick,
std::string& errorMsg) {
int InitialSyncer::handleSyncKeysRocksDB(arangodb::LogicalCollection* col,
std::string const& keysId,
std::string const& cid,
std::string const& collectionName,
TRI_voc_tick_t maxTick,
std::string& errorMsg) {
std::string progress =
"collecting local keys for collection '" + collectionName + "'";
setProgress(progress);
if (checkAborted()) {
return TRI_ERROR_REPLICATION_APPLIER_STOPPED;
}
sendExtendBatch();
sendExtendBarrier();
TRI_voc_tick_t const chunkSize = 5000;
std::string const baseUrl = BaseUrl + "/keys";
std::string url =
baseUrl + "/" + keysId + "?chunkSize=" + std::to_string(chunkSize);
progress = "fetching remote keys chunks for collection '" + collectionName +
"' from " + url;
setProgress(progress);
std::unique_ptr<SimpleHttpResult> response(
_client->retryRequest(rest::RequestType::GET, url, nullptr, 0));
if (response == nullptr || !response->isComplete()) {
errorMsg = "could not connect to master at " + _masterInfo._endpoint +
": " + _client->getErrorMessage();
return TRI_ERROR_REPLICATION_NO_RESPONSE;
}
TRI_ASSERT(response != nullptr);
if (response->wasHttpError()) {
errorMsg = "got invalid response from master at " + _masterInfo._endpoint +
": HTTP " + StringUtils::itoa(response->getHttpReturnCode()) +
": " + response->getHttpReturnMessage();
return TRI_ERROR_REPLICATION_MASTER_ERROR;
}
auto builder = std::make_shared<VPackBuilder>();
int res = parseResponse(builder, response.get());
if (res != TRI_ERROR_NO_ERROR) {
errorMsg = "got invalid response from master at " +
std::string(_masterInfo._endpoint) +
": invalid response is no array";
return TRI_ERROR_REPLICATION_INVALID_RESPONSE;
}
VPackSlice const chunkSlice = builder->slice();
if (!chunkSlice.isArray()) {
errorMsg = "got invalid response from master at " + _masterInfo._endpoint +
": response is no array";
return TRI_ERROR_REPLICATION_INVALID_RESPONSE;
}
ManagedDocumentResult mmdr;
OperationOptions options;
options.silent = true;
options.ignoreRevs = true;
options.isRestore = true;
VPackBuilder keyBuilder;
size_t const numChunks = static_cast<size_t>(chunkSlice.length());
// remove all keys that are below first remote key or beyond last remote key
if (numChunks > 0) {
// first chunk
SingleCollectionTransaction trx(
transaction::StandaloneContext::Create(_vocbase), col->cid(),
AccessMode::Type::WRITE);
Result res = trx.begin();
if (!res.ok()) {
errorMsg =
std::string("unable to start transaction: ") + res.errorMessage();
res.reset(res.errorNumber(), errorMsg);
return res.errorNumber();
}
VPackSlice chunk = chunkSlice.at(0);
TRI_ASSERT(chunk.isObject());
auto lowSlice = chunk.get("low");
TRI_ASSERT(lowSlice.isString());
// last high
chunk = chunkSlice.at(numChunks - 1);
TRI_ASSERT(chunk.isObject());
auto highSlice = chunk.get("high");
TRI_ASSERT(highSlice.isString());
std::string const lowKey(lowSlice.copyString());
std::string const highKey(highSlice.copyString());
LogicalCollection* coll = trx.documentCollection();
std::unique_ptr<IndexIterator> iterator =
coll->getAllIterator(&trx, &mmdr, false);
iterator->next(
[&](DocumentIdentifierToken const& token) {
if (coll->readDocument(&trx, token, mmdr) == false) {
return;
}
VPackSlice doc(mmdr.vpack());
VPackSlice key = doc.get(StaticStrings::KeyString);
if (key.compareString(lowKey.data(), lowKey.length()) < 0) {
trx.remove(collectionName, key, options);
} else if (key.compareString(highKey.data(), highKey.length()) > 0) {
trx.remove(collectionName, key, options);
}
},
UINT64_MAX);
trx.commit();
}
{
if (checkAborted()) {
return TRI_ERROR_REPLICATION_APPLIER_STOPPED;
}
SingleCollectionTransaction trx(
transaction::StandaloneContext::Create(_vocbase), col->cid(),
AccessMode::Type::WRITE);
Result res = trx.begin();
if (!res.ok()) {
errorMsg =
std::string("unable to start transaction: ") + res.errorMessage();
res.reset(res.errorNumber(), res.errorMessage());
return res.errorNumber();
}
// We do not take responsibility for the index.
// The LogicalCollection is protected by trx.
// Neither it nor it's indexes can be invalidated
size_t currentChunkId = 0;
std::string lowKey;
std::string highKey;
std::string hashString;
uint64_t localHash = 0x012345678;
// chunk keys + revisionId
std::vector<std::pair<std::string, uint64_t>> markers;
bool foundLowKey = false;
auto resetChunk = [&]() -> void {
sendExtendBatch();
sendExtendBarrier();
progress = "processing keys chunk " + std::to_string(currentChunkId) +
" for collection '" + collectionName + "'";
setProgress(progress);
// read remote chunk
VPackSlice chunk = chunkSlice.at(currentChunkId);
if (!chunk.isObject()) {
errorMsg = "got invalid response from master at " +
_masterInfo._endpoint + ": chunk is no object";
THROW_ARANGO_EXCEPTION(TRI_ERROR_REPLICATION_INVALID_RESPONSE);
}
VPackSlice const lowSlice = chunk.get("low");
VPackSlice const highSlice = chunk.get("high");
VPackSlice const hashSlice = chunk.get("hash");
if (!lowSlice.isString() || !highSlice.isString() ||
!hashSlice.isString()) {
errorMsg = "got invalid response from master at " +
_masterInfo._endpoint +
": chunks in response have an invalid format";
THROW_ARANGO_EXCEPTION(TRI_ERROR_REPLICATION_INVALID_RESPONSE);
}
// now reset chunk information
markers.clear();
lowKey = lowSlice.copyString();
highKey = highSlice.copyString();
hashString = hashSlice.copyString();
localHash = 0x012345678;
foundLowKey = false;
};
// set to first chunk
resetChunk();
std::function<void(VPackSlice, VPackSlice)> parseDoc = [&](VPackSlice doc,
VPackSlice key) {
bool rangeUnequal = false;
bool nextChunk = false;
int cmp1 = key.compareString(lowKey.data(), lowKey.length());
int cmp2 = key.compareString(highKey.data(), highKey.length());
if (cmp1 < 0) {
// smaller values than lowKey mean they don't exist remotely
trx.remove(collectionName, key, options);
return;
}
if (cmp1 >= 0 && cmp2 <= 0) {
// we only need to hash if we are in the range
if (cmp1 == 0) {
foundLowKey = true;
} else if (!foundLowKey && cmp1 > 0) {
rangeUnequal = true;
nextChunk = true;
}
if (foundLowKey) {
VPackSlice revision = doc.get(StaticStrings::RevString);
localHash ^= key.hashString();
localHash ^= revision.hash();
markers.emplace_back(key.copyString(), TRI_ExtractRevisionId(doc));
if (cmp2 == 0) { // found highKey
rangeUnequal = std::to_string(localHash) != hashString;
nextChunk = true;
}
} else if (cmp2 == 0) {
rangeUnequal = true;
nextChunk = true;
}
} else if (cmp2 > 0) { // higher than highKey
// current range was unequal and we did not find the
// high key. Load range and skip to next
rangeUnequal = true;
nextChunk = true;
}
if (rangeUnequal) {
int res = syncChunkRocksDB(&trx, keysId, currentChunkId, lowKey,
highKey, markers, errorMsg);
if (res != TRI_ERROR_NO_ERROR) {
THROW_ARANGO_EXCEPTION(res);
}
}
TRI_ASSERT(!rangeUnequal || nextChunk); // A => B
if (nextChunk && currentChunkId + 1 < numChunks) {
currentChunkId++; // we are out of range, see next chunk
resetChunk();
// key is higher than upper bound, recheck the current document
if (cmp2 > 0) {
parseDoc(doc, key);
}
}
};
std::unique_ptr<IndexIterator> iterator =
col->getAllIterator(&trx, &mmdr, false);
iterator->next(
[&](DocumentIdentifierToken const& token) {
if (col->readDocument(&trx, token, mmdr) == false) {
return;
}
VPackSlice doc(mmdr.vpack());
VPackSlice key = doc.get(StaticStrings::KeyString);
parseDoc(doc, key);
},
UINT64_MAX);
res = trx.commit();
if (!res.ok()) {
return res.errorNumber();
}
}
return res;
}
int InitialSyncer::syncChunkRocksDB(
SingleCollectionTransaction* trx, std::string const& keysId,
uint64_t chunkId, std::string const& lowString,
std::string const& highString,
std::vector<std::pair<std::string, uint64_t>> markers,
std::string& errorMsg) {
std::string const baseUrl = BaseUrl + "/keys";
TRI_voc_tick_t const chunkSize = 5000;
std::string const& collectionName = trx->documentCollection()->name();
PhysicalCollection* physical = trx->documentCollection()->getPhysical();
OperationOptions options;
options.silent = true;
options.ignoreRevs = true;
options.isRestore = true;
// no match
// must transfer keys for non-matching range
std::string url = baseUrl + "/" + keysId + "?type=keys&chunk=" +
std::to_string(chunkId) + "&chunkSize=" +
std::to_string(chunkSize);
std::string progress =
"fetching keys chunk '" + std::to_string(chunkId) + "' from " + url;
setProgress(progress);
std::unique_ptr<SimpleHttpResult> response(
_client->retryRequest(rest::RequestType::PUT, url, nullptr, 0));
if (response == nullptr || !response->isComplete()) {
errorMsg = "could not connect to master at " + _masterInfo._endpoint +
": " + _client->getErrorMessage();
return TRI_ERROR_REPLICATION_NO_RESPONSE;
}
TRI_ASSERT(response != nullptr);
if (response->wasHttpError()) {
errorMsg = "got invalid response from master at " + _masterInfo._endpoint +
": HTTP " + StringUtils::itoa(response->getHttpReturnCode()) +
": " + response->getHttpReturnMessage();
return TRI_ERROR_REPLICATION_MASTER_ERROR;
}
auto builder = std::make_shared<VPackBuilder>();
int res = parseResponse(builder, response.get());
if (res != TRI_ERROR_NO_ERROR) {
errorMsg = "got invalid response from master at " + _masterInfo._endpoint +
": response is no array";
return TRI_ERROR_REPLICATION_INVALID_RESPONSE;
}
VPackSlice const responseBody = builder->slice();
if (!responseBody.isArray()) {
errorMsg = "got invalid response from master at " + _masterInfo._endpoint +
": response is no array";
return TRI_ERROR_REPLICATION_INVALID_RESPONSE;
}
transaction::BuilderLeaser keyBuilder(trx);
/*size_t nextStart = 0;
// delete all keys at start of the range
while (nextStart < markers.size()) {
std::string const& localKey = markers[nextStart].first;
if ( localKey.compare(lowString) < 0) {
// we have a local key that is not present remotely
keyBuilder.clear();
keyBuilder.openObject();
keyBuilder.add(StaticStrings::KeyString, VPackValue(localKey));
keyBuilder.close();
trx.remove(collectionName, keyBuilder.slice(), options);
++nextStart;
} else {
break;
}
}*/
std::vector<size_t> toFetch;
size_t const numKeys = static_cast<size_t>(responseBody.length());
TRI_ASSERT(numKeys > 0);
size_t i = 0;
size_t nextStart = 0;
for (VPackSlice const& pair : VPackArrayIterator(responseBody)) {
if (!pair.isArray() || pair.length() != 2) {
errorMsg = "got invalid response from master at " +
_masterInfo._endpoint +
": response key pair is no valid array";
return TRI_ERROR_REPLICATION_INVALID_RESPONSE;
}
// key
VPackSlice const keySlice = pair.at(0);
if (!keySlice.isString()) {
errorMsg = "got invalid response from master at " +
_masterInfo._endpoint + ": response key is no string";
return TRI_ERROR_REPLICATION_INVALID_RESPONSE;
}
// rid
if (markers.empty()) {
// no local markers
toFetch.emplace_back(i);
i++;
continue;
}
std::string const keyString = keySlice.copyString();
// remove keys not present anymore
while (nextStart < markers.size()) {
std::string const& localKey = markers[nextStart].first;
int res = localKey.compare(keyString);
if (res != 0) {
// we have a local key that is not present remotely
keyBuilder->clear();
keyBuilder->openObject();
keyBuilder->add(StaticStrings::KeyString, VPackValue(localKey));
keyBuilder->close();
trx->remove(collectionName, keyBuilder->slice(), options);
++nextStart;
} else {
// key match
break;
}
}
// see if key exists
DocumentIdentifierToken token = physical->lookupKey(trx, keySlice);
if (!token._data) {
// key not found locally
toFetch.emplace_back(i);
} else if (TRI_RidToString(token._data) != pair.at(1).copyString()) {
// key found, but revision id differs
toFetch.emplace_back(i);
++nextStart;
} else {
// a match - nothing to do!
++nextStart;
}
i++;
}
if (!toFetch.empty()) {
VPackBuilder keysBuilder;
keysBuilder.openArray();
for (auto& it : toFetch) {
keysBuilder.add(VPackValue(it));
}
keysBuilder.close();
std::string url = baseUrl + "/" + keysId + "?type=docs&chunk=" +
std::to_string(chunkId) + "&chunkSize=" +
std::to_string(chunkSize);
progress = "fetching documents chunk " + std::to_string(chunkId) +
" for collection '" + collectionName + "' from " + url;
setProgress(progress);
std::string const keyJsonString(keysBuilder.slice().toJson());
std::unique_ptr<SimpleHttpResult> response(
_client->retryRequest(rest::RequestType::PUT, url,
keyJsonString.c_str(), keyJsonString.size()));
if (response == nullptr || !response->isComplete()) {
errorMsg = "could not connect to master at " + _masterInfo._endpoint +
": " + _client->getErrorMessage();
return TRI_ERROR_REPLICATION_NO_RESPONSE;
}
TRI_ASSERT(response != nullptr);
if (response->wasHttpError()) {
errorMsg = "got invalid response from master at " +
_masterInfo._endpoint + ": HTTP " +
StringUtils::itoa(response->getHttpReturnCode()) + ": " +
response->getHttpReturnMessage();
return TRI_ERROR_REPLICATION_MASTER_ERROR;
}
auto builder = std::make_shared<VPackBuilder>();
int res = parseResponse(builder, response.get());
if (res != TRI_ERROR_NO_ERROR) {
errorMsg = "got invalid response from master at " +
std::string(_masterInfo._endpoint) + ": response is no array";
return TRI_ERROR_REPLICATION_INVALID_RESPONSE;
}
VPackSlice const slice = builder->slice();
if (!slice.isArray()) {
errorMsg = "got invalid response from master at " +
_masterInfo._endpoint + ": response is no array";
return TRI_ERROR_REPLICATION_INVALID_RESPONSE;
}
for (auto const& it : VPackArrayIterator(slice)) {
if (!it.isObject()) {
errorMsg = "got invalid response from master at " +
_masterInfo._endpoint + ": document is no object";
return TRI_ERROR_REPLICATION_INVALID_RESPONSE;
}
VPackSlice const keySlice = it.get(StaticStrings::KeyString);
if (!keySlice.isString()) {
errorMsg = "got invalid response from master at " +
_masterInfo._endpoint + ": document key is invalid";
return TRI_ERROR_REPLICATION_INVALID_RESPONSE;
}
VPackSlice const revSlice = it.get(StaticStrings::RevString);
if (!revSlice.isString()) {
errorMsg = "got invalid response from master at " +
_masterInfo._endpoint + ": document revision is invalid";
return TRI_ERROR_REPLICATION_INVALID_RESPONSE;
}
DocumentIdentifierToken token = physical->lookupKey(trx, keySlice);
if (!token._data) {
// INSERT
OperationResult opRes = trx->insert(collectionName, it, options);
res = opRes.code;
} else {
// UPDATE
OperationResult opRes = trx->update(collectionName, it, options);
res = opRes.code;
}
if (res != TRI_ERROR_NO_ERROR) {
return res;
}
}
}
return TRI_ERROR_NO_ERROR;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief incrementally fetch data from a collection
////////////////////////////////////////////////////////////////////////////////
int InitialSyncer::handleSyncKeysMMFiles(arangodb::LogicalCollection* col,
std::string const& keysId,
std::string const& cid,
std::string const& collectionName,
TRI_voc_tick_t maxTick,
std::string& errorMsg) {
std::string progress =
"collecting local keys for collection '" + collectionName + "'";
setProgress(progress);
// fetch all local keys from primary index
VPackBuilder dataLake;
std::vector<uint8_t const*> markers;
MMFilesDocumentDitch* ditch = nullptr;
@@ -1070,23 +1632,20 @@ int InitialSyncer::handleSyncKeys(arangodb::LogicalCollection* col,
return res.errorNumber();
}
MMFilesCollection* mmcol =
dynamic_cast<MMFilesCollection*>(col->getPhysical());
if (mmcol != nullptr) {
ditch = mmcol->ditches()->createMMFilesDocumentDitch(false, __FILE__,
__LINE__);
ditch = arangodb::MMFilesCollection::toMMFilesCollection(col)
->ditches()
->createMMFilesDocumentDitch(false, __FILE__, __LINE__);
if (ditch == nullptr) {
return TRI_ERROR_OUT_OF_MEMORY;
}
if (ditch == nullptr) {
return TRI_ERROR_OUT_OF_MEMORY;
}
}
// TRI_ASSERT(ditch != nullptr);
TRI_DEFER(if (ditch != nullptr) {
arangodb::MMFilesCollection::toMMFilesCollection(col)->ditches()->freeDitch(
ditch);
});
TRI_ASSERT(ditch != nullptr);
TRI_DEFER(arangodb::MMFilesCollection::toMMFilesCollection(col)
->ditches()
->freeDitch(ditch));
{
SingleCollectionTransaction trx(
@@ -1110,18 +1669,11 @@ int InitialSyncer::handleSyncKeys(arangodb::LogicalCollection* col,
uint64_t iterations = 0;
ManagedDocumentResult mmdr;
dataLake.openArray();
trx.invokeOnAllElements(
trx.name(), [this, &trx, &mmdr, &markers, &iterations,
&dataLake](DocumentIdentifierToken const& token) {
trx.name(), [this, &trx, &mmdr, &markers,
&iterations](DocumentIdentifierToken const& token) {
if (trx.documentCollection()->readDocument(&trx, token, mmdr)) {
VPackSlice doc(mmdr.vpack());
dataLake.openObject();
dataLake.add(StaticStrings::KeyString,
doc.get(StaticStrings::KeyString));
dataLake.add(StaticStrings::RevString,
doc.get(StaticStrings::RevString));
dataLake.close();
markers.emplace_back(mmdr.vpack());
if (++iterations % 10000 == 0) {
if (checkAborted()) {
@@ -1131,11 +1683,6 @@ int InitialSyncer::handleSyncKeys(arangodb::LogicalCollection* col,
}
return true;
});
dataLake.close();
// because live is hard
for (VPackSlice const& slice : VPackArrayIterator(dataLake.slice())) {
markers.emplace_back(slice.start());
}
if (checkAborted()) {
return TRI_ERROR_REPLICATION_APPLIER_STOPPED;
@@ -1340,7 +1887,9 @@ int InitialSyncer::handleSyncKeys(arangodb::LogicalCollection* col,
// Neither it nor it's indexes can be invalidated
// TODO Move to MMFiles
PhysicalCollection* physical = trx.documentCollection()->getPhysical();
auto physical = static_cast<MMFilesCollection*>(
trx.documentCollection()->getPhysical());
auto idx = physical->primaryIndex();
size_t const currentChunkId = i;
progress = "processing keys chunk " + std::to_string(currentChunkId) +
@@ -1525,11 +2074,13 @@ int InitialSyncer::handleSyncKeys(arangodb::LogicalCollection* col,
}
}
DocumentIdentifierToken token = physical->lookupKey(&trx, keySlice);
if (!token._data) {
MMFilesSimpleIndexElement element = idx->lookupKey(&trx, keySlice);
if (!element) {
// key not found locally
toFetch.emplace_back(i);
} else if (TRI_RidToString(token._data) != pair.at(1).copyString()) {
} else if (TRI_RidToString(element.revisionId()) !=
pair.at(1).copyString()) {
// key found, but revision id differs
toFetch.emplace_back(i);
++nextStart;
@@ -1643,9 +2194,9 @@ int InitialSyncer::handleSyncKeys(arangodb::LogicalCollection* col,
return TRI_ERROR_REPLICATION_INVALID_RESPONSE;
}
DocumentIdentifierToken token = physical->lookupKey(&trx, keySlice);
MMFilesSimpleIndexElement element = idx->lookupKey(&trx, keySlice);
if (!token._data) {
if (!element) {
// INSERT
OperationResult opRes = trx.insert(collectionName, it, options);
res = opRes.code;
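The RocksDB path above reduces to a per-chunk comparison: XOR-accumulate the hashes of all local keys and revisions inside [low, high] and fetch the chunk's documents only when the result disagrees with the hash string the master sent. A condensed restatement (hash functions are parameters here; the real code uses VPackSlice::hashString() and hash()):

#include <cstdint>
#include <string>
#include <vector>

struct LocalDoc { std::string key; std::string rev; };

bool chunkMatches(std::vector<LocalDoc> const& docsInRange,
                  std::string const& masterHash,
                  uint64_t (*hashKey)(std::string const&),
                  uint64_t (*hashRev)(std::string const&)) {
  uint64_t localHash = 0x012345678;  // same seed the syncer uses
  for (auto const& doc : docsInRange) {
    localHash ^= hashKey(doc.key);
    localHash ^= hashRev(doc.rev);
  }
  // The master transmits the hash as a string, hence the conversion.
  return std::to_string(localHash) == masterHash;
}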


@@ -185,14 +185,34 @@ class InitialSyncer : public Syncer {
int handleCollectionSync(arangodb::LogicalCollection*, std::string const&,
std::string const&, TRI_voc_tick_t, std::string&);
//////////////////////////////////////////////////////////////////////////////
/// @brief incrementally fetch data from a collection
//////////////////////////////////////////////////////////////////////////////
int handleSyncKeysRocksDB(arangodb::LogicalCollection* col,
std::string const& keysId, std::string const& cid,
std::string const& collectionName, TRI_voc_tick_t maxTick,
std::string& errorMsg);
//////////////////////////////////////////////////////////////////////////////
/// @brief incrementally fetch chunk data from a collection
//////////////////////////////////////////////////////////////////////////////
int syncChunkRocksDB(SingleCollectionTransaction* trx,
std::string const& keysId,
uint64_t chunkId,
std::string const& lowKey, std::string const& highKey,
std::vector<std::pair<std::string, uint64_t>> markers,
std::string& errorMsg);
//////////////////////////////////////////////////////////////////////////////
/// @brief incrementally fetch data from a collection
//////////////////////////////////////////////////////////////////////////////
int handleSyncKeys(arangodb::LogicalCollection*, std::string const&,
std::string const&, std::string const&, TRI_voc_tick_t,
std::string&);
int handleSyncKeysMMFiles(arangodb::LogicalCollection* col,
std::string const& keysId, std::string const& cid,
std::string const& collectionName, TRI_voc_tick_t maxTick,
std::string& errorMsg);
//////////////////////////////////////////////////////////////////////////////
/// @brief changes the properties of a collection, based on the VelocyPack


@@ -23,6 +23,7 @@
/// @author Jan Christoph Uhde
////////////////////////////////////////////////////////////////////////////////
#include "Basics/StringRef.h"
#include "RocksDBEngine/RocksDBCommon.h"
#include "RocksDBEngine/RocksDBComparator.h"
#include "RocksDBEngine/RocksDBEngine.h"
@@ -35,6 +36,7 @@
#include <rocksdb/comparator.h>
#include <rocksdb/convenience.h>
#include <rocksdb/utilities/transaction_db.h>
#include <velocypack/Iterator.h>
#include "Logger/Logger.h"
namespace arangodb {
@@ -127,6 +129,68 @@ void uint64ToPersistent(std::string& p, uint64_t value) {
} while (++len < sizeof(uint64_t));
}
bool hasObjectIds(VPackSlice const& inputSlice) {
bool rv = false;
if (inputSlice.isObject()) {
for (auto const& objectPair :
arangodb::velocypack::ObjectIterator(inputSlice)) {
if (arangodb::StringRef(objectPair.key) == "objectId") {
return true;
}
rv = hasObjectIds(objectPair.value);
if (rv) {
return rv;
}
}
} else if (inputSlice.isArray()) {
for (auto const& slice : arangodb::velocypack::ArrayIterator(inputSlice)) {
if (rv) {
return rv;
}
rv = hasObjectIds(slice);
}
}
return rv;
}
VPackBuilder& stripObjectIdsImpl(VPackBuilder& builder, VPackSlice const& inputSlice) {
if (inputSlice.isObject()) {
builder.openObject();
for (auto const& objectPair :
arangodb::velocypack::ObjectIterator(inputSlice)) {
if (arangodb::StringRef(objectPair.key) == "objectId") {
continue;
}
builder.add(objectPair.key);
stripObjectIdsImpl(builder, objectPair.value);
}
builder.close();
} else if (inputSlice.isArray()) {
builder.openArray();
for (auto const& slice : arangodb::velocypack::ArrayIterator(inputSlice)) {
stripObjectIdsImpl(builder, slice);
}
builder.close();
} else {
builder.add(inputSlice);
}
return builder;
}
std::pair<VPackSlice, std::unique_ptr<VPackBuffer<uint8_t>>> stripObjectIds(
VPackSlice const& inputSlice, bool checkBeforeCopy) {
std::unique_ptr<VPackBuffer<uint8_t>> buffer = nullptr;
if (checkBeforeCopy) {
if (!hasObjectIds(inputSlice)) {
return {inputSlice, std::move(buffer)};
}
}
buffer.reset(new VPackBuffer<uint8_t>);
VPackBuilder builder(*buffer);
stripObjectIdsImpl(builder, inputSlice);
return {VPackSlice(buffer->data()), std::move(buffer)};
}
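A short usage sketch of the new helper (caller is hypothetical, mirroring InitialSyncer::run() above): with the default checkBeforeCopy == true, a slice containing no "objectId" attribute is returned as-is with a null buffer, so the common case costs one recursive scan and no copy.

auto stripped = stripObjectIds(inventorySlice);
VPackSlice cleaned = stripped.first;  // valid as long as stripped.second lives
// ... e.g. handleInventoryResponse(cleaned, incremental, errorMsg);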
RocksDBTransactionState* toRocksTransactionState(transaction::Methods* trx) {
TRI_ASSERT(trx != nullptr);
TransactionState* state = trx->state();


@@ -88,6 +88,10 @@ arangodb::Result convertStatus(rocksdb::Status const&,
uint64_t uint64FromPersistent(char const* p);
void uint64ToPersistent(char* p, uint64_t value);
void uint64ToPersistent(std::string& out, uint64_t value);
std::pair<VPackSlice, std::unique_ptr<VPackBuffer<uint8_t>>> stripObjectIds(
VPackSlice const& inputSlice, bool checkBeforeCopy = true);
RocksDBTransactionState* toRocksTransactionState(transaction::Methods* trx);
rocksdb::TransactionDB* globalRocksDB();
RocksDBEngine* globalRocksEngine();


@@ -251,7 +251,7 @@ int RocksDBEdgeIndex::remove(transaction::Methods* trx,
void RocksDBEdgeIndex::batchInsert(
transaction::Methods* trx,
std::vector<std::pair<TRI_voc_rid_t, VPackSlice>> const& documents,
arangodb::basics::LocalTaskQueue* queue) {
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) {
// acquire rocksdb transaction
RocksDBTransactionState* state = rocksutils::toRocksTransactionState(trx);
rocksdb::Transaction* rtrx = state->rocksTransaction();


@@ -111,7 +111,7 @@ class RocksDBEdgeIndex final : public RocksDBIndex {
void batchInsert(
transaction::Methods*,
std::vector<std::pair<TRI_voc_rid_t, arangodb::velocypack::Slice>> const&,
arangodb::basics::LocalTaskQueue* queue = nullptr) override;
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) override;
int drop() override;


@@ -30,12 +30,14 @@
#include "Basics/StaticStrings.h"
#include "Basics/Thread.h"
#include "Basics/VelocyPackHelper.h"
#include "Basics/build.h"
#include "GeneralServer/RestHandlerFactory.h"
#include "Logger/Logger.h"
#include "ProgramOptions/ProgramOptions.h"
#include "ProgramOptions/Section.h"
#include "RestHandler/RestHandlerCreator.h"
#include "RestServer/DatabasePathFeature.h"
#include "RestServer/ServerIdFeature.h"
#include "RestServer/ViewTypesFeature.h"
#include "RocksDBEngine/RocksDBBackgroundThread.h"
#include "RocksDBEngine/RocksDBCollection.h"
@@ -54,6 +56,7 @@
#include "RocksDBEngine/RocksDBV8Functions.h"
#include "RocksDBEngine/RocksDBValue.h"
#include "RocksDBEngine/RocksDBView.h"
#include "VocBase/replication-applier.h"
#include "VocBase/ticks.h"
#include <rocksdb/convenience.h>
@@ -227,7 +230,12 @@ void RocksDBEngine::start() {
}
}
void RocksDBEngine::stop() {}
void RocksDBEngine::stop() {
if (!isEnabled()) {
return;
}
replicationManager()->dropAll();
}
void RocksDBEngine::unprepare() {
if (!isEnabled()) {
@@ -300,7 +308,6 @@ void RocksDBEngine::getDatabases(arangodb::velocypack::Builder& result) {
rocksdb::ReadOptions readOptions;
std::unique_ptr<rocksdb::Iterator> iter(_db->NewIterator(readOptions));
result.openArray();
auto rSlice = rocksDBSlice(RocksDBEntryType::Database);
for (iter->Seek(rSlice); iter->Valid() && iter->key().starts_with(rSlice);
@@ -484,7 +491,7 @@ TRI_vocbase_t* RocksDBEngine::openDatabase(
return openExistingDatabase(id, name, true, isUpgrade);
}
RocksDBEngine::Database* RocksDBEngine::createDatabase(
TRI_vocbase_t* RocksDBEngine::createDatabase(
TRI_voc_tick_t id, arangodb::velocypack::Slice const& args, int& status) {
status = TRI_ERROR_NO_ERROR;
auto vocbase = std::make_unique<TRI_vocbase_t>(TRI_VOCBASE_TYPE_NORMAL, id,
@@ -517,10 +524,6 @@ int RocksDBEngine::writeCreateCollectionMarker(TRI_voc_tick_t databaseId,
void RocksDBEngine::prepareDropDatabase(TRI_vocbase_t* vocbase,
bool useWriteMarker, int& status) {
// probably not required
// THROW_ARANGO_NOT_YET_IMPLEMENTED();
// status = saveDatabaseParameters(vocbase->id(), vocbase->name(), true);
VPackBuilder builder;
builder.openObject();
builder.add("id", VPackValue(std::to_string(vocbase->id())));
@ -531,7 +534,8 @@ void RocksDBEngine::prepareDropDatabase(TRI_vocbase_t* vocbase,
status = writeCreateDatabaseMarker(vocbase->id(), builder.slice());
}
Result RocksDBEngine::dropDatabase(Database* database) {
Result RocksDBEngine::dropDatabase(TRI_vocbase_t* database) {
replicationManager()->drop(database);
return dropDatabase(database->id());
}
@ -849,6 +853,58 @@ std::pair<TRI_voc_tick_t, TRI_voc_cid_t> RocksDBEngine::mapObjectToCollection(
return it->second;
}
Result RocksDBEngine::createLoggerState(TRI_vocbase_t* vocbase, VPackBuilder& builder){
Result res;
rocksdb::Status status = _db->GetBaseDB()->SyncWAL();
if (!status.ok()) {
res = rocksutils::convertStatus(status).errorNumber();
return res;
}
builder.add(VPackValue(VPackValueType::Object)); // Base
rocksdb::SequenceNumber lastTick = _db->GetLatestSequenceNumber();
// "state" part
builder.add("state", VPackValue(VPackValueType::Object)); //open
builder.add("running", VPackValue(true));
builder.add("lastLogTick", VPackValue(std::to_string(lastTick)));
builder.add("lastUncommittedLogTick", VPackValue(std::to_string(lastTick)));
builder.add("totalEvents", VPackValue(0)); // s.numEvents + s.numEventsSync
builder.add("time", VPackValue(utilities::timeString()));
builder.close();
// "server" part
builder.add("server", VPackValue(VPackValueType::Object)); //open
builder.add("version", VPackValue(ARANGODB_VERSION));
builder.add("serverId", VPackValue(std::to_string(ServerIdFeature::getId())));
builder.close();
// "clients" part
builder.add("clients", VPackValue(VPackValueType::Array)); //open
if(vocbase != nullptr) { //add clients
auto allClients = vocbase->getReplicationClients();
for (auto& it : allClients) {
// One client
builder.add(VPackValue(VPackValueType::Object));
builder.add("serverId", VPackValue(std::to_string(std::get<0>(it))));
char buffer[21];
TRI_GetTimeStampReplication(std::get<1>(it), &buffer[0], sizeof(buffer));
builder.add("time", VPackValue(buffer));
builder.add("lastServedTick", VPackValue(std::to_string(std::get<2>(it))));
builder.close();
}
}
builder.close(); // clients
builder.close(); // base
return res;
}
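
Editor's note: a hedged usage sketch for the new `createLoggerState`, assuming `engine` is a valid RocksDBEngine* (e.g. rocksutils::globalRocksEngine()). The REST handler and the V8 binding below call it in exactly this shape; `vocbase` may be nullptr to leave the clients array empty:

VPackBuilder builder;
arangodb::Result res = engine->createLoggerState(vocbase /* or nullptr */, builder);
if (res.ok()) {
  // builder.slice() holds { "state": {...}, "server": {...}, "clients": [...] }
}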
Result RocksDBEngine::dropDatabase(TRI_voc_tick_t id) {
using namespace rocksutils;
Result res;

View File

@ -123,14 +123,14 @@ class RocksDBEngine final : public StorageEngine {
virtual TRI_vocbase_t* openDatabase(
arangodb::velocypack::Slice const& parameters, bool isUpgrade,
int&) override;
Database* createDatabase(TRI_voc_tick_t id,
TRI_vocbase_t* createDatabase(TRI_voc_tick_t id,
arangodb::velocypack::Slice const& args,
int& status) override;
int writeCreateDatabaseMarker(TRI_voc_tick_t id,
VPackSlice const& slice) override;
void prepareDropDatabase(TRI_vocbase_t* vocbase, bool useWriteMarker,
int& status) override;
Result dropDatabase(Database* database) override;
Result dropDatabase(TRI_vocbase_t* database) override;
void waitUntilDeletion(TRI_voc_tick_t id, bool force, int& status) override;
// wal in recovery
@ -248,6 +248,8 @@ class RocksDBEngine final : public StorageEngine {
void addCollectionMapping(uint64_t, TRI_voc_tick_t, TRI_voc_cid_t);
std::pair<TRI_voc_tick_t, TRI_voc_cid_t> mapObjectToCollection(uint64_t);
Result createLoggerState(TRI_vocbase_t* vocbase, VPackBuilder& builder);
private:
Result dropDatabase(TRI_voc_tick_t);
bool systemDatabaseExists();

View File

@ -33,6 +33,7 @@
#include "Transaction/Helpers.h"
#include "Transaction/StandaloneContext.h"
#include "Transaction/UserTransaction.h"
#include "Utils/DatabaseGuard.h"
#include "VocBase/replication-common.h"
#include "VocBase/ticks.h"
@ -48,12 +49,14 @@ double const RocksDBReplicationContext::DefaultTTL = 30 * 60.0;
RocksDBReplicationContext::RocksDBReplicationContext()
: _id(TRI_NewTickServer()),
_lastTick(0),
_currentTick(0),
_trx(),
_collection(nullptr),
_iter(),
_mdr(),
_customTypeHandler(),
_vpackOptions(Options::Defaults),
_lastChunkOffset(0),
_expires(TRI_microtime() + DefaultTTL),
_isDeleted(false),
_isUsed(true),
@ -77,22 +80,26 @@ uint64_t RocksDBReplicationContext::count() const {
// creates new transaction/snapshot
void RocksDBReplicationContext::bind(TRI_vocbase_t* vocbase) {
releaseDumpingResources();
_trx = createTransaction(vocbase);
if ((_trx.get() == nullptr) || (_trx->vocbase() != vocbase)) {
releaseDumpingResources();
_trx = createTransaction(vocbase);
}
}
int RocksDBReplicationContext::bindCollection(
std::string const& collectionName) {
if ((_collection == nullptr) || _collection->name() != collectionName) {
if ((_collection == nullptr) ||
((_collection->name() != collectionName) &&
std::to_string(_collection->cid()) != collectionName)) {
_collection = _trx->vocbase()->lookupCollection(collectionName);
if (_collection == nullptr) {
return TRI_ERROR_BAD_PARAMETER;
RocksDBReplicationResult(TRI_ERROR_BAD_PARAMETER, _lastTick);
}
_trx->addCollectionAtRuntime(collectionName);
_iter = _collection->getAllIterator(_trx.get(), &_mdr,
false); //_mdr is not used nor updated
_currentTick = 1;
_hasMore = true;
}
return TRI_ERROR_NO_ERROR;
@ -127,7 +134,10 @@ RocksDBReplicationResult RocksDBReplicationContext::dump(
if (_trx.get() == nullptr) {
return RocksDBReplicationResult(TRI_ERROR_BAD_PARAMETER, _lastTick);
}
bindCollection(collectionName);
int res = bindCollection(collectionName);
if (res != TRI_ERROR_NO_ERROR) {
return RocksDBReplicationResult(res, _lastTick);
}
// set type
int type = 2300; // documents
@ -172,13 +182,19 @@ RocksDBReplicationResult RocksDBReplicationContext::dump(
try {
_hasMore = _iter->next(cb, 10); // TODO: adjust limit?
} catch (std::exception const& ex) {
_hasMore = false;
return RocksDBReplicationResult(TRI_ERROR_INTERNAL, _lastTick);
} catch (RocksDBReplicationResult const& ex) {
_hasMore = false;
return ex;
}
}
return RocksDBReplicationResult(TRI_ERROR_NO_ERROR, _lastTick);
if (_hasMore) {
_currentTick++;
}
return RocksDBReplicationResult(TRI_ERROR_NO_ERROR, _currentTick);
}
arangodb::Result RocksDBReplicationContext::dumpKeyChunks(VPackBuilder& b,
@ -193,7 +209,6 @@ arangodb::Result RocksDBReplicationContext::dumpKeyChunks(VPackBuilder& b,
VPackSlice highKey; // FIXME: no good keeping this
uint64_t hash = 0x012345678;
// auto cb = [&](DocumentIdentifierToken const& token, StringRef const& key) {
auto cb = [&](DocumentIdentifierToken const& token) {
bool ok = _collection->readDocument(_trx.get(), token, _mdr);
if (!ok) {
@ -218,7 +233,6 @@ arangodb::Result RocksDBReplicationContext::dumpKeyChunks(VPackBuilder& b,
b.openArray();
while (_hasMore && true /*sizelimit*/) {
try {
//_hasMore = primary->nextWithKey(cb, chunkSize);
_hasMore = primary->next(cb, chunkSize);
b.add(VPackValue(VPackValueType::Object));
@ -378,10 +392,13 @@ void RocksDBReplicationContext::releaseDumpingResources() {
_iter.reset();
}
_collection = nullptr;
_guard.reset();
}
std::unique_ptr<transaction::Methods>
RocksDBReplicationContext::createTransaction(TRI_vocbase_t* vocbase) {
_guard.reset(new DatabaseGuard(vocbase));
double lockTimeout = transaction::Methods::DefaultLockTimeout;
std::shared_ptr<transaction::StandaloneContext> ctx =
transaction::StandaloneContext::Create(vocbase);
@ -389,6 +406,7 @@ RocksDBReplicationContext::createTransaction(TRI_vocbase_t* vocbase) {
ctx, {}, {}, {}, lockTimeout, false, true));
Result res = trx->begin();
if (!res.ok()) {
_guard.reset();
THROW_ARANGO_EXCEPTION(res);
}
_customTypeHandler = ctx->orderCustomTypeHandler();

View File

@ -36,6 +36,7 @@
#include <velocypack/Slice.h>
namespace arangodb {
class DatabaseGuard;
class RocksDBReplicationContext {
public:
@ -53,6 +54,13 @@ class RocksDBReplicationContext {
TRI_voc_tick_t id() const;
uint64_t lastTick() const;
uint64_t count() const;
TRI_vocbase_t* vocbase() const {
if (_trx == nullptr) {
return nullptr;
}
return _trx->vocbase();
}
// creates new transaction/snapshot
void bind(TRI_vocbase_t*);
@ -106,13 +114,15 @@ class RocksDBReplicationContext {
private:
TRI_voc_tick_t _id;
uint64_t _lastTick;
uint64_t _currentTick;
std::unique_ptr<transaction::Methods> _trx;
LogicalCollection* _collection;
std::unique_ptr<IndexIterator> _iter;
ManagedDocumentResult _mdr;
std::shared_ptr<arangodb::velocypack::CustomTypeHandler> _customTypeHandler;
arangodb::velocypack::Options _vpackOptions;
uint64_t _lastChunkOffset = 0;
uint64_t _lastChunkOffset;
std::unique_ptr<DatabaseGuard> _guard;
double _expires;
bool _isDeleted;

View File

@ -238,6 +238,40 @@ bool RocksDBReplicationManager::containsUsedContext() {
return false;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief drop contexts by database (at least mark them as deleted)
////////////////////////////////////////////////////////////////////////////////
void RocksDBReplicationManager::drop(TRI_vocbase_t* vocbase) {
{
MUTEX_LOCKER(mutexLocker, _lock);
for (auto& context : _contexts) {
if (context.second->vocbase() == vocbase) {
context.second->deleted();
}
}
}
garbageCollect(true);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief drop all contexts (at least mark them as deleted)
////////////////////////////////////////////////////////////////////////////////
void RocksDBReplicationManager::dropAll() {
{
MUTEX_LOCKER(mutexLocker, _lock);
for (auto& context : _contexts) {
context.second->deleted();
}
}
garbageCollect(true);
}
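
Editor's note: both drop variants follow a mark-then-collect pattern — contexts are only flagged as deleted while `_lock` is held, and the actual reclamation happens in garbageCollect(true) afterwards, so a context currently handed out to a client is not freed underneath it. A condensed, self-contained sketch of that pattern with simplified stand-in types (the real contexts track considerably more state):

#include <cstdint>
#include <mutex>
#include <unordered_map>

struct Ctx {                 // stand-in for RocksDBReplicationContext
  void* vocbase = nullptr;
  bool deleted = false;
  bool inUse = false;
};

class ManagerSketch {
 public:
  void drop(void* vocbase) {
    {
      std::lock_guard<std::mutex> guard(_lock);
      for (auto& it : _contexts) {
        if (it.second.vocbase == vocbase) {
          it.second.deleted = true;  // only mark under the lock
        }
      }
    }
    garbageCollect();  // reclaim in a separate pass
  }

 private:
  void garbageCollect() {
    std::lock_guard<std::mutex> guard(_lock);
    for (auto it = _contexts.begin(); it != _contexts.end();) {
      if (it->second.deleted && !it->second.inUse) {
        it = _contexts.erase(it);  // safe: nobody is using it
      } else {
        ++it;
      }
    }
  }

  std::mutex _lock;
  std::unordered_map<uint64_t, Ctx> _contexts;
};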
////////////////////////////////////////////////////////////////////////////////
/// @brief run a garbage collection on the contexts
////////////////////////////////////////////////////////////////////////////////

View File

@ -92,6 +92,18 @@ class RocksDBReplicationManager {
bool containsUsedContext();
//////////////////////////////////////////////////////////////////////////////
/// @brief drop contexts by database (at least mark them as deleted)
//////////////////////////////////////////////////////////////////////////////
void drop(TRI_vocbase_t*);
//////////////////////////////////////////////////////////////////////////////
/// @brief drop all contexts (at least mark them as deleted)
//////////////////////////////////////////////////////////////////////////////
void dropAll();
//////////////////////////////////////////////////////////////////////////////
/// @brief run a garbage collection on the contexts
//////////////////////////////////////////////////////////////////////////////

View File

@ -43,7 +43,7 @@ class WBReader : public rocksdb::WriteBatch::Handler {
explicit WBReader(TRI_vocbase_t* vocbase, uint64_t from, size_t& limit,
bool includeSystem, VPackBuilder& builder)
: _vocbase(vocbase),
_from(from),
/* _from(from), */
_limit(limit),
_includeSystem(includeSystem),
_builder(builder) {}
@ -170,7 +170,7 @@ class WBReader : public rocksdb::WriteBatch::Handler {
private:
TRI_vocbase_t* _vocbase;
uint64_t _from;
/* uint64_t _from; */
size_t& _limit;
bool _includeSystem;
VPackBuilder& _builder;

View File

@ -30,6 +30,7 @@
#include "Basics/conversions.h"
#include "Basics/files.h"
#include "Cluster/ClusterComm.h"
#include "Cluster/ClusterFeature.h"
#include "Cluster/ClusterMethods.h"
#include "Cluster/FollowerInfo.h"
#include "GeneralServer/GeneralServer.h"
@ -328,58 +329,12 @@ bool RocksDBRestReplicationHandler::isCoordinatorError() {
void RocksDBRestReplicationHandler::handleCommandLoggerState() {
VPackBuilder builder;
builder.add(VPackValue(VPackValueType::Object)); // Base
// MMFilesLogfileManager::instance()->waitForSync(10.0);
// MMFilesLogfileManagerState const s =
// MMFilesLogfileManager::instance()->state();
rocksdb::TransactionDB* db =
static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE)->db();
rocksdb::Status status = db->GetBaseDB()->SyncWAL();
if (!status.ok()) {
Result res = rocksutils::convertStatus(status).errorNumber();
auto res = globalRocksEngine()->createLoggerState(_vocbase, builder);
if (res.fail()) {
generateError(rest::ResponseCode::BAD, res.errorNumber(),
res.errorMessage());
return;
}
rocksdb::SequenceNumber lastTick = db->GetLatestSequenceNumber();
// "state" part
builder.add("state", VPackValue(VPackValueType::Object));
builder.add("running", VPackValue(true));
builder.add("lastLogTick", VPackValue(std::to_string(lastTick)));
builder.add("lastUncommittedLogTick",
VPackValue(std::to_string(lastTick + 1)));
builder.add("totalEvents", VPackValue(0)); // s.numEvents + s.numEventsSync
builder.add("time", VPackValue(utilities::timeString()));
builder.close();
// "server" part
builder.add("server", VPackValue(VPackValueType::Object));
builder.add("version", VPackValue(ARANGODB_VERSION));
builder.add("serverId", VPackValue(std::to_string(ServerIdFeature::getId())));
builder.close();
// "clients" part
builder.add("clients", VPackValue(VPackValueType::Array));
auto allClients = _vocbase->getReplicationClients();
for (auto& it : allClients) {
// One client
builder.add(VPackValue(VPackValueType::Object));
builder.add("serverId", VPackValue(std::to_string(std::get<0>(it))));
char buffer[21];
TRI_GetTimeStampReplication(std::get<1>(it), &buffer[0], sizeof(buffer));
builder.add("time", VPackValue(buffer));
builder.add("lastServedTick", VPackValue(std::to_string(std::get<2>(it))));
builder.close();
}
builder.close(); // clients
builder.close(); // base
generateResult(rest::ResponseCode::OK, builder.slice());
}
@ -872,7 +827,8 @@ void RocksDBRestReplicationHandler::handleCommandRestoreCollection() {
"invalid JSON");
return;
}
VPackSlice const slice = parsedRequest->slice();
auto pair = stripObjectIds(parsedRequest->slice());
VPackSlice const slice = pair.first;
bool overwrite = false;
@ -1776,14 +1732,15 @@ int RocksDBRestReplicationHandler::processRestoreCollectionCoordinator(
if (dropExisting) {
int res = ci->dropCollectionCoordinator(dbName, col->cid_as_string(),
errorMsg, 0.0);
if (res == TRI_ERROR_FORBIDDEN) {
if (res == TRI_ERROR_FORBIDDEN ||
res == TRI_ERROR_CLUSTER_MUST_NOT_DROP_COLL_OTHER_DISTRIBUTESHARDSLIKE) {
// some collections must not be dropped
res = truncateCollectionOnCoordinator(dbName, name);
if (res != TRI_ERROR_NO_ERROR) {
errorMsg =
"unable to truncate collection (dropping is forbidden): " + name;
return res;
}
return res;
}
if (res != TRI_ERROR_NO_ERROR) {
@ -1863,8 +1820,9 @@ int RocksDBRestReplicationHandler::processRestoreCollectionCoordinator(
VPackSlice const merged = mergedBuilder.slice();
try {
bool createWaitsForSyncReplication = application_features::ApplicationServer::getFeature<ClusterFeature>("Cluster")->createWaitsForSyncReplication();
auto col = ClusterMethods::createCollectionOnCoordinator(collectionType,
_vocbase, merged);
_vocbase, merged, true, createWaitsForSyncReplication);
TRI_ASSERT(col != nullptr);
} catch (basics::Exception const& e) {
// Error, report it.

View File

@ -106,4 +106,5 @@ void ListenTask::stop() {
_bound = false;
_acceptor->close();
_acceptor.reset();
}

View File

@ -65,7 +65,7 @@ class ConnectionStatistics {
_error = false;
}
static size_t const QUEUE_SIZE = 5000;
static size_t const QUEUE_SIZE = 64 * 1024 - 2; // current (1.62) boost maximum
static Mutex _dataLock;

View File

@ -152,7 +152,7 @@ class RequestStatistics {
void trace_log();
private:
static size_t const QUEUE_SIZE = 1000;
static size_t const QUEUE_SIZE = 64 * 1024 - 2; // current (1.62) boost maximum
static arangodb::Mutex _dataLock;
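
Editor's note: both statistics queues grow to 64 * 1024 - 2 slots, which the comments tie to the Boost 1.62 ceiling for compile-time-sized lock-free queues. A minimal sketch of such a fixed-capacity queue, assuming Boost.Lockfree is available; push and pop return false instead of blocking when the queue is full or empty:

#include <boost/lockfree/queue.hpp>

// Fixed-capacity, lock-free multi-producer/multi-consumer queue; the
// capacity mirrors QUEUE_SIZE above.
static boost::lockfree::queue<void*, boost::lockfree::capacity<64 * 1024 - 2>> freeList;

bool release(void* stats) { return freeList.push(stats); }  // false when full
bool acquire(void*& stats) { return freeList.pop(stats); }  // false when empty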

View File

@ -146,7 +146,6 @@ class StorageEngine : public application_features::ApplicationFeature {
// TODO add pre / post conditions for functions
using Database = TRI_vocbase_t;
using CollectionView = LogicalCollection;
virtual void waitForSync(TRI_voc_tick_t tick) = 0;
@ -154,10 +153,10 @@ class StorageEngine : public application_features::ApplicationFeature {
//// operations on databases
/// @brief opens a database
virtual Database* openDatabase(arangodb::velocypack::Slice const& args, bool isUpgrade, int& status) = 0;
Database* openDatabase(arangodb::velocypack::Slice const& args, bool isUpgrade){
virtual TRI_vocbase_t* openDatabase(arangodb::velocypack::Slice const& args, bool isUpgrade, int& status) = 0;
TRI_vocbase_t* openDatabase(arangodb::velocypack::Slice const& args, bool isUpgrade){
int status;
Database* rv = openDatabase(args, isUpgrade, status);
TRI_vocbase_t* rv = openDatabase(args, isUpgrade, status);
TRI_ASSERT(status == TRI_ERROR_NO_ERROR);
TRI_ASSERT(rv != nullptr);
return rv;
@ -172,16 +171,16 @@ class StorageEngine : public application_features::ApplicationFeature {
// the WAL entry for the database creation will be written *after* the call
// to "createDatabase" returns
// no way to acquire id within this function?!
virtual Database* createDatabase(TRI_voc_tick_t id, arangodb::velocypack::Slice const& args, int& status) = 0;
Database* createDatabase(TRI_voc_tick_t id, arangodb::velocypack::Slice const& args ){
virtual TRI_vocbase_t* createDatabase(TRI_voc_tick_t id, arangodb::velocypack::Slice const& args, int& status) = 0;
TRI_vocbase_t* createDatabase(TRI_voc_tick_t id, arangodb::velocypack::Slice const& args ){
int status;
Database* rv = createDatabase(id, args, status);
TRI_vocbase_t* rv = createDatabase(id, args, status);
TRI_ASSERT(status == TRI_ERROR_NO_ERROR);
TRI_ASSERT(rv != nullptr);
return rv;
}
// @brief wirte create marker for database
// @brief write create marker for database
virtual int writeCreateDatabaseMarker(TRI_voc_tick_t id, VPackSlice const& slice) = 0;
// asks the storage engine to drop the specified database and persist the
@ -194,14 +193,14 @@ class StorageEngine : public application_features::ApplicationFeature {
//
// is done under a lock in database feature
virtual void prepareDropDatabase(TRI_vocbase_t* vocbase, bool useWriteMarker, int& status) = 0;
void prepareDropDatabase(Database* db, bool useWriteMarker){
void prepareDropDatabase(TRI_vocbase_t* db, bool useWriteMarker){
int status = 0;
prepareDropDatabase(db, useWriteMarker, status);
TRI_ASSERT(status == TRI_ERROR_NO_ERROR);
};
// perform a physical deletion of the database
virtual Result dropDatabase(Database*) = 0;
virtual Result dropDatabase(TRI_vocbase_t*) = 0;
/// @brief wait until a database directory disappears - not under lock in databaseFreature
virtual void waitUntilDeletion(TRI_voc_tick_t id, bool force, int& status) = 0;

View File

@ -40,7 +40,9 @@ class DatabaseGuard {
explicit DatabaseGuard(TRI_vocbase_t* vocbase)
: _vocbase(vocbase) {
TRI_ASSERT(vocbase != nullptr);
_vocbase->use();
if (!_vocbase->use()) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_ARANGO_DATABASE_NOT_FOUND);
}
}
/// @brief create the guard, using a database id
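
Editor's note: with this change, constructing a DatabaseGuard on a database whose use count can no longer be raised (typically because the database is being dropped) throws instead of silently guarding a dying vocbase. A usage sketch, assuming the surrounding ArangoDB exception machinery and a TRI_vocbase_t* obtained elsewhere:

try {
  DatabaseGuard guard(vocbase);  // may throw TRI_ERROR_ARANGO_DATABASE_NOT_FOUND
  // ... the database cannot be fully dropped while `guard` is alive ...
} catch (arangodb::basics::Exception const& ex) {
  // database already gone; ex.code() yields the error number
}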

View File

@ -566,7 +566,7 @@ V8Context* V8DealerFeature::enterContext(TRI_vocbase_t* vocbase,
TimedAction exitWhenNoContext([](double waitTime) {
LOG_TOPIC(WARN, arangodb::Logger::V8) << "giving up waiting for unused V8 context after " << Logger::FIXED(waitTime) << " s";
}, 120);
}, 60);
V8Context* context = nullptr;

View File

@ -63,46 +63,44 @@ static void JS_StateLoggerReplication(
v8::HandleScope scope(isolate);
std::string engineName = EngineSelectorFeature::ENGINE->typeName();
v8::Handle<v8::Object> result = v8::Object::New(isolate);
v8::Handle<v8::Object> state = v8::Object::New(isolate);
state->Set(TRI_V8_ASCII_STRING("running"), v8::True(isolate));
if(engineName == "mmfiles"){
v8::Handle<v8::Object> state = v8::Object::New(isolate);
MMFilesLogfileManagerState const s = MMFilesLogfileManager::instance()->state();
state->Set(TRI_V8_ASCII_STRING("running"), v8::True(isolate));
state->Set(TRI_V8_ASCII_STRING("lastLogTick"),
TRI_V8UInt64String<TRI_voc_tick_t>(isolate, s.lastCommittedTick));
state->Set(TRI_V8_ASCII_STRING("lastUncommittedLogTick"), TRI_V8UInt64String<TRI_voc_tick_t>(isolate, s.lastAssignedTick));
state->Set(TRI_V8_ASCII_STRING("totalEvents"),
v8::Number::New(isolate, static_cast<double>(s.numEvents + s.numEventsSync)));
state->Set(TRI_V8_ASCII_STRING("time"), TRI_V8_STD_STRING(s.timeString));
result->Set(TRI_V8_ASCII_STRING("state"), state);
v8::Handle<v8::Object> server = v8::Object::New(isolate);
server->Set(TRI_V8_ASCII_STRING("version"),
TRI_V8_ASCII_STRING(ARANGODB_VERSION));
server->Set(TRI_V8_ASCII_STRING("serverId"),
TRI_V8_STD_STRING(StringUtils::itoa(ServerIdFeature::getId())));
result->Set(TRI_V8_ASCII_STRING("server"), server);
v8::Handle<v8::Object> clients = v8::Object::New(isolate);
result->Set(TRI_V8_ASCII_STRING("clients"), clients);
} else if (engineName == "rocksdb") {
rocksdb::TransactionDB* db = rocksutils::globalRocksDB();
uint64_t lastTick = db->GetLatestSequenceNumber();
state->Set(TRI_V8_ASCII_STRING("lastLogTick"),
TRI_V8UInt64String<TRI_voc_tick_t>(isolate, lastTick));
state->Set(TRI_V8_ASCII_STRING("lastUncommittedLogTick"),
TRI_V8UInt64String<TRI_voc_tick_t>(isolate, lastTick));
state->Set(TRI_V8_ASCII_STRING("totalEvents"),
v8::Number::New(isolate, static_cast<double>(0))); //s.numEvents + s.numEventsSync)));
state->Set(TRI_V8_ASCII_STRING("time"), TRI_V8_STD_STRING(utilities::timeString()));
VPackBuilder builder;
auto res = rocksutils::globalRocksEngine()->createLoggerState(nullptr,builder);
if(res.fail()){
TRI_V8_THROW_EXCEPTION(res);
return;
}
v8::Handle<v8::Value> resultValue = TRI_VPackToV8(isolate, builder.slice());
result = v8::Handle<v8::Object>::Cast(resultValue);
} else {
TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid storage engine");
return;
}
v8::Handle<v8::Object> result = v8::Object::New(isolate);
result->Set(TRI_V8_ASCII_STRING("state"), state);
v8::Handle<v8::Object> server = v8::Object::New(isolate);
server->Set(TRI_V8_ASCII_STRING("version"),
TRI_V8_ASCII_STRING(ARANGODB_VERSION));
server->Set(TRI_V8_ASCII_STRING("serverId"),
TRI_V8_STD_STRING(StringUtils::itoa(ServerIdFeature::getId())));
result->Set(TRI_V8_ASCII_STRING("server"), server);
v8::Handle<v8::Object> clients = v8::Object::New(isolate);
result->Set(TRI_V8_ASCII_STRING("clients"), clients);
TRI_V8_RETURN(result);
TRI_V8_TRY_CATCH_END
}

View File

@ -27,6 +27,7 @@
#include "Basics/VelocyPackHelper.h"
#include "Basics/conversions.h"
#include "Basics/tri-strings.h"
#include "Cluster/ClusterFeature.h"
#include "Cluster/ClusterInfo.h"
#include "Cluster/ClusterMethods.h"
#include "Indexes/Index.h"
@ -669,12 +670,8 @@ static void CreateVocBase(v8::FunctionCallbackInfo<v8::Value> const& args,
TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_DATABASE_NOT_FOUND);
}
// ...........................................................................
// We require exactly 1 or exactly 2 arguments -- anything else is an error
// ...........................................................................
if (args.Length() < 1 || args.Length() > 3) {
TRI_V8_THROW_EXCEPTION_USAGE("_create(<name>, <properties>, <type>)");
if (args.Length() < 1 || args.Length() > 4) {
TRI_V8_THROW_EXCEPTION_USAGE("_create(<name>, <properties>, <type>, <options>)");
}
if (TRI_GetOperationModeServer() == TRI_VOCBASE_MODE_NO_CREATE) {
@ -682,7 +679,7 @@ static void CreateVocBase(v8::FunctionCallbackInfo<v8::Value> const& args,
}
// optional, third parameter can override collection type
if (args.Length() == 3 && args[2]->IsString()) {
if (args.Length() >= 3 && args[2]->IsString()) {
std::string typeString = TRI_ObjectToString(args[2]);
if (typeString == "edge") {
collectionType = TRI_COL_TYPE_EDGE;
@ -691,6 +688,7 @@ static void CreateVocBase(v8::FunctionCallbackInfo<v8::Value> const& args,
}
}
PREVENT_EMBEDDED_TRANSACTION();
// extract the name
@ -725,9 +723,19 @@ static void CreateVocBase(v8::FunctionCallbackInfo<v8::Value> const& args,
infoSlice = builder.slice();
if (ServerState::instance()->isCoordinator()) {
bool createWaitsForSyncReplication = application_features::ApplicationServer::getFeature<ClusterFeature>("Cluster")->createWaitsForSyncReplication();
if (args.Length() >= 3 && args[args.Length()-1]->IsObject()) {
v8::Handle<v8::Object> obj = args[args.Length()-1]->ToObject();
auto v8WaitForSyncReplication = obj->Get(TRI_V8_ASCII_STRING("waitForSyncReplication"));
if (!v8WaitForSyncReplication->IsUndefined()) {
createWaitsForSyncReplication = TRI_ObjectToBoolean(v8WaitForSyncReplication);
}
}
std::unique_ptr<LogicalCollection> col =
ClusterMethods::createCollectionOnCoordinator(collectionType, vocbase,
infoSlice);
infoSlice, true, createWaitsForSyncReplication);
TRI_V8_RETURN(WrapCollection(isolate, col.release()));
}
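
Editor's note: the coordinator branch above lets the trailing argument be an options object whose waitForSyncReplication attribute overrides the cluster default. The lookup pattern generalizes; a hypothetical helper sketch (the helper name is invented, the V8 calls mirror those used above, and the TRI_V8_ASCII_STRING macro relies on an `isolate` in scope):

// Hypothetical helper, not part of this commit: read an optional boolean
// attribute from a V8 options object, keeping the default when absent.
static bool optionalBoolAttribute(v8::Isolate* isolate,
                                  v8::Handle<v8::Object> obj,
                                  char const* name, bool defaultValue) {
  v8::Handle<v8::Value> value = obj->Get(TRI_V8_ASCII_STRING(name));
  if (value->IsUndefined()) {
    return defaultValue;
  }
  return TRI_ObjectToBoolean(value);
}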

View File

@ -1,160 +0,0 @@
# - Check which parts of the C++11 standard the compiler supports
#
# When found it will set the following variables
#
# CXX11_COMPILER_FLAGS - the compiler flags needed to get C++11 features
#
# HAS_CXX11_AUTO - auto keyword
# HAS_CXX11_AUTO_RET_TYPE - function declaration with deduced return types
# HAS_CXX11_CLASS_OVERRIDE - override and final keywords for classes and methods
# HAS_CXX11_CONSTEXPR - constexpr keyword
# HAS_CXX11_CSTDINT_H - cstdint header
# HAS_CXX11_DECLTYPE - decltype keyword
# HAS_CXX11_FUNC - __func__ preprocessor constant
# HAS_CXX11_INITIALIZER_LIST - initializer list
# HAS_CXX11_LAMBDA - lambdas
# HAS_CXX11_LIB_REGEX - regex library
# HAS_CXX11_LONG_LONG - long long signed & unsigned types
# HAS_CXX11_NULLPTR - nullptr
# HAS_CXX11_RVALUE_REFERENCES - rvalue references
# HAS_CXX11_SIZEOF_MEMBER - sizeof() non-static members
# HAS_CXX11_STATIC_ASSERT - static_assert()
# HAS_CXX11_VARIADIC_TEMPLATES - variadic templates
# HAS_CXX11_SHARED_PTR - Shared Pointer
# HAS_CXX11_THREAD - thread
# HAS_CXX11_MUTEX - mutex
# HAS_CXX11_NOEXCEPT - noexcept
# HAS_CXX11_CONDITIONAL - conditional type definitions
#=============================================================================
# Copyright 2011,2012 Rolf Eike Beer <eike@sf-mail.de>
# Copyright 2012 Andreas Weis
# Copyright 2014 Kaveh Vahedipour <kaveh@codeare.org>
#
# Distributed under the OSI-approved BSD License (the "License");
# see accompanying file Copyright.txt for details.
#
# This software is distributed WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the License for more information.
#=============================================================================
# (To distribute this file outside of CMake, substitute the full
# License text for the above reference.)
#
# Each feature may have up to 3 checks, every one of them in its own file
# FEATURE.cpp - example that must build and return 0 when run
# FEATURE_fail.cpp - example that must build, but may not return 0 when run
# FEATURE_fail_compile.cpp - example that must fail compilation
#
# The first one is mandatory, the latter 2 are optional and do not depend on
# each other (i.e. only one may be present).
#
# Modification for std::thread (Kaveh Vahedipour, Forschungszentrum Juelich)
#
IF (NOT CMAKE_CXX_COMPILER_LOADED)
message(FATAL_ERROR "CheckCXX11Features modules only works if language CXX is enabled")
endif ()
cmake_minimum_required(VERSION 2.8.3)
#
### Check for needed compiler flags
#
include(CheckCXXCompilerFlag)
check_cxx_compiler_flag("-std=c++11" _HAS_CXX11_FLAG)
if (NOT _HAS_CXX11_FLAG)
check_cxx_compiler_flag("-std=c++0x" _HAS_CXX0X_FLAG)
endif ()
if (_HAS_CXX11_FLAG)
set(CXX11_COMPILER_FLAGS "-std=c++11")
elseif (_HAS_CXX0X_FLAG)
set(CXX11_COMPILER_FLAGS "-std=c++0x")
endif ()
function(cxx11_check_feature FEATURE_NAME RESULT_VAR)
if (NOT DEFINED ${RESULT_VAR})
set(_bindir "${CMAKE_CURRENT_BINARY_DIR}/cxx11/${FEATURE_NAME}")
set(_SRCFILE_BASE ${CMAKE_CURRENT_LIST_DIR}/CheckCXX11Features/cxx11-test-${FEATURE_NAME})
set(_LOG_NAME "\"${FEATURE_NAME}\"")
message(STATUS "Checking C++11 support for ${_LOG_NAME}")
set(_SRCFILE "${_SRCFILE_BASE}.cpp")
set(_SRCFILE_FAIL "${_SRCFILE_BASE}_fail.cpp")
set(_SRCFILE_FAIL_COMPILE "${_SRCFILE_BASE}_fail_compile.cpp")
if (CROSS_COMPILING)
try_compile(${RESULT_VAR} "${_bindir}" "${_SRCFILE}"
COMPILE_DEFINITIONS "${CXX11_COMPILER_FLAGS}")
if (${RESULT_VAR} AND EXISTS ${_SRCFILE_FAIL})
try_compile(${RESULT_VAR} "${_bindir}_fail" "${_SRCFILE_FAIL}"
COMPILE_DEFINITIONS "${CXX11_COMPILER_FLAGS}")
endif (${RESULT_VAR} AND EXISTS ${_SRCFILE_FAIL})
else (CROSS_COMPILING)
try_run(_RUN_RESULT_VAR _COMPILE_RESULT_VAR
"${_bindir}" "${_SRCFILE}"
COMPILE_DEFINITIONS "${CXX11_COMPILER_FLAGS}")
if (_COMPILE_RESULT_VAR AND NOT _RUN_RESULT_VAR)
set(${RESULT_VAR} TRUE)
else (_COMPILE_RESULT_VAR AND NOT _RUN_RESULT_VAR)
set(${RESULT_VAR} FALSE)
endif (_COMPILE_RESULT_VAR AND NOT _RUN_RESULT_VAR)
if (${RESULT_VAR} AND EXISTS ${_SRCFILE_FAIL})
try_run(_RUN_RESULT_VAR _COMPILE_RESULT_VAR
"${_bindir}_fail" "${_SRCFILE_FAIL}"
COMPILE_DEFINITIONS "${CXX11_COMPILER_FLAGS}")
if (_COMPILE_RESULT_VAR AND _RUN_RESULT_VAR)
set(${RESULT_VAR} TRUE)
else (_COMPILE_RESULT_VAR AND _RUN_RESULT_VAR)
set(${RESULT_VAR} FALSE)
endif (_COMPILE_RESULT_VAR AND _RUN_RESULT_VAR)
endif (${RESULT_VAR} AND EXISTS ${_SRCFILE_FAIL})
endif (CROSS_COMPILING)
if (${RESULT_VAR} AND EXISTS ${_SRCFILE_FAIL_COMPILE})
try_compile(_TMP_RESULT "${_bindir}_fail_compile" "${_SRCFILE_FAIL_COMPILE}"
COMPILE_DEFINITIONS "${CXX11_COMPILER_FLAGS}")
if (_TMP_RESULT)
set(${RESULT_VAR} FALSE)
else (_TMP_RESULT)
set(${RESULT_VAR} TRUE)
endif (_TMP_RESULT)
endif (${RESULT_VAR} AND EXISTS ${_SRCFILE_FAIL_COMPILE})
if (${RESULT_VAR})
message(STATUS "Checking C++11 support for ${_LOG_NAME}: works")
else (${RESULT_VAR})
message(FATAL_ERROR "Checking C++11 support for ${_LOG_NAME}: not supported")
endif (${RESULT_VAR})
set(${RESULT_VAR} ${${RESULT_VAR}} CACHE INTERNAL "C++11 support for ${_LOG_NAME}")
endif (NOT DEFINED ${RESULT_VAR})
endfunction(cxx11_check_feature)
cxx11_check_feature("__func__" HAS_CXX11_FUNC)
cxx11_check_feature("auto" HAS_CXX11_AUTO)
cxx11_check_feature("auto_ret_type" HAS_CXX11_AUTO_RET_TYPE)
#cxx11_check_feature("atomic_uint_fast16_t" HAS_CXX11_ATOMIC_UINT_FAST16_T)
cxx11_check_feature("class_override_final" HAS_CXX11_CLASS_OVERRIDE)
cxx11_check_feature("constexpr" HAS_CXX11_CONSTEXPR)
cxx11_check_feature("conditional" HAS_CXX11_CONDITIONAL)
#cxx11_check_feature("cstdint" HAS_CXX11_CSTDINT_H)
cxx11_check_feature("decltype" HAS_CXX11_DECLTYPE)
cxx11_check_feature("initializer_list" HAS_CXX11_INITIALIZER_LIST)
cxx11_check_feature("lambda" HAS_CXX11_LAMBDA)
cxx11_check_feature("range_based_for_loop" HAS_CXX11_RANGE_BASED_FOR_LOOP)
#cxx11_check_feature("long_long" HAS_CXX11_LONG_LONG)
cxx11_check_feature("nullptr" HAS_CXX11_NULLPTR)
cxx11_check_feature("tuple" HAS_CXX11_TUPLE)
cxx11_check_feature("regex" HAS_CXX11_LIB_REGEX)
cxx11_check_feature("rvalue-references" HAS_CXX11_RVALUE_REFERENCES)
cxx11_check_feature("sizeof_member" HAS_CXX11_SIZEOF_MEMBER)
cxx11_check_feature("static_assert" HAS_CXX11_STATIC_ASSERT)
cxx11_check_feature("variadic_templates" HAS_CXX11_VARIADIC_TEMPLATES)
cxx11_check_feature("shared_ptr" HAS_CXX11_SHARED_PTR)
cxx11_check_feature("unique_ptr" HAS_CXX11_UNIQUE_PTR)
cxx11_check_feature("weak_ptr" HAS_CXX11_WEAK_PTR)
cxx11_check_feature("thread" HAS_CXX11_THREAD)
cxx11_check_feature("mutex" HAS_CXX11_MUTEX)
cxx11_check_feature("noexcept" HAS_CXX11_NOEXCEPT)

View File

@ -1,8 +0,0 @@
int main(void)
{
if (!__func__)
return 1;
if (!(*__func__))
return 1;
return 0;
}

View File

@ -1,6 +0,0 @@
#include <atomic>
int main () {
std::atomic_uint_fast16_t a;
return 0;
}

View File

@ -1,12 +0,0 @@
int main()
{
auto i = 5;
auto f = 3.14159f;
auto d = 3.14159;
bool ret = (
(sizeof(f) < sizeof(d)) &&
(sizeof(i) == sizeof(int))
);
return ret ? 0 : 1;
}

View File

@ -1,7 +0,0 @@
int main(void)
{
// must fail because there is no initializer
auto i;
return 0;
}

View File

@ -1,8 +0,0 @@
auto foo(int i) -> int {
return i - 1;
}
int main()
{
return foo(1);
}

View File

@ -1,28 +0,0 @@
class base {
public:
virtual int foo(int a)
{ return 4 + a; }
int bar(int a)
{ return a - 2; }
};
class sub final : public base {
public:
virtual int foo(int a) override
{ return 8 + 2 * a; };
};
class sub2 final : public base {
public:
virtual int foo(int a) override final
{ return 8 + 2 * a; };
};
int main(void)
{
base b;
sub s;
sub2 t;
return (b.foo(2) * 2 == s.foo(2) && b.foo(2) * 2 == t.foo(2) ) ? 0 : 1;
}

View File

@ -1,25 +0,0 @@
class base {
public:
virtual int foo(int a)
{ return 4 + a; }
virtual int bar(int a) final
{ return a - 2; }
};
class sub final : public base {
public:
virtual int foo(int a) override
{ return 8 + 2 * a; };
virtual int bar(int a)
{ return a; }
};
class impossible : public sub { };
int main(void)
{
base b;
sub s;
return 1;
}

View File

@ -1,17 +0,0 @@
#include <type_traits>
#include <string>
template<class T> class A {
public:
typedef typename std::conditional<false, const std::string, std::string>::type StringType;
A() : s(""), t(0) {}
virtual ~A () {}
private:
StringType s;
T t;
};
int main() {
A<float> a;
return 0;
}

View File

@ -1,19 +0,0 @@
constexpr int square(int x)
{
return x*x;
}
constexpr int the_answer()
{
return 42;
}
int main()
{
int test_arr[square(3)];
bool ret = (
(square(the_answer()) == 1764) &&
(sizeof(test_arr)/sizeof(test_arr[0]) == 9)
);
return ret ? 0 : 1;
}

View File

@ -1,11 +0,0 @@
#include <cstdint>
int main()
{
bool test =
(sizeof(int8_t) == 1) &&
(sizeof(int16_t) == 2) &&
(sizeof(int32_t) == 4) &&
(sizeof(int64_t) == 8);
return test ? 0 : 1;
}

View File

@ -1,10 +0,0 @@
bool check_size(int i)
{
return sizeof(int) == sizeof(decltype(i));
}
int main()
{
bool ret = check_size(42);
return ret ? 0 : 1;
}

View File

@ -1,27 +0,0 @@
#include <vector>
class seq {
public:
seq(std::initializer_list<int> list);
int length() const;
private:
std::vector<int> m_v;
};
seq::seq(std::initializer_list<int> list)
: m_v(list)
{
}
int seq::length() const
{
return m_v.size();
}
int main(void)
{
seq a = {18, 20, 2, 0, 4, 7};
return (a.length() == 6) ? 0 : 1;
}

View File

@ -1,5 +0,0 @@
int main()
{
int ret = 0;
return ([&ret]() -> int { return ret; })();
}

View File

@ -1,7 +0,0 @@
int main(void)
{
long long l;
unsigned long long ul;
return ((sizeof(l) >= 8) && (sizeof(ul) >= 8)) ? 0 : 1;
}

View File

@ -1,6 +0,0 @@
#include <mutex>
int main() {
std::mutex _mutex;
return 0;
}

View File

@ -1,8 +0,0 @@
volatile void dummy () noexcept {
int a = 0;
}
int main () {
dummy();
return 0;
}

View File

@ -1,6 +0,0 @@
int main(void)
{
void *v = nullptr;
return v ? 1 : 0;
}

View File

@ -1,6 +0,0 @@
int main(void)
{
int i = nullptr;
return 1;
}

View File

@ -1,15 +0,0 @@
int main() {
int my_array[5] = {1, 2, 3, 4, 5};
for (int &x : my_array) {
x *= 2;
}
for (auto &x : my_array) {
x *= 2;
}
}

View File

@ -1,26 +0,0 @@
#include <algorithm>
#include <regex>
int parse_line(std::string const& line)
{
std::string tmp;
if(std::regex_search(line, std::regex("(\\s)+(-)?(\\d)+//(-)?(\\d)+(\\s)+"))) {
tmp = std::regex_replace(line, std::regex("(-)?(\\d)+//(-)?(\\d)+"), std::string("V"));
} else if(std::regex_search(line, std::regex("(\\s)+(-)?(\\d)+/(-)?(\\d)+(\\s)+"))) {
tmp = std::regex_replace(line, std::regex("(-)?(\\d)+/(-)?(\\d)+"), std::string("V"));
} else if(std::regex_search(line, std::regex("(\\s)+(-)?(\\d)+/(-)?(\\d)+/(-)?(\\d)+(\\s)+"))) {
tmp = std::regex_replace(line, std::regex("(-)?(\\d)+/(-)?(\\d)+/(-)?(\\d)+"), std::string("V"));
} else {
tmp = std::regex_replace(line, std::regex("(-)?(\\d)+"), std::string("V"));
}
return static_cast<int>(std::count(tmp.begin(), tmp.end(), 'V'));
}
int main()
{
bool test = (parse_line("f 7/7/7 -3/3/-3 2/-2/2") == 3) &&
(parse_line("f 7//7 3//-3 -2//2") == 3) &&
(parse_line("f 7/7 3/-3 -2/2") == 3) &&
(parse_line("f 7 3 -2") == 3);
return test ? 0 : 1;
}

View File

@ -1,57 +0,0 @@
#include <cassert>
class rvmove {
public:
void *ptr;
char *array;
rvmove()
: ptr(0),
array(new char[10])
{
ptr = this;
}
rvmove(rvmove &&other)
: ptr(other.ptr),
array(other.array)
{
other.array = 0;
other.ptr = 0;
}
~rvmove()
{
assert(((ptr != 0) && (array != 0)) || ((ptr == 0) && (array == 0)));
delete[] array;
}
rvmove &operator=(rvmove &&other)
{
delete[] array;
ptr = other.ptr;
array = other.array;
other.array = 0;
other.ptr = 0;
return *this;
}
static rvmove create()
{
return rvmove();
}
private:
rvmove(const rvmove &);
rvmove &operator=(const rvmove &);
};
int main()
{
rvmove mine;
if (mine.ptr != &mine)
return 1;
mine = rvmove::create();
if (mine.ptr == &mine)
return 1;
return 0;
}

View File

@ -1,6 +0,0 @@
#include <memory>
int main() {
std::shared_ptr<int> test;
return 0;
}

View File

@ -1,14 +0,0 @@
struct foo {
char bar;
int baz;
};
int main(void)
{
bool ret = (
(sizeof(foo::bar) == 1) &&
(sizeof(foo::baz) >= sizeof(foo::bar)) &&
(sizeof(foo) >= sizeof(foo::bar) + sizeof(foo::baz))
);
return ret ? 0 : 1;
}

View File

@ -1,9 +0,0 @@
struct foo {
int baz;
double bar;
};
int main(void)
{
return (sizeof(foo::bar) == 4) ? 0 : 1;
}

View File

@ -1,5 +0,0 @@
int main(void)
{
static_assert(0 < 1, "your ordering of integers is screwed");
return 0;
}

View File

@ -1,5 +0,0 @@
int main(void)
{
static_assert(1 < 0, "your ordering of integers is screwed");
return 0;
}

View File

@ -1,6 +0,0 @@
#include <thread>
int main() {
std::thread test;
return 0;
}

View File

@ -1,10 +0,0 @@
#include <tuple>
int main () {
typedef std::tuple <int, double, long &, const char *> test_tuple;
long lengthy = 12;
test_tuple proof (18, 6.5, lengthy, "Ciao!");
lengthy = std::get<0>(proof);
std::get<3>(proof) = " Beautiful!";
return 0;
}

View File

@ -1,6 +0,0 @@
#include <memory>
int main() {
std::unique_ptr<int> test;
return 0;
}

View File

@ -1,23 +0,0 @@
int Accumulate()
{
return 0;
}
template<typename T, typename... Ts>
int Accumulate(T v, Ts... vs)
{
return v + Accumulate(vs...);
}
template<int... Is>
int CountElements()
{
return sizeof...(Is);
}
int main()
{
int acc = Accumulate(1, 2, 3, 4, -5);
int count = CountElements<1,2,3,4,5>();
return ((acc == 5) && (count == 5)) ? 0 : 1;
}

View File

@ -1,6 +0,0 @@
#include <memory>
int main() {
std::weak_ptr<int> test;
return 0;
}

View File

@ -74,7 +74,9 @@ macro (install_readme input output)
if (MSVC)
set(CRLFSTYLE "CRLF")
endif ()
configure_file(${PROJECT_SOURCE_DIR}/${input} "${PROJECT_BINARY_DIR}/${output}" NEWLINE_STYLE ${CRLFSTYLE})
install(
CODE "configure_file(${PROJECT_SOURCE_DIR}/${input} \"${PROJECT_BINARY_DIR}/${output}\" NEWLINE_STYLE ${CRLFSTYLE})")
install(
FILES "${PROJECT_BINARY_DIR}/${output}"
DESTINATION "${where}"

View File

@ -19,3 +19,6 @@ threads = 20
[ssl]
keyfile = @TOP_DIR@/UnitTests/server.pem
[cluster]
system-replication-factor = 1

View File

@ -64,10 +64,12 @@ function collectionRepresentation(collection, showProperties, showCount, showFig
result.indexBuckets = properties.indexBuckets;
if (cluster.isCoordinator()) {
result.shardKeys = properties.shardKeys;
result.avoidServers = properties.avoidServers;
result.numberOfShards = properties.numberOfShards;
result.replicationFactor = properties.replicationFactor;
result.avoidServers = properties.avoidServers;
result.distributeShardsLike = properties.distributeShardsLike;
result.shardKeys = properties.shardKeys;
}
}
@ -203,6 +205,15 @@ function post_api_collection (req, res) {
}
try {
var options = {};
if (req.parameters.hasOwnProperty('waitForSyncReplication')) {
var value = req.parameters.waitForSyncReplication.toLowerCase();
if (value === 'true' || value === 'yes' || value === 'on' || value === 'y' || value === '1') {
options.waitForSyncReplication = true;
} else {
options.waitForSyncReplication = false;
}
}
var collection;
if (typeof (r.type) === 'string') {
if (r.type.toLowerCase() === 'edge' || r.type === '3') {
@ -210,9 +221,9 @@ function post_api_collection (req, res) {
}
}
if (r.type === arangodb.ArangoCollection.TYPE_EDGE) {
collection = arangodb.db._createEdgeCollection(r.name, r.parameters);
collection = arangodb.db._createEdgeCollection(r.name, r.parameters, options);
} else {
collection = arangodb.db._createDocumentCollection(r.name, r.parameters);
collection = arangodb.db._createDocumentCollection(r.name, r.parameters, options);
}
var result = {};

View File

@ -970,12 +970,25 @@ actions.defineHttp({
"body must be an object with a string attribute 'server'");
return;
}
// First translate the server name from short name to long name:
var server = body.server;
var servers = global.ArangoClusterInfo.getDBServers();
for (let i = 0; i < servers.length; i++) {
if (servers[i].serverId !== server) {
if (servers[i].serverName === server) {
server = servers[i].serverId;
break;
}
}
}
var ok = true;
var id;
try {
id = ArangoClusterInfo.uniqid();
var todo = { 'type': 'cleanOutServer',
'server': body.server,
'server': server,
'jobId': id,
'timeCreated': (new Date()).toISOString(),
'creator': ArangoServerState.id() };

View File

@ -39,6 +39,7 @@
'nodes': 'nodes',
'shards': 'shards',
'node/:name': 'node',
'nodeInfo/:id': 'nodeInfo',
'logs': 'logger',
'helpus': 'helpUs',
'graph/:name': 'graph',
@ -327,16 +328,40 @@
return;
}
if (!this.nodeView) {
this.nodeView = new window.NodeView({
coordname: name,
coordinators: this.coordinatorCollection,
dbServers: this.dbServers
});
if (this.nodeView) {
this.nodeView.remove();
}
this.nodeView = new window.NodeView({
coordname: name,
coordinators: this.coordinatorCollection,
dbServers: this.dbServers
});
this.nodeView.render();
},
nodeInfo: function (id, initialized) {
this.checkUser();
if (!initialized || this.isCluster === undefined) {
this.waitForInit(this.nodeInfo.bind(this), id);
return;
}
if (this.isCluster === false) {
this.routes[''] = 'dashboard';
this.navigate('#dashboard', {trigger: true});
return;
}
if (this.nodeInfoView) {
this.nodeInfoView.remove();
}
this.nodeInfoView = new window.NodeInfoView({
nodeId: id,
coordinators: this.coordinatorCollection,
dbServers: this.dbServers[0]
});
this.nodeInfoView.render();
},
shards: function (initialized) {
this.checkUser();
if (!initialized || this.isCluster === undefined) {
@ -367,10 +392,11 @@
this.navigate('#dashboard', {trigger: true});
return;
}
if (!this.nodesView) {
this.nodesView = new window.NodesView({
});
if (this.nodesView) {
this.nodesView.remove();
}
this.nodesView = new window.NodesView({
});
this.nodesView.render();
},

View File

@ -0,0 +1,27 @@
<script id="nodeInfoView.ejs" type="text/template">
<div class="nodeInfoView">
<div class="modal-body">
<table id="serverInfoTable" class="arango-table">
<tbody>
<% _.each(entries, function (entry, name) { %>
<tr>
<th class="collectionInfoTh2"><%=name%></th>
<th class="collectionInfoTh">
<div id="server-<%=name%>" class="modal-text"><%=entry%></div>
</th>
<th>
<% if (entry.description) { %>
<th class="tooltipInfoTh">
<span class="tippy" title="<%=entry.description%>"></span>
</th>
<% } %>
</th>
</tr>
<% }); %>
</tbody>
</table>
</div>
</div>
</script>

View File

@ -47,10 +47,10 @@
<div class="pure-g cluster-nodes-title pure-table pure-table-header pure-title" style="clear: both">
<div class="pure-table-row">
<div class="pure-u-9-24 left">Name</div>
<div class="pure-u-8-24 left">Endpoint</div>
<div class="pure-u-3-24 mid hide-small">Heartbeat</div>
<div class="pure-u-3-24 mid">Status</div>
<div class="pure-u-1-24 mid"></div>
<div class="pure-u-9-24 left">Endpoint</div>
<div class="pure-u-2-24 mid hide-small">Since</div>
<div class="pure-u-2-24 mid">Info</div>
<div class="pure-u-2-24 mid">Status</div>
</div>
</div>
@ -67,16 +67,17 @@
<i class="fa fa-trash-o"></i>
<% } %>
</div>
<div class="pure-u-8-24 left"><%= node.Endpoint %></div>
<div class="pure-u-9-24 left"><%= node.Endpoint %></div>
<% var formatted = (node.LastHeartbeatAcked).substr(11, 18).slice(0, -1); %>
<div class="pure-u-3-24 hide-small mid"><%= formatted %></div>
<div class="pure-u-3-24 mid"><%= node.LastHeartbeatStatus %></div>
<div class="pure-u-2-24 hide-small mid"><%= formatted %></div>
<div class="pure-u-2-24 mid"><i class="fa fa-info-circle"></i></div>
<% if(node.Status === 'GOOD') { %>
<div class="pure-u-1-24 mid state"><i class="fa fa-check-circle"></i></div>
<div class="pure-u-2-24 mid state"><i class="fa fa-check-circle tippy" title="<%= node.LastHeartbeatStatus %>"></i></div>
<% } else { %>
<div class="pure-u-1-24 mid state"><i class="fa fa-exclamation-circle"></i></div>
<div class="pure-u-2-24 mid state"><i class="fa fa-exclamation-circle"></i></div>
<% } %>
</div>
@ -128,10 +129,10 @@
<div class="pure-g cluster-nodes-title pure-table pure-table-header pure-title">
<div class="pure-table-row">
<div class="pure-u-9-24 left">Name</div>
<div class="pure-u-8-24 left">Endpoint</div>
<div class="pure-u-3-24 mid hide-small">Heartbeat</div>
<div class="pure-u-3-24 mid">Status</div>
<div class="pure-u-1-24 mid"></div>
<div class="pure-u-9-24 left">Endpoint</div>
<div class="pure-u-2-24 mid hide-small">Since</div>
<div class="pure-u-2-24 mid">Info</div>
<div class="pure-u-2-24 mid">Status</div>
</div>
</div>
<% } %>
@ -143,16 +144,17 @@
<div class="pure-table-row <%= disabled %>" id="<%= id %>">
<div class="pure-u-9-24 left"><%= node.ShortName %></div>
<div class="pure-u-8-24 left"><%= node.Endpoint %></div>
<div class="pure-u-9-24 left"><%= node.Endpoint %></div>
<% var formatted = (node.LastHeartbeatAcked).substr(11, 18).slice(0, -1); %>
<div class="pure-u-3-24 mid hide-small"><%= formatted %></div>
<div class="pure-u-3-24 mid"><%= node.LastHeartbeatStatus %></div>
<div class="pure-u-2-24 mid hide-small"><%= formatted %></div>
<div class="pure-u-2-24 mid"><i class="fa fa-info-circle"></i></div>
<% if(node.Status === 'GOOD') { %>
<div class="pure-u-1-24 mid state"><i class="fa fa-check-circle"></i></div>
<div class="pure-u-2-24 mid state"><i class="fa fa-check-circle tippy" title="<%= node.LastHeartbeatStatus %>"></i></div>
<% } else { %>
<div class="pure-u-1-24 mid state"><i class="fa fa-exclamation-circle"></i></div>
<div class="pure-u-2-24 mid state"><i class="fa fa-exclamation-circle"></i></div>
<% } %>
</div>

View File

@ -0,0 +1,108 @@
/* jshint browser: true */
/* jshint unused: false */
/* global arangoHelper, $, Backbone, templateEngine, window */
(function () {
'use strict';
window.NodeInfoView = Backbone.View.extend({
el: '#content',
template: templateEngine.createTemplate('nodeInfoView.ejs'),
initialize: function (options) {
if (window.App.isCluster) {
this.nodeId = options.nodeId;
this.dbServers = options.dbServers;
this.coordinators = options.coordinators;
}
},
remove: function () {
this.$el.empty().off(); /* off to unbind the events */
this.stopListening();
this.unbind();
delete this.el;
return this;
},
render: function () {
this.$el.html(this.template.render({entries: []}));
var callback = function () {
this.continueRender();
this.breadcrumb(arangoHelper.getCoordinatorShortName(this.nodeId));
$(window).trigger('resize');
}.bind(this);
if (!this.initCoordDone) {
this.waitForCoordinators();
}
if (!this.initDBDone) {
this.waitForDBServers(callback);
} else {
this.nodeId = window.location.hash.split('/')[1];
this.coordinator = this.coordinators.findWhere({name: this.coordname});
callback();
}
},
continueRender: function () {
var model;
if (this.coordinator) {
model = this.coordinator.toJSON();
} else {
model = this.dbServer.toJSON();
}
var renderObj = {};
renderObj.Name = model.name;
renderObj.Address = model.address;
renderObj.Status = model.status;
renderObj.Protocol = model.protocol;
renderObj.Role = model.role;
this.$el.html(this.template.render({entries: renderObj}));
},
breadcrumb: function (name) {
$('#subNavigationBar .breadcrumb').html('Node: ' + name);
},
waitForCoordinators: function (callback) {
var self = this;
window.setTimeout(function () {
if (self.coordinators.length === 0) {
self.waitForCoordinators(callback);
} else {
self.coordinator = self.coordinators.findWhere({name: self.nodeId});
self.initCoordDone = true;
if (callback) {
callback();
}
}
}, 200);
},
waitForDBServers: function (callback) {
var self = this;
window.setTimeout(function () {
if (self.dbServers.length === 0) {
self.waitForDBServers(callback);
} else {
self.initDBDone = true;
self.dbServers.each(function (model) {
if (model.get('id') === self.nodeId) {
self.dbServer = model;
}
});
callback();
}
}, 200);
}
});
}());

View File

@ -30,6 +30,14 @@
}
},
remove: function () {
this.$el.empty().off(); /* off to unbind the events */
this.stopListening();
this.unbind();
delete this.el;
return this;
},
breadcrumb: function (name) {
$('#subNavigationBar .breadcrumb').html('Node: ' + name);
},

View File

@ -22,6 +22,14 @@
'keyup #plannedDBs': 'checkKey'
},
remove: function () {
this.$el.empty().off(); /* off to unbind the events */
this.stopListening();
this.unbind();
delete this.el;
return this;
},
checkKey: function (e) {
if (e.keyCode === 13) {
var self = this;
@ -121,11 +129,16 @@
},
navigateToNode: function (elem) {
var name = $(elem.currentTarget).attr('node').slice(0, -5);
if ($(elem.target).hasClass('fa-info-circle')) {
window.App.navigate('#nodeInfo/' + encodeURIComponent(name), {trigger: true});
return;
}
if ($(elem.currentTarget).hasClass('noHover')) {
return;
}
var name = $(elem.currentTarget).attr('node').slice(0, -5);
window.App.navigate('#node/' + encodeURIComponent(name), {trigger: true});
},

View File

@ -33,8 +33,9 @@
.pure-table-body {
.fa-check-circle,
.fa-info-circle,
.fa-exclamation-circle {
font-size: 15pt;
font-size: 13pt;
}
}

View File

@ -336,6 +336,7 @@ ArangoCollection.prototype.properties = function (properties) {
'keyOptions': false,
'indexBuckets': true,
'replicationFactor': false,
'distributeShardsLike': false,
};
var a;
@ -344,7 +345,7 @@ ArangoCollection.prototype.properties = function (properties) {
requestResult = this._database._connection.GET(this._baseurl('properties'));
arangosh.checkRequestResult(requestResult);
}else {
} else {
var body = {};
for (a in attributes) {

Some files were not shown because too many files have changed in this diff.