
Merge branch 'devel' of ssh://github.com/ArangoDB/ArangoDB into devel

Max Neunhoeffer 2017-04-26 09:44:42 +02:00
commit 3afd200599
80 changed files with 1292 additions and 1247 deletions


@ -340,8 +340,8 @@ if (CMAKE_COMPILER_IS_CLANG)
endif ()
# need c++11
# XXX this should really be set on a per target level using cmake compile_features capabilties
set(CMAKE_CXX_STANDARD 11)
include(CheckCXX11Features)
# need threads
find_package(Threads REQUIRED)


@ -100,20 +100,8 @@ Agent::~Agent() {
FATAL_ERROR_EXIT();
}
}
if (!isStopping()) {
{
CONDITION_LOCKER(guardW, _waitForCV);
guardW.broadcast();
}
{
CONDITION_LOCKER(guardA, _appendCV);
guardA.broadcast();
}
shutdown();
}
shutdown();
}


@ -483,7 +483,11 @@ void Supervision::run() {
// that running the supervision does not make sense and will indeed
// lead to horrible errors:
while (!this->isStopping()) {
std::this_thread::sleep_for(std::chrono::duration<double>(5.0));
{
CONDITION_LOCKER(guard, _cv);
_cv.wait(static_cast<uint64_t>(1000000 * _frequency));
}
MUTEX_LOCKER(locker, _lock);
try {
_snapshot = _agent->readDB().get(_agencyPrefix);
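The hunk above replaces Supervision's fixed five-second sleep with a timed wait on its condition variable (scaled by _frequency, in microseconds), so a shutdown broadcast can interrupt the wait immediately instead of stalling for the whole interval. A minimal, self-contained sketch of the same pattern using only the standard library (std::condition_variable rather than ArangoDB's CONDITION_LOCKER wrapper, which is used here only as an analogue):

#include <chrono>
#include <condition_variable>
#include <iostream>
#include <mutex>
#include <thread>

// Minimal analogue of the Supervision loop: wake roughly every `frequency`
// seconds, but let a shutdown notification cut the wait short.
class Supervisor {
 public:
  void run(double frequency) {
    std::unique_lock<std::mutex> lock(_mutex);
    while (!_stopping) {
      // Returns early when notify_all() fires, otherwise after the timeout.
      _cv.wait_for(lock, std::chrono::duration<double>(frequency));
      if (_stopping) {
        break;
      }
      std::cout << "supervision iteration\n";  // placeholder for real work
    }
  }

  void beginShutdown() {
    {
      std::lock_guard<std::mutex> guard(_mutex);
      _stopping = true;
    }
    _cv.notify_all();  // interrupts a pending wait_for immediately
  }

 private:
  std::mutex _mutex;
  std::condition_variable _cv;
  bool _stopping = false;
};

int main() {
  Supervisor s;
  std::thread worker([&] { s.run(5.0); });
  std::this_thread::sleep_for(std::chrono::milliseconds(100));
  s.beginShutdown();  // the loop exits well before the 5-second interval elapses
  worker.join();
}

With this structure, shutdown latency is bounded by how quickly the notification is delivered rather than by the polling interval.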


@ -414,6 +414,8 @@ target_link_libraries(${BIN_ARANGOD}
arangoserver
)
target_compile_features(${BIN_ARANGOD} PRIVATE cxx_constexpr)
install(
TARGETS ${BIN_ARANGOD}
RUNTIME DESTINATION ${CMAKE_INSTALL_SBINDIR}


@ -514,7 +514,7 @@ void Index::batchInsert(
transaction::Methods* trx,
std::vector<std::pair<TRI_voc_rid_t, arangodb::velocypack::Slice>> const&
documents,
arangodb::basics::LocalTaskQueue* queue) {
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) {
for (auto const& it : documents) {
int status = insert(trx, it.first, it.second, false);
if (status != TRI_ERROR_NO_ERROR) {


@ -250,7 +250,7 @@ class Index {
virtual void batchInsert(
transaction::Methods*,
std::vector<std::pair<TRI_voc_rid_t, arangodb::velocypack::Slice>> const&,
arangodb::basics::LocalTaskQueue* queue = nullptr);
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue);
virtual int unload() = 0;


@ -73,7 +73,7 @@ namespace {
class MMFilesIndexFillerTask : public basics::LocalTask {
public:
MMFilesIndexFillerTask(
basics::LocalTaskQueue* queue, transaction::Methods* trx, Index* idx,
std::shared_ptr<basics::LocalTaskQueue> queue, transaction::Methods* trx, Index* idx,
std::vector<std::pair<TRI_voc_rid_t, VPackSlice>> const& documents)
: LocalTask(queue), _trx(trx), _idx(idx), _documents(documents) {}
@ -1464,7 +1464,7 @@ bool MMFilesCollection::openIndex(VPackSlice const& description,
/// @brief initializes an index with a set of existing documents
void MMFilesCollection::fillIndex(
arangodb::basics::LocalTaskQueue* queue, transaction::Methods* trx,
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue, transaction::Methods* trx,
arangodb::Index* idx,
std::vector<std::pair<TRI_voc_rid_t, VPackSlice>> const& documents,
bool skipPersistent) {
@ -1554,12 +1554,13 @@ int MMFilesCollection::fillIndexes(
TRI_ASSERT(SchedulerFeature::SCHEDULER != nullptr);
auto ioService = SchedulerFeature::SCHEDULER->ioService();
TRI_ASSERT(ioService != nullptr);
arangodb::basics::LocalTaskQueue queue(ioService);
PerformanceLogScope logScope(
std::string("fill-indexes-document-collection { collection: ") +
_logicalCollection->vocbase()->name() + "/" + _logicalCollection->name() +
" }, indexes: " + std::to_string(n - 1));
auto queue = std::make_shared<arangodb::basics::LocalTaskQueue>(ioService);
try {
TRI_ASSERT(!ServerState::instance()->isCoordinator());
@ -1594,12 +1595,12 @@ int MMFilesCollection::fillIndexes(
if (idx->type() == Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX) {
continue;
}
fillIndex(&queue, trx, idx.get(), documents, skipPersistent);
fillIndex(queue, trx, idx.get(), documents, skipPersistent);
}
queue.dispatchAndWait();
queue->dispatchAndWait();
if (queue.status() != TRI_ERROR_NO_ERROR) {
if (queue->status() != TRI_ERROR_NO_ERROR) {
rollbackAll();
rolledBack = true;
}
@ -1626,7 +1627,7 @@ int MMFilesCollection::fillIndexes(
if (documents.size() == blockSize) {
// now actually fill the secondary indexes
insertInAllIndexes();
if (queue.status() != TRI_ERROR_NO_ERROR) {
if (queue->status() != TRI_ERROR_NO_ERROR) {
break;
}
documents.clear();
@ -1636,33 +1637,33 @@ int MMFilesCollection::fillIndexes(
}
// process the remainder of the documents
if (queue.status() == TRI_ERROR_NO_ERROR && !documents.empty()) {
if (queue->status() == TRI_ERROR_NO_ERROR && !documents.empty()) {
insertInAllIndexes();
}
} catch (arangodb::basics::Exception const& ex) {
queue.setStatus(ex.code());
queue->setStatus(ex.code());
LOG_TOPIC(WARN, arangodb::Logger::FIXME)
<< "caught exception while filling indexes: " << ex.what();
} catch (std::bad_alloc const&) {
queue.setStatus(TRI_ERROR_OUT_OF_MEMORY);
queue->setStatus(TRI_ERROR_OUT_OF_MEMORY);
} catch (std::exception const& ex) {
LOG_TOPIC(WARN, arangodb::Logger::FIXME)
<< "caught exception while filling indexes: " << ex.what();
queue.setStatus(TRI_ERROR_INTERNAL);
queue->setStatus(TRI_ERROR_INTERNAL);
} catch (...) {
LOG_TOPIC(WARN, arangodb::Logger::FIXME)
<< "caught unknown exception while filling indexes";
queue.setStatus(TRI_ERROR_INTERNAL);
queue->setStatus(TRI_ERROR_INTERNAL);
}
if (queue.status() != TRI_ERROR_NO_ERROR && !rolledBack) {
if (queue->status() != TRI_ERROR_NO_ERROR && !rolledBack) {
try {
rollbackAll();
} catch (...) {
}
}
return queue.status();
return queue->status();
}
/// @brief opens an existing collection
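These hunks move the index-filling plumbing from a raw arangodb::basics::LocalTaskQueue* (with a "= nullptr" default) to a std::shared_ptr<LocalTaskQueue>, and fillIndexes now creates the queue with std::make_shared instead of on the stack. Shared ownership lets tasks dispatched to the scheduler's ioService hold their own reference to the queue, so the queue cannot go away while work is still in flight. A minimal, self-contained sketch of that ownership pattern, using only the standard library rather than the real LocalTaskQueue API:

#include <iostream>
#include <memory>
#include <thread>
#include <vector>

// Toy stand-in for a task queue whose dispatched tasks share ownership of
// the queue, so it stays alive until every task has reported back.
struct TaskQueue : std::enable_shared_from_this<TaskQueue> {
  std::vector<std::thread> workers;
  int status = 0;  // 0 == no error; loosely mirrors queue->status() above

  void dispatch(int taskId) {
    auto self = shared_from_this();  // each task keeps the queue alive
    workers.emplace_back([self, taskId] {
      if (taskId == 3) {
        self->status = -1;  // pretend one filler task failed
      }
    });
  }

  void dispatchAndWait() {
    for (auto& w : workers) {
      w.join();
    }
    workers.clear();
  }
};

int main() {
  auto queue = std::make_shared<TaskQueue>();  // heap-allocated, as in the hunk
  for (int i = 0; i < 5; ++i) {
    queue->dispatch(i);
  }
  queue->dispatchAndWait();
  std::cout << "queue status: " << queue->status << "\n";  // -1 -> roll back
}

Dropping the "= nullptr" defaults in the batchInsert declarations also forces every caller to pass the queue explicitly, which keeps the TRI_ASSERT(queue != nullptr) checks meaningful.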


@ -398,7 +398,7 @@ class MMFilesCollection final : public PhysicalCollection {
bool openIndex(VPackSlice const& description, transaction::Methods* trx);
/// @brief initializes an index with all existing documents
void fillIndex(basics::LocalTaskQueue*, transaction::Methods*, Index*,
void fillIndex(std::shared_ptr<basics::LocalTaskQueue>, transaction::Methods*, Index*,
std::vector<std::pair<TRI_voc_rid_t, VPackSlice>> const&,
bool);


@ -329,7 +329,7 @@ int MMFilesEdgeIndex::remove(transaction::Methods* trx,
void MMFilesEdgeIndex::batchInsert(
transaction::Methods* trx,
std::vector<std::pair<TRI_voc_rid_t, VPackSlice>> const& documents,
arangodb::basics::LocalTaskQueue* queue) {
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) {
if (documents.empty()) {
return;
}


@ -111,7 +111,7 @@ class MMFilesEdgeIndex final : public Index {
void batchInsert(transaction::Methods*,
std::vector<std::pair<TRI_voc_rid_t, VPackSlice>> const&,
arangodb::basics::LocalTaskQueue*) override;
std::shared_ptr<arangodb::basics::LocalTaskQueue>) override;
int unload() override;


@ -644,7 +644,7 @@ int MMFilesHashIndex::remove(transaction::Methods* trx,
void MMFilesHashIndex::batchInsert(
transaction::Methods* trx,
std::vector<std::pair<TRI_voc_rid_t, VPackSlice>> const& documents,
arangodb::basics::LocalTaskQueue* queue) {
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) {
TRI_ASSERT(queue != nullptr);
if (_unique) {
batchInsertUnique(trx, documents, queue);
@ -760,7 +760,7 @@ int MMFilesHashIndex::insertUnique(transaction::Methods* trx,
void MMFilesHashIndex::batchInsertUnique(
transaction::Methods* trx,
std::vector<std::pair<TRI_voc_rid_t, VPackSlice>> const& documents,
arangodb::basics::LocalTaskQueue* queue) {
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) {
TRI_ASSERT(queue != nullptr);
std::shared_ptr<std::vector<MMFilesHashIndexElement*>> elements;
elements.reset(new std::vector<MMFilesHashIndexElement*>());
@ -880,7 +880,7 @@ int MMFilesHashIndex::insertMulti(transaction::Methods* trx,
void MMFilesHashIndex::batchInsertMulti(
transaction::Methods* trx,
std::vector<std::pair<TRI_voc_rid_t, VPackSlice>> const& documents,
arangodb::basics::LocalTaskQueue* queue) {
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) {
TRI_ASSERT(queue != nullptr);
std::shared_ptr<std::vector<MMFilesHashIndexElement*>> elements;
elements.reset(new std::vector<MMFilesHashIndexElement*>());


@ -173,7 +173,7 @@ class MMFilesHashIndex final : public MMFilesPathBasedIndex {
void batchInsert(
transaction::Methods*,
std::vector<std::pair<TRI_voc_rid_t, arangodb::velocypack::Slice>> const&,
arangodb::basics::LocalTaskQueue* queue = nullptr) override;
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) override;
int unload() override;
@ -205,7 +205,7 @@ class MMFilesHashIndex final : public MMFilesPathBasedIndex {
void batchInsertUnique(
transaction::Methods*,
std::vector<std::pair<TRI_voc_rid_t, arangodb::velocypack::Slice>> const&,
arangodb::basics::LocalTaskQueue* queue = nullptr);
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue);
int insertMulti(transaction::Methods*, TRI_voc_rid_t,
arangodb::velocypack::Slice const&, bool isRollback);
@ -213,7 +213,7 @@ class MMFilesHashIndex final : public MMFilesPathBasedIndex {
void batchInsertMulti(
transaction::Methods*,
std::vector<std::pair<TRI_voc_rid_t, arangodb::velocypack::Slice>> const&,
arangodb::basics::LocalTaskQueue* queue = nullptr);
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue);
int removeUniqueElement(transaction::Methods*, MMFilesHashIndexElement*,
bool);


@ -1592,14 +1592,15 @@ int MMFilesRestReplicationHandler::processRestoreCollectionCoordinator(
if (dropExisting) {
int res = ci->dropCollectionCoordinator(dbName, col->cid_as_string(),
errorMsg, 0.0);
if (res == TRI_ERROR_FORBIDDEN) {
if (res == TRI_ERROR_FORBIDDEN ||
res == TRI_ERROR_CLUSTER_MUST_NOT_DROP_COLL_OTHER_DISTRIBUTESHARDSLIKE) {
// some collections must not be dropped
res = truncateCollectionOnCoordinator(dbName, name);
if (res != TRI_ERROR_NO_ERROR) {
errorMsg =
"unable to truncate collection (dropping is forbidden): " + name;
return res;
}
return res;
}
if (res != TRI_ERROR_NO_ERROR) {

File diff suppressed because it is too large.


@ -23,6 +23,7 @@
/// @author Jan Christoph Uhde
////////////////////////////////////////////////////////////////////////////////
#include "Basics/StringRef.h"
#include "RocksDBEngine/RocksDBCommon.h"
#include "RocksDBEngine/RocksDBComparator.h"
#include "RocksDBEngine/RocksDBEngine.h"
@ -35,6 +36,7 @@
#include <rocksdb/comparator.h>
#include <rocksdb/convenience.h>
#include <rocksdb/utilities/transaction_db.h>
#include <velocypack/Iterator.h>
#include "Logger/Logger.h"
namespace arangodb {
@ -127,6 +129,68 @@ void uint64ToPersistent(std::string& p, uint64_t value) {
} while (++len < sizeof(uint64_t));
}
bool hasObjectIds(VPackSlice const& inputSlice) {
bool rv = false;
if (inputSlice.isObject()) {
for (auto const& objectPair :
arangodb::velocypack::ObjectIterator(inputSlice)) {
if (arangodb::StringRef(objectPair.key) == "objectId") {
return true;
}
rv = hasObjectIds(objectPair.value);
if (rv) {
return rv;
}
}
} else if (inputSlice.isArray()) {
for (auto const& slice : arangodb::velocypack::ArrayIterator(inputSlice)) {
if (rv) {
return rv;
}
rv = hasObjectIds(slice);
}
}
return rv;
}
VPackBuilder& stripObjectIdsImpl(VPackBuilder& builder, VPackSlice const& inputSlice) {
if (inputSlice.isObject()) {
builder.openObject();
for (auto const& objectPair :
arangodb::velocypack::ObjectIterator(inputSlice)) {
if (arangodb::StringRef(objectPair.key) == "objectId") {
continue;
}
builder.add(objectPair.key);
stripObjectIdsImpl(builder, objectPair.value);
}
builder.close();
} else if (inputSlice.isArray()) {
builder.openArray();
for (auto const& slice : arangodb::velocypack::ArrayIterator(inputSlice)) {
stripObjectIdsImpl(builder, slice);
}
builder.close();
} else {
builder.add(inputSlice);
}
return builder;
}
std::pair<VPackSlice, std::unique_ptr<VPackBuffer<uint8_t>>> stripObjectIds(
VPackSlice const& inputSlice, bool checkBeforeCopy) {
std::unique_ptr<VPackBuffer<uint8_t>> buffer = nullptr;
if (checkBeforeCopy) {
if (!hasObjectIds(inputSlice)) {
return {inputSlice, std::move(buffer)};
}
}
buffer.reset(new VPackBuffer<uint8_t>);
VPackBuilder builder(*buffer);
stripObjectIdsImpl(builder, inputSlice);
return {VPackSlice(buffer->data()), std::move(buffer)};
}
RocksDBTransactionState* toRocksTransactionState(transaction::Methods* trx) {
TRI_ASSERT(trx != nullptr);
TransactionState* state = trx->state();
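The new stripObjectIds helper above returns a std::pair<VPackSlice, std::unique_ptr<VPackBuffer<uint8_t>>> because of a lifetime subtlety: when checkBeforeCopy finds no "objectId" attribute, the input slice is returned unchanged with a null buffer; otherwise the returned slice points into the freshly built buffer, so the caller has to keep that buffer alive for as long as it uses the slice (the restore handler further down does this by holding the whole pair in scope). A minimal standard-library analogue of the same "view plus owning storage" contract (C++17 for std::string_view), assuming nothing about the velocypack API itself:

#include <iostream>
#include <memory>
#include <string>
#include <string_view>
#include <utility>

// Returns a view of `input` with every '_' removed. If there is nothing to
// strip, the view aliases the caller's string and the owning buffer stays
// null; otherwise the view points into a newly allocated buffer that the
// caller must keep alive for as long as the view is used.
std::pair<std::string_view, std::unique_ptr<std::string>> stripUnderscores(
    std::string const& input) {
  if (input.find('_') == std::string::npos) {
    return {std::string_view(input), nullptr};  // cheap path, no copy
  }
  auto buffer = std::make_unique<std::string>();
  for (char c : input) {
    if (c != '_') {
      buffer->push_back(c);
    }
  }
  std::string_view view(*buffer);
  // Moving the unique_ptr does not move the string's storage, so the view
  // returned alongside it stays valid.
  return {view, std::move(buffer)};
}

int main() {
  std::string raw = "object_id_free";
  auto stripped = stripUnderscores(raw);
  // Keep `stripped` (and with it the buffer) in scope while using the view.
  std::cout << stripped.first << "\n";  // prints "objectidfree"
}

The hasObjectIds pre-scan is the cheap path of the real function: documents that never contain an "objectId" attribute are passed through without any copy.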


@ -88,6 +88,10 @@ arangodb::Result convertStatus(rocksdb::Status const&,
uint64_t uint64FromPersistent(char const* p);
void uint64ToPersistent(char* p, uint64_t value);
void uint64ToPersistent(std::string& out, uint64_t value);
std::pair<VPackSlice, std::unique_ptr<VPackBuffer<uint8_t>>> stripObjectIds(
VPackSlice const& inputSlice, bool checkBeforeCopy = true);
RocksDBTransactionState* toRocksTransactionState(transaction::Methods* trx);
rocksdb::TransactionDB* globalRocksDB();
RocksDBEngine* globalRocksEngine();


@ -251,7 +251,7 @@ int RocksDBEdgeIndex::remove(transaction::Methods* trx,
void RocksDBEdgeIndex::batchInsert(
transaction::Methods* trx,
std::vector<std::pair<TRI_voc_rid_t, VPackSlice>> const& documents,
arangodb::basics::LocalTaskQueue* queue) {
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) {
// acquire rocksdb transaction
RocksDBTransactionState* state = rocksutils::toRocksTransactionState(trx);
rocksdb::Transaction* rtrx = state->rocksTransaction();


@ -111,7 +111,7 @@ class RocksDBEdgeIndex final : public RocksDBIndex {
void batchInsert(
transaction::Methods*,
std::vector<std::pair<TRI_voc_rid_t, arangodb::velocypack::Slice>> const&,
arangodb::basics::LocalTaskQueue* queue = nullptr) override;
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) override;
int drop() override;


@ -30,12 +30,14 @@
#include "Basics/StaticStrings.h"
#include "Basics/Thread.h"
#include "Basics/VelocyPackHelper.h"
#include "Basics/build.h"
#include "GeneralServer/RestHandlerFactory.h"
#include "Logger/Logger.h"
#include "ProgramOptions/ProgramOptions.h"
#include "ProgramOptions/Section.h"
#include "RestHandler/RestHandlerCreator.h"
#include "RestServer/DatabasePathFeature.h"
#include "RestServer/ServerIdFeature.h"
#include "RestServer/ViewTypesFeature.h"
#include "RocksDBEngine/RocksDBBackgroundThread.h"
#include "RocksDBEngine/RocksDBCollection.h"
@ -54,6 +56,7 @@
#include "RocksDBEngine/RocksDBV8Functions.h"
#include "RocksDBEngine/RocksDBValue.h"
#include "RocksDBEngine/RocksDBView.h"
#include "VocBase/replication-applier.h"
#include "VocBase/ticks.h"
#include <rocksdb/convenience.h>
@ -300,7 +303,6 @@ void RocksDBEngine::getDatabases(arangodb::velocypack::Builder& result) {
rocksdb::ReadOptions readOptions;
std::unique_ptr<rocksdb::Iterator> iter(_db->NewIterator(readOptions));
result.openArray();
auto rSlice = rocksDBSlice(RocksDBEntryType::Database);
for (iter->Seek(rSlice); iter->Valid() && iter->key().starts_with(rSlice);
@ -849,6 +851,58 @@ std::pair<TRI_voc_tick_t, TRI_voc_cid_t> RocksDBEngine::mapObjectToCollection(
return it->second;
}
Result RocksDBEngine::createLoggerState(TRI_vocbase_t* vocbase, VPackBuilder& builder){
Result res;
rocksdb::Status status = _db->GetBaseDB()->SyncWAL();
if (!status.ok()) {
res = rocksutils::convertStatus(status).errorNumber();
return res;
}
builder.add(VPackValue(VPackValueType::Object)); // Base
rocksdb::SequenceNumber lastTick = _db->GetLatestSequenceNumber();
// "state" part
builder.add("state", VPackValue(VPackValueType::Object)); //open
builder.add("running", VPackValue(true));
builder.add("lastLogTick", VPackValue(std::to_string(lastTick)));
builder.add("lastUncommittedLogTick", VPackValue(std::to_string(lastTick)));
builder.add("totalEvents", VPackValue(0)); // s.numEvents + s.numEventsSync
builder.add("time", VPackValue(utilities::timeString()));
builder.close();
// "server" part
builder.add("server", VPackValue(VPackValueType::Object)); //open
builder.add("version", VPackValue(ARANGODB_VERSION));
builder.add("serverId", VPackValue(std::to_string(ServerIdFeature::getId())));
builder.close();
// "clients" part
builder.add("clients", VPackValue(VPackValueType::Array)); //open
if(vocbase != nullptr) { //add clients
auto allClients = vocbase->getReplicationClients();
for (auto& it : allClients) {
// One client
builder.add(VPackValue(VPackValueType::Object));
builder.add("serverId", VPackValue(std::to_string(std::get<0>(it))));
char buffer[21];
TRI_GetTimeStampReplication(std::get<1>(it), &buffer[0], sizeof(buffer));
builder.add("time", VPackValue(buffer));
builder.add("lastServedTick", VPackValue(std::to_string(std::get<2>(it))));
builder.close();
}
}
builder.close(); // clients
builder.close(); // base
return res;
}
Result RocksDBEngine::dropDatabase(TRI_voc_tick_t id) {
using namespace rocksutils;
Result res;


@ -248,6 +248,8 @@ class RocksDBEngine final : public StorageEngine {
void addCollectionMapping(uint64_t, TRI_voc_tick_t, TRI_voc_cid_t);
std::pair<TRI_voc_tick_t, TRI_voc_cid_t> mapObjectToCollection(uint64_t);
Result createLoggerState(TRI_vocbase_t* vocbase, VPackBuilder& builder);
private:
Result dropDatabase(TRI_voc_tick_t);
bool systemDatabaseExists();


@ -48,6 +48,7 @@ double const RocksDBReplicationContext::DefaultTTL = 30 * 60.0;
RocksDBReplicationContext::RocksDBReplicationContext()
: _id(TRI_NewTickServer()),
_lastTick(0),
_currentTick(0),
_trx(),
_collection(nullptr),
_iter(),
@ -77,21 +78,26 @@ uint64_t RocksDBReplicationContext::count() const {
// creates new transaction/snapshot
void RocksDBReplicationContext::bind(TRI_vocbase_t* vocbase) {
releaseDumpingResources();
_trx = createTransaction(vocbase);
if ((_trx.get() == nullptr) || (_trx->vocbase() != vocbase)) {
releaseDumpingResources();
_trx = createTransaction(vocbase);
}
}
int RocksDBReplicationContext::bindCollection(
std::string const& collectionName) {
if ((_collection == nullptr) || _collection->name() != collectionName) {
if ((_collection == nullptr) ||
((_collection->name() != collectionName) &&
std::to_string(_collection->cid()) != collectionName)) {
_collection = _trx->vocbase()->lookupCollection(collectionName);
if (_collection == nullptr) {
return TRI_ERROR_BAD_PARAMETER;
}
_trx->addCollectionAtRuntime(collectionName);
_iter = _collection->getAllIterator(_trx.get(), &_mdr,
false); //_mdr is not used nor updated
_currentTick = 1;
_hasMore = true;
}
return TRI_ERROR_NO_ERROR;
@ -174,13 +180,19 @@ RocksDBReplicationResult RocksDBReplicationContext::dump(
try {
_hasMore = _iter->next(cb, 10); // TODO: adjust limit?
} catch (std::exception const& ex) {
_hasMore = false;
return RocksDBReplicationResult(TRI_ERROR_INTERNAL, _lastTick);
} catch (RocksDBReplicationResult const& ex) {
_hasMore = false;
return ex;
}
}
return RocksDBReplicationResult(TRI_ERROR_NO_ERROR, _lastTick);
if (_hasMore) {
_currentTick++;
}
return RocksDBReplicationResult(TRI_ERROR_NO_ERROR, _currentTick);
}
arangodb::Result RocksDBReplicationContext::dumpKeyChunks(VPackBuilder& b,


@ -106,6 +106,7 @@ class RocksDBReplicationContext {
private:
TRI_voc_tick_t _id;
uint64_t _lastTick;
uint64_t _currentTick;
std::unique_ptr<transaction::Methods> _trx;
LogicalCollection* _collection;
std::unique_ptr<IndexIterator> _iter;


@ -328,57 +328,12 @@ bool RocksDBRestReplicationHandler::isCoordinatorError() {
void RocksDBRestReplicationHandler::handleCommandLoggerState() {
VPackBuilder builder;
builder.add(VPackValue(VPackValueType::Object)); // Base
// MMFilesLogfileManager::instance()->waitForSync(10.0);
// MMFilesLogfileManagerState const s =
// MMFilesLogfileManager::instance()->state();
rocksdb::TransactionDB* db =
static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE)->db();
rocksdb::Status status = db->GetBaseDB()->SyncWAL();
if (!status.ok()) {
Result res = rocksutils::convertStatus(status).errorNumber();
auto res = globalRocksEngine()->createLoggerState(_vocbase, builder);
if (res.fail()) {
generateError(rest::ResponseCode::BAD, res.errorNumber(),
res.errorMessage());
return;
}
rocksdb::SequenceNumber lastTick = latestSequenceNumber();
// "state" part
builder.add("state", VPackValue(VPackValueType::Object));
builder.add("running", VPackValue(true));
builder.add("lastLogTick", VPackValue(StringUtils::itoa(lastTick)));
builder.add("lastUncommittedLogTick",
VPackValue(StringUtils::itoa(lastTick + 1)));
builder.add("totalEvents", VPackValue(0)); // s.numEvents + s.numEventsSync
builder.add("time", VPackValue(utilities::timeString()));
builder.close();
// "server" part
builder.add("server", VPackValue(VPackValueType::Object));
builder.add("version", VPackValue(ARANGODB_VERSION));
builder.add("serverId", VPackValue(std::to_string(ServerIdFeature::getId())));
builder.close();
// "clients" part
builder.add("clients", VPackValue(VPackValueType::Array));
auto allClients = _vocbase->getReplicationClients();
for (auto& it : allClients) {
// One client
builder.add(VPackValue(VPackValueType::Object));
builder.add("serverId", VPackValue(std::to_string(std::get<0>(it))));
char buffer[21];
TRI_GetTimeStampReplication(std::get<1>(it), &buffer[0], sizeof(buffer));
builder.add("time", VPackValue(buffer));
builder.add("lastServedTick", VPackValue(std::to_string(std::get<2>(it))));
builder.close();
}
builder.close(); // clients
builder.close(); // base
generateResult(rest::ResponseCode::OK, builder.slice());
}
@ -871,7 +826,8 @@ void RocksDBRestReplicationHandler::handleCommandRestoreCollection() {
"invalid JSON");
return;
}
VPackSlice const slice = parsedRequest->slice();
auto pair = stripObjectIds(parsedRequest->slice());
VPackSlice const slice = pair.first;
bool overwrite = false;
@ -1775,14 +1731,15 @@ int RocksDBRestReplicationHandler::processRestoreCollectionCoordinator(
if (dropExisting) {
int res = ci->dropCollectionCoordinator(dbName, col->cid_as_string(),
errorMsg, 0.0);
if (res == TRI_ERROR_FORBIDDEN) {
if (res == TRI_ERROR_FORBIDDEN ||
res == TRI_ERROR_CLUSTER_MUST_NOT_DROP_COLL_OTHER_DISTRIBUTESHARDSLIKE) {
// some collections must not be dropped
res = truncateCollectionOnCoordinator(dbName, name);
if (res != TRI_ERROR_NO_ERROR) {
errorMsg =
"unable to truncate collection (dropping is forbidden): " + name;
return res;
}
return res;
}
if (res != TRI_ERROR_NO_ERROR) {


@ -63,46 +63,44 @@ static void JS_StateLoggerReplication(
v8::HandleScope scope(isolate);
std::string engineName = EngineSelectorFeature::ENGINE->typeName();
v8::Handle<v8::Object> result = v8::Object::New(isolate);
v8::Handle<v8::Object> state = v8::Object::New(isolate);
state->Set(TRI_V8_ASCII_STRING("running"), v8::True(isolate));
if(engineName == "mmfiles"){
v8::Handle<v8::Object> state = v8::Object::New(isolate);
MMFilesLogfileManagerState const s = MMFilesLogfileManager::instance()->state();
state->Set(TRI_V8_ASCII_STRING("running"), v8::True(isolate));
state->Set(TRI_V8_ASCII_STRING("lastLogTick"),
TRI_V8UInt64String<TRI_voc_tick_t>(isolate, s.lastCommittedTick));
state->Set(TRI_V8_ASCII_STRING("lastUncommittedLogTick"), TRI_V8UInt64String<TRI_voc_tick_t>(isolate, s.lastAssignedTick));
state->Set(TRI_V8_ASCII_STRING("totalEvents"),
v8::Number::New(isolate, static_cast<double>(s.numEvents + s.numEventsSync)));
state->Set(TRI_V8_ASCII_STRING("time"), TRI_V8_STD_STRING(s.timeString));
result->Set(TRI_V8_ASCII_STRING("state"), state);
v8::Handle<v8::Object> server = v8::Object::New(isolate);
server->Set(TRI_V8_ASCII_STRING("version"),
TRI_V8_ASCII_STRING(ARANGODB_VERSION));
server->Set(TRI_V8_ASCII_STRING("serverId"),
TRI_V8_STD_STRING(StringUtils::itoa(ServerIdFeature::getId())));
result->Set(TRI_V8_ASCII_STRING("server"), server);
v8::Handle<v8::Object> clients = v8::Object::New(isolate);
result->Set(TRI_V8_ASCII_STRING("clients"), clients);
} else if (engineName == "rocksdb") {
rocksdb::TransactionDB* db = rocksutils::globalRocksDB();
uint64_t lastTick = db->GetLatestSequenceNumber();
state->Set(TRI_V8_ASCII_STRING("lastLogTick"),
TRI_V8UInt64String<TRI_voc_tick_t>(isolate, lastTick));
state->Set(TRI_V8_ASCII_STRING("lastUncommittedLogTick"),
TRI_V8UInt64String<TRI_voc_tick_t>(isolate, lastTick));
state->Set(TRI_V8_ASCII_STRING("totalEvents"),
v8::Number::New(isolate, static_cast<double>(0))); //s.numEvents + s.numEventsSync)));
state->Set(TRI_V8_ASCII_STRING("time"), TRI_V8_STD_STRING(utilities::timeString()));
VPackBuilder builder;
auto res = rocksutils::globalRocksEngine()->createLoggerState(nullptr,builder);
if(res.fail()){
TRI_V8_THROW_EXCEPTION(res);
return;
}
v8::Handle<v8::Value>resultValue = TRI_VPackToV8(isolate, builder.slice());
result = v8::Handle<v8::Object>::Cast(resultValue);
} else {
TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid storage engine");
return;
}
v8::Handle<v8::Object> result = v8::Object::New(isolate);
result->Set(TRI_V8_ASCII_STRING("state"), state);
v8::Handle<v8::Object> server = v8::Object::New(isolate);
server->Set(TRI_V8_ASCII_STRING("version"),
TRI_V8_ASCII_STRING(ARANGODB_VERSION));
server->Set(TRI_V8_ASCII_STRING("serverId"),
TRI_V8_STD_STRING(StringUtils::itoa(ServerIdFeature::getId())));
result->Set(TRI_V8_ASCII_STRING("server"), server);
v8::Handle<v8::Object> clients = v8::Object::New(isolate);
result->Set(TRI_V8_ASCII_STRING("clients"), clients);
TRI_V8_RETURN(result);
TRI_V8_TRY_CATCH_END
}


@ -1,160 +0,0 @@
# - Check which parts of the C++11 standard the compiler supports
#
# When found it will set the following variables
#
# CXX11_COMPILER_FLAGS - the compiler flags needed to get C++11 features
#
# HAS_CXX11_AUTO - auto keyword
# HAS_CXX11_AUTO_RET_TYPE - function declaration with deduced return types
# HAS_CXX11_CLASS_OVERRIDE - override and final keywords for classes and methods
# HAS_CXX11_CONSTEXPR - constexpr keyword
# HAS_CXX11_CSTDINT_H - cstdint header
# HAS_CXX11_DECLTYPE - decltype keyword
# HAS_CXX11_FUNC - __func__ preprocessor constant
# HAS_CXX11_INITIALIZER_LIST - initializer list
# HAS_CXX11_LAMBDA - lambdas
# HAS_CXX11_LIB_REGEX - regex library
# HAS_CXX11_LONG_LONG - long long signed & unsigned types
# HAS_CXX11_NULLPTR - nullptr
# HAS_CXX11_RVALUE_REFERENCES - rvalue references
# HAS_CXX11_SIZEOF_MEMBER - sizeof() non-static members
# HAS_CXX11_STATIC_ASSERT - static_assert()
# HAS_CXX11_VARIADIC_TEMPLATES - variadic templates
# HAS_CXX11_SHARED_PTR - Shared Pointer
# HAS_CXX11_THREAD - thread
# HAS_CXX11_MUTEX - mutex
# HAS_CXX11_NOEXCEPT - noexcept
# HAS_CXX11_CONDITIONAL - conditional type definitions
#=============================================================================
# Copyright 2011,2012 Rolf Eike Beer <eike@sf-mail.de>
# Copyright 2012 Andreas Weis
# Copyright 2014 Kaveh Vahedipour <kaveh@codeare.org>
#
# Distributed under the OSI-approved BSD License (the "License");
# see accompanying file Copyright.txt for details.
#
# This software is distributed WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the License for more information.
#=============================================================================
# (To distribute this file outside of CMake, substitute the full
# License text for the above reference.)
#
# Each feature may have up to 3 checks, every one of them in it's own file
# FEATURE.cpp - example that must build and return 0 when run
# FEATURE_fail.cpp - example that must build, but may not return 0 when run
# FEATURE_fail_compile.cpp - example that must fail compilation
#
# The first one is mandatory, the latter 2 are optional and do not depend on
# each other (i.e. only one may be present).
#
# Modification for std::thread (Kaveh Vahdipour, Forschungszentrum Juelich)
#
IF (NOT CMAKE_CXX_COMPILER_LOADED)
message(FATAL_ERROR "CheckCXX11Features modules only works if language CXX is enabled")
endif ()
cmake_minimum_required(VERSION 2.8.3)
#
### Check for needed compiler flags
#
include(CheckCXXCompilerFlag)
check_cxx_compiler_flag("-std=c++11" _HAS_CXX11_FLAG)
if (NOT _HAS_CXX11_FLAG)
check_cxx_compiler_flag("-std=c++0x" _HAS_CXX0X_FLAG)
endif ()
if (_HAS_CXX11_FLAG)
set(CXX11_COMPILER_FLAGS "-std=c++11")
elseif (_HAS_CXX0X_FLAG)
set(CXX11_COMPILER_FLAGS "-std=c++0x")
endif ()
function(cxx11_check_feature FEATURE_NAME RESULT_VAR)
if (NOT DEFINED ${RESULT_VAR})
set(_bindir "${CMAKE_CURRENT_BINARY_DIR}/cxx11/${FEATURE_NAME}")
set(_SRCFILE_BASE ${CMAKE_CURRENT_LIST_DIR}/CheckCXX11Features/cxx11-test-${FEATURE_NAME})
set(_LOG_NAME "\"${FEATURE_NAME}\"")
message(STATUS "Checking C++11 support for ${_LOG_NAME}")
set(_SRCFILE "${_SRCFILE_BASE}.cpp")
set(_SRCFILE_FAIL "${_SRCFILE_BASE}_fail.cpp")
set(_SRCFILE_FAIL_COMPILE "${_SRCFILE_BASE}_fail_compile.cpp")
if (CROSS_COMPILING)
try_compile(${RESULT_VAR} "${_bindir}" "${_SRCFILE}"
COMPILE_DEFINITIONS "${CXX11_COMPILER_FLAGS}")
if (${RESULT_VAR} AND EXISTS ${_SRCFILE_FAIL})
try_compile(${RESULT_VAR} "${_bindir}_fail" "${_SRCFILE_FAIL}"
COMPILE_DEFINITIONS "${CXX11_COMPILER_FLAGS}")
endif (${RESULT_VAR} AND EXISTS ${_SRCFILE_FAIL})
else (CROSS_COMPILING)
try_run(_RUN_RESULT_VAR _COMPILE_RESULT_VAR
"${_bindir}" "${_SRCFILE}"
COMPILE_DEFINITIONS "${CXX11_COMPILER_FLAGS}")
if (_COMPILE_RESULT_VAR AND NOT _RUN_RESULT_VAR)
set(${RESULT_VAR} TRUE)
else (_COMPILE_RESULT_VAR AND NOT _RUN_RESULT_VAR)
set(${RESULT_VAR} FALSE)
endif (_COMPILE_RESULT_VAR AND NOT _RUN_RESULT_VAR)
if (${RESULT_VAR} AND EXISTS ${_SRCFILE_FAIL})
try_run(_RUN_RESULT_VAR _COMPILE_RESULT_VAR
"${_bindir}_fail" "${_SRCFILE_FAIL}"
COMPILE_DEFINITIONS "${CXX11_COMPILER_FLAGS}")
if (_COMPILE_RESULT_VAR AND _RUN_RESULT_VAR)
set(${RESULT_VAR} TRUE)
else (_COMPILE_RESULT_VAR AND _RUN_RESULT_VAR)
set(${RESULT_VAR} FALSE)
endif (_COMPILE_RESULT_VAR AND _RUN_RESULT_VAR)
endif (${RESULT_VAR} AND EXISTS ${_SRCFILE_FAIL})
endif (CROSS_COMPILING)
if (${RESULT_VAR} AND EXISTS ${_SRCFILE_FAIL_COMPILE})
try_compile(_TMP_RESULT "${_bindir}_fail_compile" "${_SRCFILE_FAIL_COMPILE}"
COMPILE_DEFINITIONS "${CXX11_COMPILER_FLAGS}")
if (_TMP_RESULT)
set(${RESULT_VAR} FALSE)
else (_TMP_RESULT)
set(${RESULT_VAR} TRUE)
endif (_TMP_RESULT)
endif (${RESULT_VAR} AND EXISTS ${_SRCFILE_FAIL_COMPILE})
if (${RESULT_VAR})
message(STATUS "Checking C++11 support for ${_LOG_NAME}: works")
else (${RESULT_VAR})
message(FATAL_ERROR "Checking C++11 support for ${_LOG_NAME}: not supported")
endif (${RESULT_VAR})
set(${RESULT_VAR} ${${RESULT_VAR}} CACHE INTERNAL "C++11 support for ${_LOG_NAME}")
endif (NOT DEFINED ${RESULT_VAR})
endfunction(cxx11_check_feature)
cxx11_check_feature("__func__" HAS_CXX11_FUNC)
cxx11_check_feature("auto" HAS_CXX11_AUTO)
cxx11_check_feature("auto_ret_type" HAS_CXX11_AUTO_RET_TYPE)
#cxx11_check_feature("atomic_uint_fast16_t" HAS_CXX11_ATOMIC_UINT_FAST16_T)
cxx11_check_feature("class_override_final" HAS_CXX11_CLASS_OVERRIDE)
cxx11_check_feature("constexpr" HAS_CXX11_CONSTEXPR)
cxx11_check_feature("conditional" HAS_CXX11_CONDITIONAL)
#cxx11_check_feature("cstdint" HAS_CXX11_CSTDINT_H)
cxx11_check_feature("decltype" HAS_CXX11_DECLTYPE)
cxx11_check_feature("initializer_list" HAS_CXX11_INITIALIZER_LIST)
cxx11_check_feature("lambda" HAS_CXX11_LAMBDA)
cxx11_check_feature("range_based_for_loop" HAS_CXX11_RANGE_BASED_FOR_LOOP)
#cxx11_check_feature("long_long" HAS_CXX11_LONG_LONG)
cxx11_check_feature("nullptr" HAS_CXX11_NULLPTR)
cxx11_check_feature("tuple" HAS_CXX11_TUPLE)
cxx11_check_feature("regex" HAS_CXX11_LIB_REGEX)
cxx11_check_feature("rvalue-references" HAS_CXX11_RVALUE_REFERENCES)
cxx11_check_feature("sizeof_member" HAS_CXX11_SIZEOF_MEMBER)
cxx11_check_feature("static_assert" HAS_CXX11_STATIC_ASSERT)
cxx11_check_feature("variadic_templates" HAS_CXX11_VARIADIC_TEMPLATES)
cxx11_check_feature("shared_ptr" HAS_CXX11_SHARED_PTR)
cxx11_check_feature("unique_ptr" HAS_CXX11_UNIQUE_PTR)
cxx11_check_feature("weak_ptr" HAS_CXX11_WEAK_PTR)
cxx11_check_feature("thread" HAS_CXX11_THREAD)
cxx11_check_feature("mutex" HAS_CXX11_MUTEX)
cxx11_check_feature("noexcept" HAS_CXX11_NOEXCEPT)


@ -1,8 +0,0 @@
int main(void)
{
if (!__func__)
return 1;
if (!(*__func__))
return 1;
return 0;
}


@ -1,6 +0,0 @@
#include <atomic>
int main () {
std::atomic_uint_fast16_t a;
return 0;
}


@ -1,12 +0,0 @@
int main()
{
auto i = 5;
auto f = 3.14159f;
auto d = 3.14159;
bool ret = (
(sizeof(f) < sizeof(d)) &&
(sizeof(i) == sizeof(int))
);
return ret ? 0 : 1;
}


@ -1,7 +0,0 @@
int main(void)
{
// must fail because there is no initializer
auto i;
return 0;
}


@ -1,8 +0,0 @@
auto foo(int i) -> int {
return i - 1;
}
int main()
{
return foo(1);
}


@ -1,28 +0,0 @@
class base {
public:
virtual int foo(int a)
{ return 4 + a; }
int bar(int a)
{ return a - 2; }
};
class sub final : public base {
public:
virtual int foo(int a) override
{ return 8 + 2 * a; };
};
class sub2 final : public base {
public:
virtual int foo(int a) override final
{ return 8 + 2 * a; };
};
int main(void)
{
base b;
sub s;
sub2 t;
return (b.foo(2) * 2 == s.foo(2) && b.foo(2) * 2 == t.foo(2) ) ? 0 : 1;
}


@ -1,25 +0,0 @@
class base {
public:
virtual int foo(int a)
{ return 4 + a; }
virtual int bar(int a) final
{ return a - 2; }
};
class sub final : public base {
public:
virtual int foo(int a) override
{ return 8 + 2 * a; };
virtual int bar(int a)
{ return a; }
};
class impossible : public sub { };
int main(void)
{
base b;
sub s;
return 1;
}


@ -1,17 +0,0 @@
#include <type_traits>
#include <string>
template<class T> class A {
public:
typedef typename std::conditional<false, const std::string, std::string>::type StringType;
A() : s(""), t(0) {}
virtual ~A () {}
private:
StringType s;
T t;
};
int main() {
A<float> a;
return 0;
}


@ -1,19 +0,0 @@
constexpr int square(int x)
{
return x*x;
}
constexpr int the_answer()
{
return 42;
}
int main()
{
int test_arr[square(3)];
bool ret = (
(square(the_answer()) == 1764) &&
(sizeof(test_arr)/sizeof(test_arr[0]) == 9)
);
return ret ? 0 : 1;
}


@ -1,11 +0,0 @@
#include <cstdint>
int main()
{
bool test =
(sizeof(int8_t) == 1) &&
(sizeof(int16_t) == 2) &&
(sizeof(int32_t) == 4) &&
(sizeof(int64_t) == 8);
return test ? 0 : 1;
}


@ -1,10 +0,0 @@
bool check_size(int i)
{
return sizeof(int) == sizeof(decltype(i));
}
int main()
{
bool ret = check_size(42);
return ret ? 0 : 1;
}


@ -1,27 +0,0 @@
#include <vector>
class seq {
public:
seq(std::initializer_list<int> list);
int length() const;
private:
std::vector<int> m_v;
};
seq::seq(std::initializer_list<int> list)
: m_v(list)
{
}
int seq::length() const
{
return m_v.size();
}
int main(void)
{
seq a = {18, 20, 2, 0, 4, 7};
return (a.length() == 6) ? 0 : 1;
}


@ -1,5 +0,0 @@
int main()
{
int ret = 0;
return ([&ret]() -> int { return ret; })();
}


@ -1,7 +0,0 @@
int main(void)
{
long long l;
unsigned long long ul;
return ((sizeof(l) >= 8) && (sizeof(ul) >= 8)) ? 0 : 1;
}


@ -1,6 +0,0 @@
#include <mutex>
int main() {
std::mutex _mutex;
return 0;
}


@ -1,8 +0,0 @@
volatile void dummy () noexcept {
int a = 0;
}
int main () {
dummy();
return 0;
}


@ -1,6 +0,0 @@
int main(void)
{
void *v = nullptr;
return v ? 1 : 0;
}


@ -1,6 +0,0 @@
int main(void)
{
int i = nullptr;
return 1;
}


@ -1,15 +0,0 @@
int main() {
int my_array[5] = {1, 2, 3, 4, 5};
for (int &x : my_array) {
x *= 2;
}
for (auto &x : my_array) {
x *= 2;
}
}


@ -1,26 +0,0 @@
#include <algorithm>
#include <regex>
int parse_line(std::string const& line)
{
std::string tmp;
if(std::regex_search(line, std::regex("(\\s)+(-)?(\\d)+//(-)?(\\d)+(\\s)+"))) {
tmp = std::regex_replace(line, std::regex("(-)?(\\d)+//(-)?(\\d)+"), std::string("V"));
} else if(std::regex_search(line, std::regex("(\\s)+(-)?(\\d)+/(-)?(\\d)+(\\s)+"))) {
tmp = std::regex_replace(line, std::regex("(-)?(\\d)+/(-)?(\\d)+"), std::string("V"));
} else if(std::regex_search(line, std::regex("(\\s)+(-)?(\\d)+/(-)?(\\d)+/(-)?(\\d)+(\\s)+"))) {
tmp = std::regex_replace(line, std::regex("(-)?(\\d)+/(-)?(\\d)+/(-)?(\\d)+"), std::string("V"));
} else {
tmp = std::regex_replace(line, std::regex("(-)?(\\d)+"), std::string("V"));
}
return static_cast<int>(std::count(tmp.begin(), tmp.end(), 'V'));
}
int main()
{
bool test = (parse_line("f 7/7/7 -3/3/-3 2/-2/2") == 3) &&
(parse_line("f 7//7 3//-3 -2//2") == 3) &&
(parse_line("f 7/7 3/-3 -2/2") == 3) &&
(parse_line("f 7 3 -2") == 3);
return test ? 0 : 1;
}


@ -1,57 +0,0 @@
#include <cassert>
class rvmove {
public:
void *ptr;
char *array;
rvmove()
: ptr(0),
array(new char[10])
{
ptr = this;
}
rvmove(rvmove &&other)
: ptr(other.ptr),
array(other.array)
{
other.array = 0;
other.ptr = 0;
}
~rvmove()
{
assert(((ptr != 0) && (array != 0)) || ((ptr == 0) && (array == 0)));
delete[] array;
}
rvmove &operator=(rvmove &&other)
{
delete[] array;
ptr = other.ptr;
array = other.array;
other.array = 0;
other.ptr = 0;
return *this;
}
static rvmove create()
{
return rvmove();
}
private:
rvmove(const rvmove &);
rvmove &operator=(const rvmove &);
};
int main()
{
rvmove mine;
if (mine.ptr != &mine)
return 1;
mine = rvmove::create();
if (mine.ptr == &mine)
return 1;
return 0;
}


@ -1,6 +0,0 @@
#include <memory>
int main() {
std::shared_ptr<int> test;
return 0;
}


@ -1,14 +0,0 @@
struct foo {
char bar;
int baz;
};
int main(void)
{
bool ret = (
(sizeof(foo::bar) == 1) &&
(sizeof(foo::baz) >= sizeof(foo::bar)) &&
(sizeof(foo) >= sizeof(foo::bar) + sizeof(foo::baz))
);
return ret ? 0 : 1;
}


@ -1,9 +0,0 @@
struct foo {
int baz;
double bar;
};
int main(void)
{
return (sizeof(foo::bar) == 4) ? 0 : 1;
}


@ -1,5 +0,0 @@
int main(void)
{
static_assert(0 < 1, "your ordering of integers is screwed");
return 0;
}


@ -1,5 +0,0 @@
int main(void)
{
static_assert(1 < 0, "your ordering of integers is screwed");
return 0;
}


@ -1,6 +0,0 @@
#include <thread>
int main() {
std::thread test;
return 0;
}


@ -1,10 +0,0 @@
#include <tuple>
int main () {
typedef std::tuple <int, double, long &, const char *> test_tuple;
long lengthy = 12;
test_tuple proof (18, 6.5, lengthy, "Ciao!");
lengthy = std::get<0>(proof);
std::get<3>(proof) = " Beautiful!";
return 0;
}


@ -1,6 +0,0 @@
#include <memory>
int main() {
std::unique_ptr<int> test;
return 0;
}


@ -1,23 +0,0 @@
int Accumulate()
{
return 0;
}
template<typename T, typename... Ts>
int Accumulate(T v, Ts... vs)
{
return v + Accumulate(vs...);
}
template<int... Is>
int CountElements()
{
return sizeof...(Is);
}
int main()
{
int acc = Accumulate(1, 2, 3, 4, -5);
int count = CountElements<1,2,3,4,5>();
return ((acc == 5) && (count == 5)) ? 0 : 1;
}


@ -1,6 +0,0 @@
#include <memory>
int main() {
std::weak_ptr<int> test;
return 0;
}


@ -74,7 +74,9 @@ macro (install_readme input output)
if (MSVC)
set(CRLFSTYLE "CRLF")
endif ()
configure_file(${PROJECT_SOURCE_DIR}/${input} "${PROJECT_BINARY_DIR}/${output}" NEWLINE_STYLE ${CRLFSTYLE})
install(
CODE "configure_file(${PROJECT_SOURCE_DIR}/${input} \"${PROJECT_BINARY_DIR}/${output}\" NEWLINE_STYLE ${CRLFSTYLE})")
install(
FILES "${PROJECT_BINARY_DIR}/${output}"
DESTINATION "${where}"


@ -64,10 +64,13 @@ function collectionRepresentation(collection, showProperties, showCount, showFig
result.indexBuckets = properties.indexBuckets;
if (cluster.isCoordinator()) {
result.shardKeys = properties.shardKeys;
result.avoidServers = properties.avoidServers;
result.distributeShardsLike = properties.distributeShardsLike;
result.numberOfShards = properties.numberOfShards;
result.replicationFactor = properties.replicationFactor;
result.avoidServers = properties.avoidServers;
result.distributeShardsLike = properties.distributeShardsLike;
result.shardKeys = properties.shardKeys;
}
}


@ -39,6 +39,7 @@
'nodes': 'nodes',
'shards': 'shards',
'node/:name': 'node',
'nodeInfo/:id': 'nodeInfo',
'logs': 'logger',
'helpus': 'helpUs',
'graph/:name': 'graph',
@ -327,16 +328,40 @@
return;
}
if (!this.nodeView) {
this.nodeView = new window.NodeView({
coordname: name,
coordinators: this.coordinatorCollection,
dbServers: this.dbServers
});
if (this.nodeView) {
this.nodeView.remove();
}
this.nodeView = new window.NodeView({
coordname: name,
coordinators: this.coordinatorCollection,
dbServers: this.dbServers
});
this.nodeView.render();
},
nodeInfo: function (id, initialized) {
this.checkUser();
if (!initialized || this.isCluster === undefined) {
this.waitForInit(this.nodeInfo.bind(this), id);
return;
}
if (this.isCluster === false) {
this.routes[''] = 'dashboard';
this.navigate('#dashboard', {trigger: true});
return;
}
if (this.nodeInfoView) {
this.nodeInfoView.remove();
}
this.nodeInfoView = new window.NodeInfoView({
nodeId: id,
coordinators: this.coordinatorCollection,
dbServers: this.dbServers[0]
});
this.nodeInfoView.render();
},
shards: function (initialized) {
this.checkUser();
if (!initialized || this.isCluster === undefined) {
@ -367,10 +392,11 @@
this.navigate('#dashboard', {trigger: true});
return;
}
if (!this.nodesView) {
this.nodesView = new window.NodesView({
});
if (this.nodesView) {
this.nodesView.remove();
}
this.nodesView = new window.NodesView({
});
this.nodesView.render();
},


@ -0,0 +1,27 @@
<script id="nodeInfoView.ejs" type="text/template">
<div class="nodeInfoView">
<div class="modal-body">
<table id="serverInfoTable" class="arango-table">
<tbody>
<% _.each(entries, function (entry, name) { %>
<tr>
<th class="collectionInfoTh2"><%=name%></th>
<th class="collectionInfoTh">
<div id="server-<%=name%>" class="modal-text"><%=entry%></div>
</th>
<th>
<% if (entry.description) { %>
<th class="tooltipInfoTh">
<span class="tippy" title="<%=entry.description%>"></span>
</th>
<% } %>
</th>
</tr>
<% }); %>
</tbody>
</table>
</div>
</div>
</script>


@ -47,10 +47,10 @@
<div class="pure-g cluster-nodes-title pure-table pure-table-header pure-title" style="clear: both">
<div class="pure-table-row">
<div class="pure-u-9-24 left">Name</div>
<div class="pure-u-8-24 left">Endpoint</div>
<div class="pure-u-3-24 mid hide-small">Heartbeat</div>
<div class="pure-u-3-24 mid">Status</div>
<div class="pure-u-1-24 mid"></div>
<div class="pure-u-9-24 left">Endpoint</div>
<div class="pure-u-2-24 mid hide-small">Since</div>
<div class="pure-u-2-24 mid">Info</div>
<div class="pure-u-2-24 mid">Status</div>
</div>
</div>
@ -67,16 +67,17 @@
<i class="fa fa-trash-o"></i>
<% } %>
</div>
<div class="pure-u-8-24 left"><%= node.Endpoint %></div>
<div class="pure-u-9-24 left"><%= node.Endpoint %></div>
<% var formatted = (node.LastHeartbeatAcked).substr(11, 18).slice(0, -1); %>
<div class="pure-u-3-24 hide-small mid"><%= formatted %></div>
<div class="pure-u-3-24 mid"><%= node.LastHeartbeatStatus %></div>
<div class="pure-u-2-24 hide-small mid"><%= formatted %></div>
<div class="pure-u-2-24 mid"><i class="fa fa-info-circle"></i></div>
<% if(node.Status === 'GOOD') { %>
<div class="pure-u-1-24 mid state"><i class="fa fa-check-circle"></i></div>
<div class="pure-u-2-24 mid state"><i class="fa fa-check-circle tippy" title="<%= node.LastHeartbeatStatus %>"></i></div>
<% } else { %>
<div class="pure-u-1-24 mid state"><i class="fa fa-exclamation-circle"></i></div>
<div class="pure-u-2-24 mid state"><i class="fa fa-exclamation-circle"></i></div>
<% } %>
</div>
@ -128,10 +129,10 @@
<div class="pure-g cluster-nodes-title pure-table pure-table-header pure-title">
<div class="pure-table-row">
<div class="pure-u-9-24 left">Name</div>
<div class="pure-u-8-24 left">Endpoint</div>
<div class="pure-u-3-24 mid hide-small">Heartbeat</div>
<div class="pure-u-3-24 mid">Status</div>
<div class="pure-u-1-24 mid"></div>
<div class="pure-u-9-24 left">Endpoint</div>
<div class="pure-u-2-24 mid hide-small">Since</div>
<div class="pure-u-2-24 mid">Info</div>
<div class="pure-u-2-24 mid">Status</div>
</div>
</div>
<% } %>
@ -143,16 +144,17 @@
<div class="pure-table-row <%= disabled %>" id="<%= id %>">
<div class="pure-u-9-24 left"><%= node.ShortName %></div>
<div class="pure-u-8-24 left"><%= node.Endpoint %></div>
<div class="pure-u-9-24 left"><%= node.Endpoint %></div>
<% var formatted = (node.LastHeartbeatAcked).substr(11, 18).slice(0, -1); %>
<div class="pure-u-3-24 mid hide-small"><%= formatted %></div>
<div class="pure-u-3-24 mid"><%= node.LastHeartbeatStatus %></div>
<div class="pure-u-2-24 mid hide-small"><%= formatted %></div>
<div class="pure-u-2-24 mid"><i class="fa fa-info-circle"></i></div>
<% if(node.Status === 'GOOD') { %>
<div class="pure-u-1-24 mid state"><i class="fa fa-check-circle"></i></div>
<div class="pure-u-2-24 mid state"><i class="fa fa-check-circle tippy" title="<%= node.LastHeartbeatStatus %>"></i></div>
<% } else { %>
<div class="pure-u-1-24 mid state"><i class="fa fa-exclamation-circle"></i></div>
<div class="pure-u-2-24 mid state"><i class="fa fa-exclamation-circle"></i></div>
<% } %>
</div>


@ -0,0 +1,108 @@
/* jshint browser: true */
/* jshint unused: false */
/* global arangoHelper, $, Backbone, templateEngine, window */
(function () {
'use strict';
window.NodeInfoView = Backbone.View.extend({
el: '#content',
template: templateEngine.createTemplate('nodeInfoView.ejs'),
initialize: function (options) {
if (window.App.isCluster) {
this.nodeId = options.nodeId;
this.dbServers = options.dbServers;
this.coordinators = options.coordinators;
}
},
remove: function () {
this.$el.empty().off(); /* off to unbind the events */
this.stopListening();
this.unbind();
delete this.el;
return this;
},
render: function () {
this.$el.html(this.template.render({entries: []}));
var callback = function () {
this.continueRender();
this.breadcrumb(arangoHelper.getCoordinatorShortName(this.nodeId));
$(window).trigger('resize');
}.bind(this);
if (!this.initCoordDone) {
this.waitForCoordinators();
}
if (!this.initDBDone) {
this.waitForDBServers(callback);
} else {
this.nodeId = window.location.hash.split('/')[1];
this.coordinator = this.coordinators.findWhere({name: this.coordname});
callback();
}
},
continueRender: function () {
var model;
if (this.coordinator) {
model = this.coordinator.toJSON();
} else {
model = this.dbServer.toJSON();
}
var renderObj = {};
renderObj.Name = model.name;
renderObj.Address = model.address;
renderObj.Status = model.status;
renderObj.Protocol = model.protocol;
renderObj.Role = model.role;
this.$el.html(this.template.render({entries: renderObj}));
},
breadcrumb: function (name) {
$('#subNavigationBar .breadcrumb').html('Node: ' + name);
},
waitForCoordinators: function (callback) {
var self = this;
window.setTimeout(function () {
if (self.coordinators.length === 0) {
self.waitForCoordinators(callback);
} else {
self.coordinator = self.coordinators.findWhere({name: self.nodeId});
self.initCoordDone = true;
if (callback) {
callback();
}
}
}, 200);
},
waitForDBServers: function (callback) {
var self = this;
window.setTimeout(function () {
if (self.dbServers.length === 0) {
self.waitForDBServers(callback);
} else {
self.initDBDone = true;
self.dbServers.each(function (model) {
if (model.get('id') === self.nodeId) {
self.dbServer = model;
}
});
callback();
}
}, 200);
}
});
}());


@ -30,6 +30,14 @@
}
},
remove: function () {
this.$el.empty().off(); /* off to unbind the events */
this.stopListening();
this.unbind();
delete this.el;
return this;
},
breadcrumb: function (name) {
$('#subNavigationBar .breadcrumb').html('Node: ' + name);
},


@ -22,6 +22,14 @@
'keyup #plannedDBs': 'checkKey'
},
remove: function () {
this.$el.empty().off(); /* off to unbind the events */
this.stopListening();
this.unbind();
delete this.el;
return this;
},
checkKey: function (e) {
if (e.keyCode === 13) {
var self = this;
@ -121,11 +129,16 @@
},
navigateToNode: function (elem) {
var name = $(elem.currentTarget).attr('node').slice(0, -5);
if ($(elem.target).hasClass('fa-info-circle')) {
window.App.navigate('#nodeInfo/' + encodeURIComponent(name), {trigger: true});
return;
}
if ($(elem.currentTarget).hasClass('noHover')) {
return;
}
var name = $(elem.currentTarget).attr('node').slice(0, -5);
window.App.navigate('#node/' + encodeURIComponent(name), {trigger: true});
},


@ -33,8 +33,9 @@
.pure-table-body {
.fa-check-circle,
.fa-info-circle,
.fa-exclamation-circle {
font-size: 15pt;
font-size: 13pt;
}
}


@ -80,6 +80,15 @@ function analyzeCoreDump (instanceInfo, options, storeArangodPath, pid) {
executeExternalAndWait('/bin/bash', args);
GDB_OUTPUT = fs.read(gdbOutputFile);
print(GDB_OUTPUT);
command = 'gdb ' + storeArangodPath + ' ';
if (options.coreDirectory === '') {
command += 'core';
} else {
command += options.coreDirectory;
}
return command;
}
// //////////////////////////////////////////////////////////////////////////////
@ -112,6 +121,7 @@ function analyzeCoreDumpMac (instanceInfo, options, storeArangodPath, pid) {
executeExternalAndWait('/bin/bash', args);
GDB_OUTPUT = fs.read(lldbOutputFile);
print(GDB_OUTPUT);
return 'lldb ' + storeArangodPath + ' -c /cores/core.' + pid;
}
// //////////////////////////////////////////////////////////////////////////////
@ -144,6 +154,8 @@ function analyzeCoreDumpWindows (instanceInfo) {
print('running cdb ' + JSON.stringify(args));
executeExternalAndWait('cdb', args);
return 'cdb ' + args.join(' ');
}
// //////////////////////////////////////////////////////////////////////////////
@ -189,24 +201,19 @@ function analyzeCrash (binary, arangod, options, checkStr) {
yaml.safeDump(arangod) +
'marking build as crashy.' + RESET);
let corePath = (options.coreDirectory === '')
? 'core'
: options.coreDirectory;
arangod.exitStatus.gdbHint = 'Run debugger with "gdb ' +
storeArangodPath + ' ' + corePath;
let hint = '';
if (platform.substr(0, 3) === 'win') {
// Windows: wait for procdump to do its job...
statusExternal(arangod.monitor, true);
analyzeCoreDumpWindows(arangod);
hint = analyzeCoreDumpWindows(arangod);
} else if (platform === 'darwin') {
fs.copyFile(binary, storeArangodPath);
analyzeCoreDumpMac(arangod, options, storeArangodPath, arangod.pid);
hint = analyzeCoreDumpMac(arangod, options, storeArangodPath, arangod.pid);
} else {
fs.copyFile(binary, storeArangodPath);
analyzeCoreDump(arangod, options, storeArangodPath, arangod.pid);
hint = analyzeCoreDump(arangod, options, storeArangodPath, arangod.pid);
}
arangod.exitStatus.gdbHint = 'Run debugger with "' + hint + '"';
print(RESET);
}


@ -790,6 +790,7 @@ function shutdownInstance (instanceInfo, options, forceTerminate) {
} else if (arangod.exitStatus.status !== 'TERMINATED') {
if (arangod.exitStatus.hasOwnProperty('signal')) {
analyzeServerCrash(arangod, options, 'instance Shutdown - ' + arangod.exitStatus.signal);
serverCrashed = true;
}
} else {
print('Server shutdown: Success: pid', arangod.pid);


@ -347,8 +347,8 @@ function printTraversalDetails (traversals) {
maxEdgeCollectionNameStrLen = node.edgeCollectionNameStrLen;
}
}
if (node.hasOwnProperty('traversalFlags')) {
var opts = optify(node.traversalFlags);
if (node.hasOwnProperty('options')) {
var opts = optify(node.options);
if (opts.length > maxOptionsLen) {
maxOptionsLen = opts.length;
}
@ -384,8 +384,8 @@ function printTraversalDetails (traversals) {
line += pad(1 + maxEdgeCollectionNameStrLen) + ' ';
}
if (traversals[i].hasOwnProperty('traversalFlags')) {
line += optify(traversals[i].traversalFlags, true) + pad(1 + maxOptionsLen - optify(traversals[i].traversalFlags, false).length) + ' ';
if (traversals[i].hasOwnProperty('options')) {
line += optify(traversals[i].options, true) + pad(1 + maxOptionsLen - optify(traversals[i].options, false).length) + ' ';
} else {
line += pad(1 + maxOptionsLen) + ' ';
}
@ -856,7 +856,7 @@ function processQuery (query, explain) {
return keyword('FOR') + ' ' + variableName(node.outVariable) + ' ' + keyword('IN') + ' ' + collection(node.collection) + ' ' + annotation('/* ' + (node.reverse ? 'reverse ' : '') + node.index.type + ' index scan */');
case 'TraversalNode':
node.minMaxDepth = node.traversalFlags.minDepth + '..' + node.traversalFlags.maxDepth;
node.minMaxDepth = node.options.minDepth + '..' + node.options.maxDepth;
node.minMaxDepthLen = node.minMaxDepth.length;
rc = keyword('FOR ');


@ -644,6 +644,10 @@ var checkIfMayBeDropped = function (colName, graphName, graphs) {
var result = true;
graphs.forEach(
function (graph) {
if (result === false) {
// Short circuit
return;
}
if (graph._key === graphName) {
return;
}
@ -2008,44 +2012,47 @@ exports._drop = function (graphId, dropCollections) {
if (dropCollections === true) {
graphs = exports._listObjects();
// Here we collect all collections
// that are leading for distribution
var initialCollections = new Set();
let dropColCB = (name) => {
if (checkIfMayBeDropped(name, graph._key, graphs)) {
try {
let colObj = db[name];
if (colObj !== undefined) {
// If it is undefined the collection is gone already
if (colObj.properties().distributeShardsLike !== undefined) {
db._drop(name);
} else {
initialCollections.add(name);
}
}
} catch (ignore) {}
}
};
// drop orphans
if (!graph.orphanCollections) {
graph.orphanCollections = [];
}
graph.orphanCollections.forEach(dropColCB);
var edgeDefinitions = graph.edgeDefinitions;
edgeDefinitions.forEach(
function (edgeDefinition) {
var from = edgeDefinition.from;
var to = edgeDefinition.to;
var collection = edgeDefinition.collection;
if (checkIfMayBeDropped(collection, graph._key, graphs)) {
db._drop(collection);
}
from.forEach(
function (col) {
if (checkIfMayBeDropped(col, graph._key, graphs)) {
db._drop(col);
}
}
);
to.forEach(
function (col) {
if (checkIfMayBeDropped(col, graph._key, graphs)) {
db._drop(col);
}
}
);
dropColCB(edgeDefinition.collection);
from.forEach(dropColCB);
to.forEach(dropColCB);
}
);
// drop orphans
if (!graph.orphanCollections) {
graph.orphanCollections = [];
for (let c of initialCollections) {
try {
db._drop(c);
} catch (e) {
console.error("Failed to Drop: '" + c + "' reason: " + e.message);
}
}
graph.orphanCollections.forEach(
function (oC) {
if (checkIfMayBeDropped(oC, graph._key, graphs)) {
try {
db._drop(oC);
} catch (ignore) {}
}
}
);
}
gdb.remove(graphId);


@ -58,7 +58,7 @@ function dumpTestSuite () {
////////////////////////////////////////////////////////////////////////////////
/// @brief test the empty collection
////////////////////////////////////////////////////////////////////////////////
testEmpty : function () {
var c = db._collection("UnitTestsDumpEmpty");
var p = c.properties();
@ -76,7 +76,7 @@ function dumpTestSuite () {
////////////////////////////////////////////////////////////////////////////////
/// @brief test the collection with many documents
////////////////////////////////////////////////////////////////////////////////
testMany : function () {
var c = db._collection("UnitTestsDumpMany");
var p = c.properties();
@ -101,7 +101,7 @@ function dumpTestSuite () {
////////////////////////////////////////////////////////////////////////////////
/// @brief test the edges collection
////////////////////////////////////////////////////////////////////////////////
testEdges : function () {
var c = db._collection("UnitTestsDumpEdges");
var p = c.properties();
@ -128,7 +128,7 @@ function dumpTestSuite () {
////////////////////////////////////////////////////////////////////////////////
/// @brief test the order of documents
////////////////////////////////////////////////////////////////////////////////
testOrder : function () {
var c = db._collection("UnitTestsDumpOrder");
var p = c.properties();
@ -146,7 +146,7 @@ function dumpTestSuite () {
////////////////////////////////////////////////////////////////////////////////
/// @brief test document removal & update
////////////////////////////////////////////////////////////////////////////////
testRemoved : function () {
var c = db._collection("UnitTestsDumpRemoved");
var p = c.properties();
@ -178,7 +178,7 @@ function dumpTestSuite () {
////////////////////////////////////////////////////////////////////////////////
/// @brief test indexes
////////////////////////////////////////////////////////////////////////////////
testIndexes : function () {
var c = db._collection("UnitTestsDumpIndexes");
var p = c.properties();
@ -200,32 +200,32 @@ function dumpTestSuite () {
assertFalse(c.getIndexes()[2].unique);
assertFalse(c.getIndexes()[2].sparse);
assertEqual([ "a_s1", "a_s2" ], c.getIndexes()[2].fields);
assertEqual("hash", c.getIndexes()[3].type);
assertFalse(c.getIndexes()[3].unique);
assertFalse(c.getIndexes()[3].sparse);
assertEqual([ "a_h1", "a_h2" ], c.getIndexes()[3].fields);
assertEqual("skiplist", c.getIndexes()[4].type);
assertTrue(c.getIndexes()[4].unique);
assertFalse(c.getIndexes()[4].sparse);
assertEqual([ "a_su" ], c.getIndexes()[4].fields);
assertEqual("hash", c.getIndexes()[5].type);
assertFalse(c.getIndexes()[5].unique);
assertTrue(c.getIndexes()[5].sparse);
assertEqual([ "a_hs1", "a_hs2" ], c.getIndexes()[5].fields);
assertEqual("skiplist", c.getIndexes()[6].type);
assertFalse(c.getIndexes()[6].unique);
assertTrue(c.getIndexes()[6].sparse);
assertEqual([ "a_ss1", "a_ss2" ], c.getIndexes()[6].fields);
if (db._engine().name !== "rocksdb") {
assertFalse(c.getIndexes()[7].unique);
assertEqual("fulltext", c.getIndexes()[7].type);
assertEqual([ "a_f" ], c.getIndexes()[7].fields);
assertEqual("geo2", c.getIndexes()[8].type);
assertEqual([ "a_la", "a_lo" ], c.getIndexes()[8].fields);
assertFalse(c.getIndexes()[8].unique);
@ -237,7 +237,7 @@ function dumpTestSuite () {
////////////////////////////////////////////////////////////////////////////////
/// @brief test truncate
////////////////////////////////////////////////////////////////////////////////
testTruncated : function () {
var c = db._collection("UnitTestsDumpTruncated");
var p = c.properties();
@ -254,7 +254,7 @@ function dumpTestSuite () {
////////////////////////////////////////////////////////////////////////////////
/// @brief test keygen
////////////////////////////////////////////////////////////////////////////////
testKeygen : function () {
var c = db._collection("UnitTestsDumpKeygen");
var p = c.properties();
@ -270,7 +270,7 @@ function dumpTestSuite () {
assertEqual(1, c.getIndexes().length); // just primary index
assertEqual("primary", c.getIndexes()[0].type);
assertEqual(1000, c.count());
for (var i = 0; i < 1000; ++i) {
var doc = c.document(String(7 + (i * 42)));
@ -283,7 +283,7 @@ function dumpTestSuite () {
////////////////////////////////////////////////////////////////////////////////
/// @brief test strings
////////////////////////////////////////////////////////////////////////////////
testStrings : function () {
var c = db._collection("UnitTestsDumpStrings");
var p = c.properties();
@ -298,18 +298,18 @@ function dumpTestSuite () {
var texts = [
"big. Really big. He moment. Magrathea! - insisted Arthur, - I do you can sense no further because it doesn't fit properly. In my the denies faith, and the atmosphere beneath You are not cheap He was was his satchel. He throughout Magrathea. - He pushed a tore the ecstatic crowd. Trillian sat down the time, the existence is it? And he said, - What they don't want this airtight hatchway. - it's we you shooting people would represent their Poet Master Grunthos is in his mind.",
"Ultimo cadere chi sedete uso chiuso voluto ora. Scotendosi portartela meraviglia ore eguagliare incessante allegrezza per. Pensava maestro pungeva un le tornano ah perduta. Fianco bearmi storia soffio prende udi poteva una. Cammino fascino elisire orecchi pollici mio cui sai sul. Chi egli sino sei dita ben. Audace agonie groppa afa vai ultima dentro scossa sii. Alcuni mia blocco cerchi eterno andare pagine poi. Ed migliore di sommesso oh ai angoscia vorresti.",
"Ultimo cadere chi sedete uso chiuso voluto ora. Scotendosi portartela meraviglia ore eguagliare incessante allegrezza per. Pensava maestro pungeva un le tornano ah perduta. Fianco bearmi storia soffio prende udi poteva una. Cammino fascino elisire orecchi pollici mio cui sai sul. Chi egli sino sei dita ben. Audace agonie groppa afa vai ultima dentro scossa sii. Alcuni mia blocco cerchi eterno andare pagine poi. Ed migliore di sommesso oh ai angoscia vorresti.",
"Νέο βάθος όλα δομές της χάσει. Μέτωπο εγώ συνάμα τρόπος και ότι όσο εφόδιο κόσμου. Προτίμηση όλη διάφορους του όλο εύθραυστη συγγραφής. Στα άρα ένα μία οποία άλλων νόημα. Ένα αποβαίνει ρεαλισμού μελετητές θεόσταλτο την. Ποντιακών και rites κοριτσάκι παπούτσια παραμύθια πει κυρ.",
"Mody laty mnie ludu pole rury Białopiotrowiczowi. Domy puer szczypię jemy pragnął zacność czytając ojca lasy Nowa wewnątrz klasztoru. Chce nóg mego wami. Zamku stał nogą imion ludzi ustaw Białopiotrowiczem. Kwiat Niesiołowskiemu nierostrzygniony Staje brał Nauka dachu dumę Zamku Kościuszkowskie zagon. Jakowaś zapytać dwie mój sama polu uszakach obyczaje Mój. Niesiołowski książkowéj zimny mały dotychczasowa Stryj przestraszone Stolnikównie wdał śmiertelnego. Stanisława charty kapeluszach mięty bratem każda brząknął rydwan.",
"Мелких против летают хижину тмится. Чудесам возьмет звездна Взжигай. . Податель сельские мучитель сверкает очищаясь пламенем. Увы имя меч Мое сия. Устранюсь воздушных Им от До мысленные потушатся Ко Ея терпеньем.",
"Мелких против летают хижину тмится. Чудесам возьмет звездна Взжигай. . Податель сельские мучитель сверкает очищаясь пламенем. Увы имя меч Мое сия. Устранюсь воздушных Им от До мысленные потушатся Ко Ея терпеньем.",
"dotyku. Výdech spalin bude položen záplavový detekční kabely 1x UPS Newave Conceptpower DPA 5x 40kVA bude ukončen v samostatné strojovně. Samotné servery mají pouze lokalita Ústí nad zdvojenou podlahou budou zakončené GateWayí HiroLink - Monitoring rozvaděče RTN na jednotlivých záplavových zón na soustrojí resp. technologie jsou označeny SA-MKx.y. Jejich výstupem je zajištěn přestupem dat z jejich provoz. Na dveřích vylepené výstražné tabulky. Kabeláž z okruhů zálohovaných obvodů v R.MON-I. Monitoring EZS, EPS, ... možno zajistit funkčností FireWallů na strukturovanou kabeláží vedenou v měrných jímkách zapuštěných v každém racku budou zakončeny v R.MON-NrNN. Monitoring motorgenerátorů: řídící systém bude zakončena v modulu",
"ramien mu zrejme vôbec niekto je už presne čo mám tendenciu prispôsobiť dych jej páčil, čo chce. Hmm... Včera sa mi pozdava, len dočkali, ale keďže som na uz boli u jej nezavrela. Hlava jej to ve městě nepotká, hodně mi to tí vedci pri hre, keď je tu pre Designiu. Pokiaľ viete o odbornejšie texty. Prvým z tmavých uličiek, každý to niekedy, zrovnávať krok s obrovským batohom na okraj vane a temné úmysly, tak rozmýšľam, aký som si hromady mailov, čo chcem a neraz sa pokúšal o filmovém klubu v budúcnosti rozhodne uniesť mladú maliarku (Linda Rybová), ktorú so",
" 復讐者」. 復讐者」. 伯母さん 復讐者」. 復讐者」. 復讐者」. 復讐者」. 第九章 第五章 第六章 第七章 第八章. 復讐者」 伯母さん. 復讐者」 伯母さん. 第十一章 第十九章 第十四章 第十八章 第十三章 第十五章. 復讐者」 . 第十四章 第十一章 第十二章 第十五章 第十七章 手配書. 第十四章 手配書 第十八章 第十七章 第十六章 第十三章. 第十一章 第十三章 第十八章 第十四章 手配書. 復讐者」."
];
texts.forEach(function (t, i) {
var doc = c.document("text" + i);
assertEqual(t, doc.value);
});
@ -318,12 +318,12 @@ function dumpTestSuite () {
////////////////////////////////////////////////////////////////////////////////
/// @brief test committed trx
////////////////////////////////////////////////////////////////////////////////
testTransactionCommit : function () {
var c = db._collection("UnitTestsDumpTransactionCommit");
assertEqual(1000, c.count());
for (var i = 0; i < 1000; ++i) {
var doc = c.document("test" + i);
@ -336,12 +336,12 @@ function dumpTestSuite () {
////////////////////////////////////////////////////////////////////////////////
/// @brief test committed trx
////////////////////////////////////////////////////////////////////////////////
testTransactionUpdate : function () {
var c = db._collection("UnitTestsDumpTransactionUpdate");
assertEqual(1000, c.count());
for (var i = 0; i < 1000; ++i) {
var doc = c.document("test" + i);
@ -359,37 +359,37 @@ function dumpTestSuite () {
////////////////////////////////////////////////////////////////////////////////
/// @brief test aborted trx
////////////////////////////////////////////////////////////////////////////////
testTransactionAbort : function () {
var c = db._collection("UnitTestsDumpTransactionAbort");
assertEqual(1, c.count());
assertTrue(c.exists("foo"));
},
////////////////////////////////////////////////////////////////////////////////
/// @brief test persistent
////////////////////////////////////////////////////////////////////////////////
testPersistent : function () {
var c = db._collection("UnitTestsDumpPersistent");
var p = c.properties();
assertEqual(2, c.getIndexes().length);
assertEqual("primary", c.getIndexes()[0].type);
assertEqual("persistent", c.getIndexes()[1].type);
assertEqual(10000, c.count());
var res = db._query("FOR doc IN " + c.name() + " FILTER doc.value >= 0 RETURN doc").toArray();
assertEqual(10000, res.length);
res = db._query("FOR doc IN " + c.name() + " FILTER doc.value >= 5000 RETURN doc").toArray();
assertEqual(5000, res.length);
res = db._query("FOR doc IN " + c.name() + " FILTER doc.value >= 9000 RETURN doc").toArray();
assertEqual(1000, res.length);
res = db._query("FOR doc IN " + c.name() + " FILTER doc.value >= 10000 RETURN doc").toArray();
assertEqual(0, res.length);
}
@ -404,4 +404,3 @@ function dumpTestSuite () {
jsunity.run(dumpTestSuite);
return jsunity.done();

View File

@ -0,0 +1,308 @@
/*jshint globalstrict:false, strict:false, maxlen : 4000 */
/*global assertEqual, assertTrue, assertFalse */
////////////////////////////////////////////////////////////////////////////////
/// @brief tests for dump/reload
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2010-2012 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is triAGENS GmbH, Cologne, Germany
///
/// @author Jan Steemann
/// @author Copyright 2012, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////
var internal = require("internal");
var jsunity = require("jsunity");
////////////////////////////////////////////////////////////////////////////////
/// @brief test suite
////////////////////////////////////////////////////////////////////////////////
function dumpTestSuite () {
'use strict';
var db = internal.db;
return {
////////////////////////////////////////////////////////////////////////////////
/// @brief set up
////////////////////////////////////////////////////////////////////////////////
setUp : function () {
},
////////////////////////////////////////////////////////////////////////////////
/// @brief tear down
////////////////////////////////////////////////////////////////////////////////
tearDown : function () {
},
////////////////////////////////////////////////////////////////////////////////
/// @brief test the empty collection
////////////////////////////////////////////////////////////////////////////////
testEmpty : function () {
var c = db._collection("UnitTestsDumpEmpty");
var p = c.properties();
assertEqual(2, c.type()); // document
assertTrue(p.waitForSync);
assertEqual(1, c.getIndexes().length); // just primary index
assertEqual("primary", c.getIndexes()[0].type);
assertEqual(0, c.count());
},
////////////////////////////////////////////////////////////////////////////////
/// @brief test the collection with many documents
////////////////////////////////////////////////////////////////////////////////
testMany : function () {
var c = db._collection("UnitTestsDumpMany");
var p = c.properties();
assertEqual(2, c.type()); // document
assertFalse(p.waitForSync);
assertEqual(1, c.getIndexes().length); // just primary index
assertEqual("primary", c.getIndexes()[0].type);
assertEqual(100000, c.count());
// test all documents
var r = db._query(`FOR d IN ${c.name()} RETURN d`).toArray();
var rr = new Map();
for (let i = 0; i < r.length; ++i) {
rr.set(r[i]._key, r[i]);
}
for (let i = 0; i < 100000; ++i) {
var doc = rr.get("test" + i);
assertEqual(i, doc.value1);
assertEqual("this is a test", doc.value2);
assertEqual("test" + i, doc.value3);
}
},
////////////////////////////////////////////////////////////////////////////////
/// @brief test the edges collection
////////////////////////////////////////////////////////////////////////////////
testEdges : function () {
var c = db._collection("UnitTestsDumpEdges");
var p = c.properties();
assertEqual(3, c.type()); // edges
assertFalse(p.waitForSync);
assertEqual(2, c.getIndexes().length); // primary index + edges index
assertEqual("primary", c.getIndexes()[0].type);
assertEqual("edge", c.getIndexes()[1].type);
assertEqual(10, c.count());
// test all documents
for (var i = 0; i < 10; ++i) {
var doc = c.document("test" + i);
assertEqual("test" + i, doc._key);
assertEqual("UnitTestsDumpMany/test" + i, doc._from);
assertEqual("UnitTestsDumpMany/test" + (i + 1), doc._to);
assertEqual(i + "->" + (i + 1), doc.what);
}
},
////////////////////////////////////////////////////////////////////////////////
/// @brief test the order of documents
////////////////////////////////////////////////////////////////////////////////
testOrder : function () {
var c = db._collection("UnitTestsDumpOrder");
var p = c.properties();
assertEqual(2, c.type()); // document
assertFalse(p.waitForSync);
assertEqual(1, c.getIndexes().length); // just primary index
assertEqual("primary", c.getIndexes()[0].type);
assertEqual(3, c.count());
},
////////////////////////////////////////////////////////////////////////////////
/// @brief test document removal & update
////////////////////////////////////////////////////////////////////////////////
testRemoved : function () {
var c = db._collection("UnitTestsDumpRemoved");
var p = c.properties();
assertEqual(2, c.type()); // document
assertFalse(p.waitForSync);
assertEqual(1, c.getIndexes().length); // just primary index
assertEqual("primary", c.getIndexes()[0].type);
assertEqual(9000, c.count());
var i;
for (i = 0; i < 10000; ++i) {
if (i % 10 === 0) {
assertFalse(c.exists("test" + i));
}
else {
var doc = c.document("test" + i);
assertEqual(i, doc.value1);
if (i < 1000) {
assertEqual(i + 1, doc.value2);
}
}
}
},
////////////////////////////////////////////////////////////////////////////////
/// @brief test indexes
////////////////////////////////////////////////////////////////////////////////
testIndexes : function () {
var c = db._collection("UnitTestsDumpIndexes");
var p = c.properties();
assertEqual(2, c.type()); // document
assertFalse(p.waitForSync);
assertEqual(7, c.getIndexes().length);
assertEqual("primary", c.getIndexes()[0].type);
assertEqual("hash", c.getIndexes()[1].type);
assertTrue(c.getIndexes()[1].unique);
assertFalse(c.getIndexes()[1].sparse);
assertEqual([ "a_uc" ], c.getIndexes()[1].fields);
assertEqual("skiplist", c.getIndexes()[2].type);
assertFalse(c.getIndexes()[2].unique);
assertFalse(c.getIndexes()[2].sparse);
assertEqual([ "a_s1", "a_s2" ], c.getIndexes()[2].fields);
assertEqual("hash", c.getIndexes()[3].type);
assertFalse(c.getIndexes()[3].unique);
assertFalse(c.getIndexes()[3].sparse);
assertEqual([ "a_h1", "a_h2" ], c.getIndexes()[3].fields);
assertEqual("skiplist", c.getIndexes()[4].type);
assertTrue(c.getIndexes()[4].unique);
assertFalse(c.getIndexes()[4].sparse);
assertEqual([ "a_su" ], c.getIndexes()[4].fields);
assertEqual("hash", c.getIndexes()[5].type);
assertFalse(c.getIndexes()[5].unique);
assertTrue(c.getIndexes()[5].sparse);
assertEqual([ "a_hs1", "a_hs2" ], c.getIndexes()[5].fields);
assertEqual("skiplist", c.getIndexes()[6].type);
assertFalse(c.getIndexes()[6].unique);
assertTrue(c.getIndexes()[6].sparse);
assertEqual([ "a_ss1", "a_ss2" ], c.getIndexes()[6].fields);
assertEqual(0, c.count());
},
////////////////////////////////////////////////////////////////////////////////
/// @brief test truncate
////////////////////////////////////////////////////////////////////////////////
testTruncated : function () {
var c = db._collection("UnitTestsDumpTruncated");
var p = c.properties();
assertEqual(2, c.type()); // document
assertFalse(p.waitForSync);
assertEqual(1, c.getIndexes().length); // just primary index
assertEqual("primary", c.getIndexes()[0].type);
assertEqual(0, c.count());
},
////////////////////////////////////////////////////////////////////////////////
/// @brief test shards
////////////////////////////////////////////////////////////////////////////////
testShards : function () {
var c = db._collection("UnitTestsDumpShards");
var p = c.properties();
assertEqual(2, c.type()); // document
assertFalse(p.waitForSync);
assertEqual(9, p.numberOfShards);
assertEqual(1, c.getIndexes().length); // just primary index
assertEqual("primary", c.getIndexes()[0].type);
assertEqual(1000, c.count());
for (var i = 0; i < 1000; ++i) {
var doc = c.document(String(7 + (i * 42)));
assertEqual(String(7 + (i * 42)), doc._key);
assertEqual(i, doc.value);
assertEqual({ value: [ i, i ] }, doc.more);
}
},
////////////////////////////////////////////////////////////////////////////////
/// @brief test strings
////////////////////////////////////////////////////////////////////////////////
testStrings : function () {
var c = db._collection("UnitTestsDumpStrings");
var p = c.properties();
assertEqual(2, c.type()); // document
assertFalse(p.waitForSync);
assertEqual(1, c.getIndexes().length); // just primary index
assertEqual("primary", c.getIndexes()[0].type);
assertEqual(8, c.count());
var texts = [
"big. Really big. He moment. Magrathea! - insisted Arthur, - I do you can sense no further because it doesn't fit properly. In my the denies faith, and the atmosphere beneath You are not cheap He was was his satchel. He throughout Magrathea. - He pushed a tore the ecstatic crowd. Trillian sat down the time, the existence is it? And he said, - What they don't want this airtight hatchway. - it's we you shooting people would represent their Poet Master Grunthos is in his mind.",
"Ultimo cadere chi sedete uso chiuso voluto ora. Scotendosi portartela meraviglia ore eguagliare incessante allegrezza per. Pensava maestro pungeva un le tornano ah perduta. Fianco bearmi storia soffio prende udi poteva una. Cammino fascino elisire orecchi pollici mio cui sai sul. Chi egli sino sei dita ben. Audace agonie groppa afa vai ultima dentro scossa sii. Alcuni mia blocco cerchi eterno andare pagine poi. Ed migliore di sommesso oh ai angoscia vorresti.",
"Νέο βάθος όλα δομές της χάσει. Μέτωπο εγώ συνάμα τρόπος και ότι όσο εφόδιο κόσμου. Προτίμηση όλη διάφορους του όλο εύθραυστη συγγραφής. Στα άρα ένα μία οποία άλλων νόημα. Ένα αποβαίνει ρεαλισμού μελετητές θεόσταλτο την. Ποντιακών και rites κοριτσάκι παπούτσια παραμύθια πει κυρ.",
"Mody laty mnie ludu pole rury Białopiotrowiczowi. Domy puer szczypię jemy pragnął zacność czytając ojca lasy Nowa wewnątrz klasztoru. Chce nóg mego wami. Zamku stał nogą imion ludzi ustaw Białopiotrowiczem. Kwiat Niesiołowskiemu nierostrzygniony Staje brał Nauka dachu dumę Zamku Kościuszkowskie zagon. Jakowaś zapytać dwie mój sama polu uszakach obyczaje Mój. Niesiołowski książkowéj zimny mały dotychczasowa Stryj przestraszone Stolnikównie wdał śmiertelnego. Stanisława charty kapeluszach mięty bratem każda brząknął rydwan.",
"Мелких против летают хижину тмится. Чудесам возьмет звездна Взжигай. . Податель сельские мучитель сверкает очищаясь пламенем. Увы имя меч Мое сия. Устранюсь воздушных Им от До мысленные потушатся Ко Ея терпеньем.",
"dotyku. Výdech spalin bude položen záplavový detekční kabely 1x UPS Newave Conceptpower DPA 5x 40kVA bude ukončen v samostatné strojovně. Samotné servery mají pouze lokalita Ústí nad zdvojenou podlahou budou zakončené GateWayí HiroLink - Monitoring rozvaděče RTN na jednotlivých záplavových zón na soustrojí resp. technologie jsou označeny SA-MKx.y. Jejich výstupem je zajištěn přestupem dat z jejich provoz. Na dveřích vylepené výstražné tabulky. Kabeláž z okruhů zálohovaných obvodů v R.MON-I. Monitoring EZS, EPS, ... možno zajistit funkčností FireWallů na strukturovanou kabeláží vedenou v měrných jímkách zapuštěných v každém racku budou zakončeny v R.MON-NrNN. Monitoring motorgenerátorů: řídící systém bude zakončena v modulu",
"ramien mu zrejme vôbec niekto je už presne čo mám tendenciu prispôsobiť dych jej páčil, čo chce. Hmm... Včera sa mi pozdava, len dočkali, ale keďže som na uz boli u jej nezavrela. Hlava jej to ve městě nepotká, hodně mi to tí vedci pri hre, keď je tu pre Designiu. Pokiaľ viete o odbornejšie texty. Prvým z tmavých uličiek, každý to niekedy, zrovnávať krok s obrovským batohom na okraj vane a temné úmysly, tak rozmýšľam, aký som si hromady mailov, čo chcem a neraz sa pokúšal o filmovém klubu v budúcnosti rozhodne uniesť mladú maliarku (Linda Rybová), ktorú so",
" 復讐者」. 復讐者」. 伯母さん 復讐者」. 復讐者」. 復讐者」. 復讐者」. 第九章 第五章 第六章 第七章 第八章. 復讐者」 伯母さん. 復讐者」 伯母さん. 第十一章 第十九章 第十四章 第十八章 第十三章 第十五章. 復讐者」 . 第十四章 第十一章 第十二章 第十五章 第十七章 手配書. 第十四章 手配書 第十八章 第十七章 第十六章 第十三章. 第十一章 第十三章 第十八章 第十四章 手配書. 復讐者」."
];
texts.forEach(function (t, i) {
var doc = c.document("text" + i);
assertEqual(t, doc.value);
});
}
};
}
////////////////////////////////////////////////////////////////////////////////
/// @brief executes the test suite
////////////////////////////////////////////////////////////////////////////////
jsunity.run(dumpTestSuite);
return jsunity.done();

View File

@ -185,7 +185,7 @@ function dumpTestSuite () {
assertFalse(p.waitForSync);
assertFalse(p.isVolatile);
assertEqual(9, c.getIndexes().length);
assertEqual(7, c.getIndexes().length);
assertEqual("primary", c.getIndexes()[0].type);
assertEqual("hash", c.getIndexes()[1].type);
@ -241,7 +241,7 @@ function dumpTestSuite () {
assertEqual(2, c.type()); // document
assertFalse(p.waitForSync);
assertEqual(1, c.getIndexes().length); // just primary index
assertEqual("primary", c.getIndexes()[0].type);
assertEqual(0, c.count());

View File

@ -292,7 +292,7 @@ class AssocMulti {
void batchInsert(std::function<void*()> const& contextCreator,
std::function<void(void*)> const& contextDestroyer,
std::shared_ptr<std::vector<Element> const> data,
LocalTaskQueue* queue) {
std::shared_ptr<LocalTaskQueue> queue) {
if (data->empty()) {
// nothing to do
return;

View File

@ -94,7 +94,7 @@ class MultiInserterTask : public LocalTask {
public:
MultiInserterTask(
LocalTaskQueue* queue, std::function<void(void*)> contextDestroyer,
std::shared_ptr<LocalTaskQueue> queue, std::function<void(void*)> contextDestroyer,
std::vector<Bucket>* buckets,
std::function<Element(void*, Element const&, uint64_t, Bucket&,
bool const, bool const)>
@ -168,7 +168,7 @@ class MultiPartitionerTask : public LocalTask {
public:
MultiPartitionerTask(
LocalTaskQueue* queue,
std::shared_ptr<LocalTaskQueue> queue,
std::function<uint64_t(void*, Element const&, bool)> hashElement,
std::function<void(void*)> const& contextDestroyer,
std::shared_ptr<std::vector<Element> const> data, size_t lower,

View File

@ -553,7 +553,7 @@ class AssocUnique {
void batchInsert(std::function<void*()> const& contextCreator,
std::function<void(void*)> const& contextDestroyer,
std::shared_ptr<std::vector<Element> const> data,
arangodb::basics::LocalTaskQueue* queue) {
std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) {
TRI_ASSERT(queue != nullptr);
if (data->empty()) {
// nothing to do

View File

@ -70,7 +70,7 @@ class UniqueInserterTask : public LocalTask {
public:
UniqueInserterTask(
LocalTaskQueue* queue, std::function<void(void*)> contextDestroyer,
std::shared_ptr<LocalTaskQueue> queue, std::function<void(void*)> contextDestroyer,
std::vector<Bucket>* buckets,
std::function<int(void*, Element const&, Bucket&, uint64_t)> doInsert,
std::function<bool(void*, Bucket&, uint64_t)> checkResize, size_t i,
@ -140,7 +140,7 @@ class UniquePartitionerTask : public LocalTask {
public:
UniquePartitionerTask(
LocalTaskQueue* queue,
std::shared_ptr<LocalTaskQueue> queue,
std::function<uint64_t(void*, Element const&)> hashElement,
std::function<void(void*)> const& contextDestroyer,
std::shared_ptr<std::vector<Element> const> data, size_t lower,

View File

@ -34,7 +34,7 @@ using namespace arangodb::basics;
/// @brief create a task tied to the specified queue
////////////////////////////////////////////////////////////////////////////////
LocalTask::LocalTask(LocalTaskQueue* queue) : _queue(queue) {}
LocalTask::LocalTask(std::shared_ptr<LocalTaskQueue> queue) : _queue(queue) {}
////////////////////////////////////////////////////////////////////////////////
/// @brief dispatch this task to the underlying io_service
@ -58,7 +58,7 @@ void LocalTask::dispatch() {
/// @brief create a callback task tied to the specified queue
////////////////////////////////////////////////////////////////////////////////
LocalCallbackTask::LocalCallbackTask(LocalTaskQueue* queue,
LocalCallbackTask::LocalCallbackTask(std::shared_ptr<LocalTaskQueue> queue,
std::function<void()> cb)
: _queue(queue), _cb(cb) {}

View File

@ -43,7 +43,7 @@ class LocalTask : public std::enable_shared_from_this<LocalTask> {
LocalTask(LocalTask const&) = delete;
LocalTask& operator=(LocalTask const&) = delete;
explicit LocalTask(LocalTaskQueue* queue);
explicit LocalTask(std::shared_ptr<LocalTaskQueue> queue);
virtual ~LocalTask() {}
virtual void run() = 0;
@ -54,7 +54,7 @@ class LocalTask : public std::enable_shared_from_this<LocalTask> {
/// @brief the underlying queue
//////////////////////////////////////////////////////////////////////////////
LocalTaskQueue* _queue;
std::shared_ptr<LocalTaskQueue> _queue;
};
class LocalCallbackTask
@ -64,7 +64,7 @@ class LocalCallbackTask
LocalCallbackTask(LocalCallbackTask const&) = delete;
LocalCallbackTask& operator=(LocalCallbackTask const&) = delete;
LocalCallbackTask(LocalTaskQueue* queue, std::function<void()> cb);
LocalCallbackTask(std::shared_ptr<LocalTaskQueue> queue, std::function<void()> cb);
virtual ~LocalCallbackTask() {}
virtual void run();
@ -75,7 +75,7 @@ class LocalCallbackTask
/// @brief the underlying queue
//////////////////////////////////////////////////////////////////////////////
LocalTaskQueue* _queue;
std::shared_ptr<LocalTaskQueue> _queue;
//////////////////////////////////////////////////////////////////////////////
/// @brief the callback executed by run() (any exceptions will be caught and

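The recurring edit in the hunks above swaps raw LocalTaskQueue* parameters and members for std::shared_ptr<LocalTaskQueue>, so each task shares ownership of the queue instead of merely borrowing it. Below is a minimal sketch of that ownership pattern; the TaskQueue and Task names, members, and main() driver are illustrative stand-ins only, not the real LocalTaskQueue/LocalTask API:

#include <functional>
#include <memory>
#include <utility>
#include <vector>

// Illustrative stand-ins only -- not the actual ArangoDB classes.
class TaskQueue {
 public:
  void post(std::function<void()> fn) { _pending.push_back(std::move(fn)); }
  void drain() {
    for (auto& fn : _pending) { fn(); }
    _pending.clear();
  }
 private:
  std::vector<std::function<void()>> _pending;
};

class Task {
 public:
  // Holding the queue via shared_ptr keeps it alive for as long as any task
  // still references it, even after the code that created the queue returns.
  explicit Task(std::shared_ptr<TaskQueue> queue) : _queue(std::move(queue)) {}
  void run() { /* do work; may enqueue follow-ups via _queue->post(...) */ }
 private:
  std::shared_ptr<TaskQueue> _queue;
};

int main() {
  auto queue = std::make_shared<TaskQueue>();  // previously a stack-owned object
  Task task(queue);                            // the task now co-owns the queue
  queue->post([&task]() { task.run(); });
  queue->drain();
  return 0;
}

With the earlier raw-pointer signatures, a task completing after the queue's creator had destroyed the queue would dereference a dangling pointer; shared ownership, presumably the motivation here, makes the relative destruction order a non-issue.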
View File

@ -179,12 +179,13 @@ Thread::~Thread() {
}
_state.store(ThreadState::DETACHED);
return;
}
state = _state.load();
if (state != ThreadState::DETACHED && state != ThreadState::CREATED) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "thread is not detached but " << stringify(state)
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "thread '" << _name << "' is not detached but " << stringify(state)
<< ". shutting down hard";
FATAL_ERROR_ABORT();
}