mirror of https://gitee.com/bigwinds/arangodb
Merge branch 'devel' of ssh://github.com/ArangoDB/ArangoDB into devel
commit 3afd200599
@@ -340,8 +340,8 @@ if (CMAKE_COMPILER_IS_CLANG)
endif ()

# need c++11
# XXX this should really be set on a per target level using cmake compile_features capabilties
set(CMAKE_CXX_STANDARD 11)
include(CheckCXX11Features)

# need threads
find_package(Threads REQUIRED)
@@ -101,19 +101,7 @@ Agent::~Agent() {
  }
  }

-  if (!isStopping()) {
-
-    {
-      CONDITION_LOCKER(guardW, _waitForCV);
-      guardW.broadcast();
-    }
-    {
-      CONDITION_LOCKER(guardA, _appendCV);
-      guardA.broadcast();
-    }
-
-    shutdown();
-  }
+  shutdown();

}
@@ -483,7 +483,11 @@ void Supervision::run() {
  // that running the supervision does not make sense and will indeed
  // lead to horrible errors:
  while (!this->isStopping()) {
-    std::this_thread::sleep_for(std::chrono::duration<double>(5.0));
+    {
+      CONDITION_LOCKER(guard, _cv);
+      _cv.wait(static_cast<uint64_t>(1000000 * _frequency));
+    }

    MUTEX_LOCKER(locker, _lock);
    try {
      _snapshot = _agent->readDB().get(_agencyPrefix);
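Note on the Supervision hunk above: a fixed five-second sleep becomes a timed wait on the supervision's condition variable, so a shutdown broadcast can wake the loop immediately instead of stalling for up to five seconds, and the period follows _frequency. A minimal stand-alone sketch of the same pattern, using plain std::condition_variable rather than ArangoDB's CONDITION_LOCKER (all names here are illustrative, not the real classes):

    #include <chrono>
    #include <condition_variable>
    #include <mutex>

    class Worker {
     public:
      // Wake the loop early, e.g. on shutdown.
      void notify() {
        std::lock_guard<std::mutex> guard(_mutex);
        _stopping = true;
        _cv.notify_all();
      }

      void run(std::chrono::microseconds frequency) {
        std::unique_lock<std::mutex> guard(_mutex);
        while (!_stopping) {
          // returns early when notify() fires; otherwise times out after
          // one period, like _cv.wait(1000000 * _frequency) in the hunk
          _cv.wait_for(guard, frequency);
          // ... one supervision round would run here (lock still held) ...
        }
      }

     private:
      std::mutex _mutex;
      std::condition_variable _cv;
      bool _stopping = false;
    };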
@@ -414,6 +414,8 @@ target_link_libraries(${BIN_ARANGOD}
  arangoserver
)

+target_compile_features(${BIN_ARANGOD} PRIVATE cxx_constexpr)
+
install(
  TARGETS ${BIN_ARANGOD}
  RUNTIME DESTINATION ${CMAKE_INSTALL_SBINDIR}
@@ -514,7 +514,7 @@ void Index::batchInsert(
    transaction::Methods* trx,
    std::vector<std::pair<TRI_voc_rid_t, arangodb::velocypack::Slice>> const&
        documents,
-    arangodb::basics::LocalTaskQueue* queue) {
+    std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) {
  for (auto const& it : documents) {
    int status = insert(trx, it.first, it.second, false);
    if (status != TRI_ERROR_NO_ERROR) {
@@ -250,7 +250,7 @@ class Index {
  virtual void batchInsert(
      transaction::Methods*,
      std::vector<std::pair<TRI_voc_rid_t, arangodb::velocypack::Slice>> const&,
-      arangodb::basics::LocalTaskQueue* queue = nullptr);
+      std::shared_ptr<arangodb::basics::LocalTaskQueue> queue);

  virtual int unload() = 0;
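This signature change is the theme of most hunks in this commit: every batchInsert variant stops taking a raw LocalTaskQueue* (previously defaulted to nullptr) and takes a std::shared_ptr instead, so index-filler tasks that may still be scheduled keep the queue alive after the caller returns. A hedged sketch of why the raw pointer was fragile (TaskQueue is a stand-in, not ArangoDB's LocalTaskQueue):

    #include <functional>
    #include <memory>
    #include <vector>

    struct TaskQueue {
      std::vector<std::function<void()>> tasks;
      void post(std::function<void()> fn) { tasks.push_back(std::move(fn)); }
    };

    // Before: the caller owned the queue on its stack. A task still
    // referencing `queue` after the caller returned would dangle.
    //   void batchInsert(TaskQueue* queue);
    //
    // After: each task copies the shared_ptr, so the queue lives until
    // the last task that references it has finished, wherever it runs.
    void batchInsert(std::shared_ptr<TaskQueue> queue) {
      queue->post([queue] {
        // safe: this closure co-owns the queue
      });
    }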
@@ -73,7 +73,7 @@ namespace {
class MMFilesIndexFillerTask : public basics::LocalTask {
 public:
  MMFilesIndexFillerTask(
-      basics::LocalTaskQueue* queue, transaction::Methods* trx, Index* idx,
+      std::shared_ptr<basics::LocalTaskQueue> queue, transaction::Methods* trx, Index* idx,
      std::vector<std::pair<TRI_voc_rid_t, VPackSlice>> const& documents)
      : LocalTask(queue), _trx(trx), _idx(idx), _documents(documents) {}
@@ -1464,7 +1464,7 @@ bool MMFilesCollection::openIndex(VPackSlice const& description,

/// @brief initializes an index with a set of existing documents
void MMFilesCollection::fillIndex(
-    arangodb::basics::LocalTaskQueue* queue, transaction::Methods* trx,
+    std::shared_ptr<arangodb::basics::LocalTaskQueue> queue, transaction::Methods* trx,
    arangodb::Index* idx,
    std::vector<std::pair<TRI_voc_rid_t, VPackSlice>> const& documents,
    bool skipPersistent) {
@@ -1554,13 +1554,14 @@ int MMFilesCollection::fillIndexes(
  TRI_ASSERT(SchedulerFeature::SCHEDULER != nullptr);
  auto ioService = SchedulerFeature::SCHEDULER->ioService();
  TRI_ASSERT(ioService != nullptr);
-  arangodb::basics::LocalTaskQueue queue(ioService);

  PerformanceLogScope logScope(
      std::string("fill-indexes-document-collection { collection: ") +
      _logicalCollection->vocbase()->name() + "/" + _logicalCollection->name() +
      " }, indexes: " + std::to_string(n - 1));

+  auto queue = std::make_shared<arangodb::basics::LocalTaskQueue>(ioService);
+
  try {
    TRI_ASSERT(!ServerState::instance()->isCoordinator());
@@ -1594,12 +1595,12 @@ int MMFilesCollection::fillIndexes(
    if (idx->type() == Index::IndexType::TRI_IDX_TYPE_PRIMARY_INDEX) {
      continue;
    }
-    fillIndex(&queue, trx, idx.get(), documents, skipPersistent);
+    fillIndex(queue, trx, idx.get(), documents, skipPersistent);
  }

-  queue.dispatchAndWait();
+  queue->dispatchAndWait();

-  if (queue.status() != TRI_ERROR_NO_ERROR) {
+  if (queue->status() != TRI_ERROR_NO_ERROR) {
    rollbackAll();
    rolledBack = true;
  }
@@ -1626,7 +1627,7 @@ int MMFilesCollection::fillIndexes(
      if (documents.size() == blockSize) {
        // now actually fill the secondary indexes
        insertInAllIndexes();
-        if (queue.status() != TRI_ERROR_NO_ERROR) {
+        if (queue->status() != TRI_ERROR_NO_ERROR) {
          break;
        }
        documents.clear();
@@ -1636,33 +1637,33 @@ int MMFilesCollection::fillIndexes(
    }

    // process the remainder of the documents
-    if (queue.status() == TRI_ERROR_NO_ERROR && !documents.empty()) {
+    if (queue->status() == TRI_ERROR_NO_ERROR && !documents.empty()) {
      insertInAllIndexes();
    }
  } catch (arangodb::basics::Exception const& ex) {
-    queue.setStatus(ex.code());
+    queue->setStatus(ex.code());
    LOG_TOPIC(WARN, arangodb::Logger::FIXME)
        << "caught exception while filling indexes: " << ex.what();
  } catch (std::bad_alloc const&) {
-    queue.setStatus(TRI_ERROR_OUT_OF_MEMORY);
+    queue->setStatus(TRI_ERROR_OUT_OF_MEMORY);
  } catch (std::exception const& ex) {
    LOG_TOPIC(WARN, arangodb::Logger::FIXME)
        << "caught exception while filling indexes: " << ex.what();
-    queue.setStatus(TRI_ERROR_INTERNAL);
+    queue->setStatus(TRI_ERROR_INTERNAL);
  } catch (...) {
    LOG_TOPIC(WARN, arangodb::Logger::FIXME)
        << "caught unknown exception while filling indexes";
-    queue.setStatus(TRI_ERROR_INTERNAL);
+    queue->setStatus(TRI_ERROR_INTERNAL);
  }

-  if (queue.status() != TRI_ERROR_NO_ERROR && !rolledBack) {
+  if (queue->status() != TRI_ERROR_NO_ERROR && !rolledBack) {
    try {
      rollbackAll();
    } catch (...) {
    }
  }

-  return queue.status();
+  return queue->status();
}

/// @brief opens an existing collection
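fillIndexes funnels every failure through the queue's status word: exceptions from the filler tasks are converted to error codes via setStatus, and the function returns queue->status() after dispatchAndWait. A compressed sketch of that pattern (names and error codes here are placeholders, not the real LocalTaskQueue API beyond what the hunk shows):

    #include <atomic>
    #include <new>

    constexpr int kNoError = 0;      // placeholder error codes,
    constexpr int kOutOfMemory = 1;  // not ArangoDB's real values
    constexpr int kInternal = 2;

    struct StatusQueue {
      std::atomic<int> status{kNoError};
      void setStatus(int code) { status.store(code); }
    };

    int fillAll(StatusQueue& queue) {
      try {
        // dispatch index-filler tasks and wait for them...
      } catch (std::bad_alloc const&) {
        queue.setStatus(kOutOfMemory);
      } catch (...) {
        queue.setStatus(kInternal);
      }
      return queue.status.load();  // one exit path for success and failure
    }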
@@ -398,7 +398,7 @@ class MMFilesCollection final : public PhysicalCollection {
  bool openIndex(VPackSlice const& description, transaction::Methods* trx);

  /// @brief initializes an index with all existing documents
-  void fillIndex(basics::LocalTaskQueue*, transaction::Methods*, Index*,
+  void fillIndex(std::shared_ptr<basics::LocalTaskQueue>, transaction::Methods*, Index*,
                 std::vector<std::pair<TRI_voc_rid_t, VPackSlice>> const&,
                 bool);
@@ -329,7 +329,7 @@ int MMFilesEdgeIndex::remove(transaction::Methods* trx,
void MMFilesEdgeIndex::batchInsert(
    transaction::Methods* trx,
    std::vector<std::pair<TRI_voc_rid_t, VPackSlice>> const& documents,
-    arangodb::basics::LocalTaskQueue* queue) {
+    std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) {
  if (documents.empty()) {
    return;
  }
@@ -111,7 +111,7 @@ class MMFilesEdgeIndex final : public Index {

  void batchInsert(transaction::Methods*,
                   std::vector<std::pair<TRI_voc_rid_t, VPackSlice>> const&,
-                   arangodb::basics::LocalTaskQueue*) override;
+                   std::shared_ptr<arangodb::basics::LocalTaskQueue>) override;

  int unload() override;
@@ -644,7 +644,7 @@ int MMFilesHashIndex::remove(transaction::Methods* trx,
void MMFilesHashIndex::batchInsert(
    transaction::Methods* trx,
    std::vector<std::pair<TRI_voc_rid_t, VPackSlice>> const& documents,
-    arangodb::basics::LocalTaskQueue* queue) {
+    std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) {
  TRI_ASSERT(queue != nullptr);
  if (_unique) {
    batchInsertUnique(trx, documents, queue);
@@ -760,7 +760,7 @@ int MMFilesHashIndex::insertUnique(transaction::Methods* trx,
void MMFilesHashIndex::batchInsertUnique(
    transaction::Methods* trx,
    std::vector<std::pair<TRI_voc_rid_t, VPackSlice>> const& documents,
-    arangodb::basics::LocalTaskQueue* queue) {
+    std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) {
  TRI_ASSERT(queue != nullptr);
  std::shared_ptr<std::vector<MMFilesHashIndexElement*>> elements;
  elements.reset(new std::vector<MMFilesHashIndexElement*>());
@@ -880,7 +880,7 @@ int MMFilesHashIndex::insertMulti(transaction::Methods* trx,
void MMFilesHashIndex::batchInsertMulti(
    transaction::Methods* trx,
    std::vector<std::pair<TRI_voc_rid_t, VPackSlice>> const& documents,
-    arangodb::basics::LocalTaskQueue* queue) {
+    std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) {
  TRI_ASSERT(queue != nullptr);
  std::shared_ptr<std::vector<MMFilesHashIndexElement*>> elements;
  elements.reset(new std::vector<MMFilesHashIndexElement*>());
@@ -173,7 +173,7 @@ class MMFilesHashIndex final : public MMFilesPathBasedIndex {
  void batchInsert(
      transaction::Methods*,
      std::vector<std::pair<TRI_voc_rid_t, arangodb::velocypack::Slice>> const&,
-      arangodb::basics::LocalTaskQueue* queue = nullptr) override;
+      std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) override;

  int unload() override;
@@ -205,7 +205,7 @@ class MMFilesHashIndex final : public MMFilesPathBasedIndex {
  void batchInsertUnique(
      transaction::Methods*,
      std::vector<std::pair<TRI_voc_rid_t, arangodb::velocypack::Slice>> const&,
-      arangodb::basics::LocalTaskQueue* queue = nullptr);
+      std::shared_ptr<arangodb::basics::LocalTaskQueue> queue);

  int insertMulti(transaction::Methods*, TRI_voc_rid_t,
                  arangodb::velocypack::Slice const&, bool isRollback);
@@ -213,7 +213,7 @@ class MMFilesHashIndex final : public MMFilesPathBasedIndex {
  void batchInsertMulti(
      transaction::Methods*,
      std::vector<std::pair<TRI_voc_rid_t, arangodb::velocypack::Slice>> const&,
-      arangodb::basics::LocalTaskQueue* queue = nullptr);
+      std::shared_ptr<arangodb::basics::LocalTaskQueue> queue);

  int removeUniqueElement(transaction::Methods*, MMFilesHashIndexElement*,
                          bool);
@@ -1592,14 +1592,15 @@ int MMFilesRestReplicationHandler::processRestoreCollectionCoordinator(
    if (dropExisting) {
      int res = ci->dropCollectionCoordinator(dbName, col->cid_as_string(),
                                              errorMsg, 0.0);
-      if (res == TRI_ERROR_FORBIDDEN) {
+      if (res == TRI_ERROR_FORBIDDEN ||
+          res == TRI_ERROR_CLUSTER_MUST_NOT_DROP_COLL_OTHER_DISTRIBUTESHARDSLIKE) {
        // some collections must not be dropped
        res = truncateCollectionOnCoordinator(dbName, name);
        if (res != TRI_ERROR_NO_ERROR) {
          errorMsg =
              "unable to truncate collection (dropping is forbidden): " + name;
          return res;
        }
        return res;
      }

      if (res != TRI_ERROR_NO_ERROR) {
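The hunk above widens the fallback: a coordinator-side restore that may not drop a collection (forbidden, or because other collections distribute their shards like it) truncates it instead. The control flow, reduced to a sketch (the enum and helper names below are hypothetical stand-ins for the coordinator calls in the hunk):

    enum Err { Ok, Forbidden, MustNotDrop, OtherError };

    // hypothetical stand-ins for dropCollectionCoordinator /
    // truncateCollectionOnCoordinator
    Err dropCollection() { return MustNotDrop; }
    Err truncateCollection() { return Ok; }

    Err restoreExisting() {
      Err res = dropCollection();
      if (res == Forbidden || res == MustNotDrop) {
        // such collections cannot be dropped: fall back to truncating them
        return truncateCollection();
      }
      return res;  // dropped, or a genuine error to report
    }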
@@ -32,18 +32,18 @@
#include "Basics/StringUtils.h"
#include "Basics/VelocyPackHelper.h"
#include "Indexes/Index.h"
#include "Indexes/IndexIterator.h"
#include "Logger/Logger.h"
#include "StorageEngine/EngineSelectorFeature.h"
#include "MMFiles/MMFilesCollection.h" //TODO -- Remove -- ditches
#include "MMFiles/MMFilesDatafileHelper.h"
#include "MMFiles/MMFilesDitch.h"
#include "MMFiles/MMFilesIndexElement.h"
#include "MMFiles/MMFilesPrimaryIndex.h"
#include "RestServer/DatabaseFeature.h"
#include "RocksDBEngine/RocksDBCommon.h"
#include "SimpleHttpClient/SimpleHttpClient.h"
#include "SimpleHttpClient/SimpleHttpResult.h"
#include "StorageEngine/EngineSelectorFeature.h"
#include "Indexes/IndexIterator.h"
#include "StorageEngine/EngineSelectorFeature.h"
#include "StorageEngine/StorageEngine.h"
#include "Transaction/Helpers.h"
#include "Utils/CollectionGuard.h"
@@ -64,6 +64,7 @@ using namespace arangodb;
using namespace arangodb::basics;
using namespace arangodb::httpclient;
using namespace arangodb::rest;
+using namespace arangodb::rocksutils;

////////////////////////////////////////////////////////////////////////////////
/// @brief performs a binary search for the given key in the markers vector
@@ -273,7 +274,9 @@ int InitialSyncer::run(std::string& errorMsg, bool incremental) {
        errorMsg = "got invalid response from master at " +
                   _masterInfo._endpoint + ": invalid JSON";
      } else {
-        res = handleInventoryResponse(slice, incremental, errorMsg);
+        auto pair = stripObjectIds(slice);
+        res = handleInventoryResponse(pair.first, incremental,
+                                      errorMsg);
      }
    }
@@ -1027,11 +1030,12 @@ int InitialSyncer::handleCollectionSync(arangodb::LogicalCollection* col,
  // now we can fetch the complete chunk information from the master
  try {
    if (std::strcmp("mmfiles", EngineSelectorFeature::engineName()) == 0) {
-      res = handleSyncKeysMMFiles(col, id.copyString(), cid, collectionName, maxTick,
-                                  errorMsg);
-    } else if (std::strcmp("rocksdb", EngineSelectorFeature::engineName()) == 0) {
-      res = handleSyncKeysRocksDB(col, id.copyString(), cid, collectionName, maxTick,
-                                  errorMsg);
+      res = handleSyncKeysMMFiles(col, id.copyString(), cid, collectionName,
+                                  maxTick, errorMsg);
+    } else if (std::strcmp("rocksdb", EngineSelectorFeature::engineName()) ==
+               0) {
+      res = handleSyncKeysRocksDB(col, id.copyString(), cid, collectionName,
+                                  maxTick, errorMsg);
    } else {
      THROW_ARANGO_EXCEPTION(TRI_ERROR_NOT_IMPLEMENTED);
    }
@@ -1052,11 +1056,11 @@ int InitialSyncer::handleCollectionSync(arangodb::LogicalCollection* col,
////////////////////////////////////////////////////////////////////////////////

int InitialSyncer::handleSyncKeysRocksDB(arangodb::LogicalCollection* col,
-                                        std::string const& keysId,
-                                        std::string const& cid,
-                                        std::string const& collectionName,
-                                        TRI_voc_tick_t maxTick,
-                                        std::string& errorMsg) {
+                                         std::string const& keysId,
+                                         std::string const& cid,
+                                         std::string const& collectionName,
+                                         TRI_voc_tick_t maxTick,
+                                         std::string& errorMsg) {
  std::string progress =
      "collecting local keys for collection '" + collectionName + "'";
  setProgress(progress);
@@ -1160,21 +1164,23 @@ int InitialSyncer::handleSyncKeysRocksDB(arangodb::LogicalCollection* col,
    std::string const lowKey(lowSlice.copyString());
    std::string const highKey(highSlice.copyString());

    LogicalCollection* coll = trx.documentCollection();
-    std::unique_ptr<IndexIterator> iterator = coll->getAllIterator(&trx, &mmdr, false);
-    iterator->next([&] (DocumentIdentifierToken const& token) {
-      if (coll->readDocument(&trx, token, mmdr) == false) {
-        return;
-      }
-      VPackSlice doc(mmdr.vpack());
-      VPackSlice key = doc.get(StaticStrings::KeyString);
-      if (key.compareString(lowKey.data(), lowKey.length()) < 0) {
-        trx.remove(collectionName, key, options);
-      } else if (key.compareString(highKey.data(), highKey.length()) > 0) {
-        trx.remove(collectionName, key, options);
-      }
-    }, UINT64_MAX);
+    std::unique_ptr<IndexIterator> iterator =
+        coll->getAllIterator(&trx, &mmdr, false);
+    iterator->next(
+        [&](DocumentIdentifierToken const& token) {
+          if (coll->readDocument(&trx, token, mmdr) == false) {
+            return;
+          }
+          VPackSlice doc(mmdr.vpack());
+          VPackSlice key = doc.get(StaticStrings::KeyString);
+          if (key.compareString(lowKey.data(), lowKey.length()) < 0) {
+            trx.remove(collectionName, key, options);
+          } else if (key.compareString(highKey.data(), highKey.length()) > 0) {
+            trx.remove(collectionName, key, options);
+          }
+        },
+        UINT64_MAX);

    trx.commit();
  }
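The lambda above is the pruning step of the incremental sync: before chunk-by-chunk comparison, every local document whose key falls outside the remote window [lowKey, highKey] is removed, since it cannot exist on the master. A condensed, self-contained sketch of that step (stand-in container; the real code walks an IndexIterator and removes through the transaction):

    #include <string>
    #include <vector>

    // drop keys outside the remote window [low, high]
    std::vector<std::string> prune(std::vector<std::string> const& keys,
                                   std::string const& low,
                                   std::string const& high) {
      std::vector<std::string> kept;
      for (auto const& key : keys) {
        if (key < low || key > high) {
          continue;  // trx.remove(...) in the real code
        }
        kept.push_back(key);
      }
      return kept;
    }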
@@ -1197,7 +1203,6 @@ int InitialSyncer::handleSyncKeysRocksDB(arangodb::LogicalCollection* col,
    return res.errorNumber();
  }

-
  // We do not take responsibility for the index.
  // The LogicalCollection is protected by trx.
  // Neither it nor it's indexes can be invalidated
@@ -1212,19 +1217,19 @@ int InitialSyncer::handleSyncKeysRocksDB(arangodb::LogicalCollection* col,
  std::vector<std::pair<std::string, uint64_t>> markers;
  bool foundLowKey = false;

-  auto resetChunk = [&] () -> void {
+  auto resetChunk = [&]() -> void {
    sendExtendBatch();
    sendExtendBarrier();

    progress = "processing keys chunk " + std::to_string(currentChunkId) +
-        " for collection '" + collectionName + "'";
+               " for collection '" + collectionName + "'";
    setProgress(progress);

    // read remote chunk
    VPackSlice chunk = chunkSlice.at(currentChunkId);
    if (!chunk.isObject()) {
      errorMsg = "got invalid response from master at " +
-          _masterInfo._endpoint + ": chunk is no object";
+                 _masterInfo._endpoint + ": chunk is no object";
      THROW_ARANGO_EXCEPTION(TRI_ERROR_REPLICATION_INVALID_RESPONSE);
    }
@@ -1234,8 +1239,8 @@ int InitialSyncer::handleSyncKeysRocksDB(arangodb::LogicalCollection* col,
    if (!lowSlice.isString() || !highSlice.isString() ||
        !hashSlice.isString()) {
      errorMsg = "got invalid response from master at " +
-          _masterInfo._endpoint +
-          ": chunks in response have an invalid format";
+                 _masterInfo._endpoint +
+                 ": chunks in response have an invalid format";
      THROW_ARANGO_EXCEPTION(TRI_ERROR_REPLICATION_INVALID_RESPONSE);
    }
@@ -1250,8 +1255,8 @@ int InitialSyncer::handleSyncKeysRocksDB(arangodb::LogicalCollection* col,
  // set to first chunk
  resetChunk();

-  std::function<void(VPackSlice, VPackSlice)> parseDoc =
-      [&] (VPackSlice doc, VPackSlice key) {
+  std::function<void(VPackSlice, VPackSlice)> parseDoc = [&](VPackSlice doc,
+                                                             VPackSlice key) {

    bool rangeUneqal = false;
    bool nextChunk = false;
@@ -1263,7 +1268,8 @@ int InitialSyncer::handleSyncKeysRocksDB(arangodb::LogicalCollection* col,
      // smaller values than lowKey mean they don't exist remotely
      trx.remove(collectionName, key, options);
      return;
-    } if (cmp1 >= 0 && cmp2 <= 0) {
+    }
+    if (cmp1 >= 0 && cmp2 <= 0) {
      // we only need to hash we are in the range
      if (cmp1 == 0) {
        foundLowKey = true;
@@ -1279,7 +1285,7 @@ int InitialSyncer::handleSyncKeysRocksDB(arangodb::LogicalCollection* col,

      markers.emplace_back(key.copyString(), TRI_ExtractRevisionId(doc));

-      if (cmp2 == 0) {// found highKey
+      if (cmp2 == 0) {  // found highKey
        rangeUneqal = std::to_string(localHash) != hashString;
        nextChunk = true;
      }
@@ -1287,7 +1293,7 @@ int InitialSyncer::handleSyncKeysRocksDB(arangodb::LogicalCollection* col,
        rangeUneqal = true;
        nextChunk = true;
      }
-    } else if (cmp2 > 0) { // higher than highKey
+    } else if (cmp2 > 0) {  // higher than highKey
      // current range was unequal and we did not find the
      // high key. Load range and skip to next
      rangeUneqal = true;
@@ -1295,16 +1301,15 @@ int InitialSyncer::handleSyncKeysRocksDB(arangodb::LogicalCollection* col,
    }

    if (rangeUneqal) {
-      int res = syncChunkRocksDB(&trx, keysId, currentChunkId,
-                                 lowKey, highKey,
-                                 markers, errorMsg);
+      int res = syncChunkRocksDB(&trx, keysId, currentChunkId, lowKey,
+                                 highKey, markers, errorMsg);
      if (res != TRI_ERROR_NO_ERROR) {
        THROW_ARANGO_EXCEPTION(res);
      }
    }
-    TRI_ASSERT(!rangeUneqal || rangeUneqal && nextChunk); // A => B
-    if (nextChunk && currentChunkId+1 < numChunks) {
-      currentChunkId++;// we are out of range, see next chunk
+    TRI_ASSERT(!rangeUneqal || (rangeUneqal && nextChunk));  // A => B
+    if (nextChunk && currentChunkId + 1 < numChunks) {
+      currentChunkId++;  // we are out of range, see next chunk
      resetChunk();

      // key is higher than upper bound, recheck the current document
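The reformatted assertion only adds parentheses; the truth table is unchanged. Both spellings encode the implication "rangeUneqal implies nextChunk" (A => B), which simplifies to !A || B:

    #include <cassert>

    void check(bool rangeUneqal, bool nextChunk) {
      // A => B, three equivalent spellings:
      assert(!rangeUneqal || (rangeUneqal && nextChunk));  // as in the diff
      assert(!rangeUneqal || nextChunk);                   // simplified
      assert(!(rangeUneqal && !nextChunk));                // no counterexample
    }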
@@ -1314,15 +1319,18 @@ int InitialSyncer::handleSyncKeysRocksDB(arangodb::LogicalCollection* col,
    }
  };

-  std::unique_ptr<IndexIterator> iterator = col->getAllIterator(&trx, &mmdr, false);
-  iterator->next([&] (DocumentIdentifierToken const& token) {
-    if (col->readDocument(&trx, token, mmdr) == false) {
-      return;
-    }
-    VPackSlice doc(mmdr.vpack());
-    VPackSlice key = doc.get(StaticStrings::KeyString);
-    parseDoc(doc, key);
-  }, UINT64_MAX);
+  std::unique_ptr<IndexIterator> iterator =
+      col->getAllIterator(&trx, &mmdr, false);
+  iterator->next(
+      [&](DocumentIdentifierToken const& token) {
+        if (col->readDocument(&trx, token, mmdr) == false) {
+          return;
+        }
+        VPackSlice doc(mmdr.vpack());
+        VPackSlice key = doc.get(StaticStrings::KeyString);
+        parseDoc(doc, key);
+      },
+      UINT64_MAX);

  res = trx.commit();
  if (!res.ok()) {
@@ -1333,14 +1341,12 @@ int InitialSyncer::handleSyncKeysRocksDB(arangodb::LogicalCollection* col,
  return res;
}

-int InitialSyncer::syncChunkRocksDB(SingleCollectionTransaction* trx,
-                                    std::string const& keysId,
-                                    uint64_t chunkId,
-                                    std::string const& lowString,
-                                    std::string const& highString,
-                                    std::vector<std::pair<std::string, uint64_t>> markers,
-                                    std::string& errorMsg) {
+int InitialSyncer::syncChunkRocksDB(
+    SingleCollectionTransaction* trx, std::string const& keysId,
+    uint64_t chunkId, std::string const& lowString,
+    std::string const& highString,
+    std::vector<std::pair<std::string, uint64_t>> markers,
+    std::string& errorMsg) {
  std::string const baseUrl = BaseUrl + "/keys";
  TRI_voc_tick_t const chunkSize = 5000;
  std::string const& collectionName = trx->documentCollection()->name();
@@ -1353,18 +1359,19 @@ int InitialSyncer::syncChunkRocksDB(SingleCollectionTransaction* trx,
  // no match
  // must transfer keys for non-matching range
  std::string url = baseUrl + "/" + keysId + "?type=keys&chunk=" +
-      std::to_string(chunkId) + "&chunkSize=" +
-      std::to_string(chunkSize);
+                    std::to_string(chunkId) + "&chunkSize=" +
+                    std::to_string(chunkSize);

-  std::string progress = "fetching keys chunk '" + std::to_string(chunkId) + "' from " + url;
+  std::string progress =
+      "fetching keys chunk '" + std::to_string(chunkId) + "' from " + url;
  setProgress(progress);

  std::unique_ptr<SimpleHttpResult> response(
-    _client->retryRequest(rest::RequestType::PUT, url, nullptr, 0));
+      _client->retryRequest(rest::RequestType::PUT, url, nullptr, 0));

  if (response == nullptr || !response->isComplete()) {
    errorMsg = "could not connect to master at " + _masterInfo._endpoint +
-      ": " + _client->getErrorMessage();
+               ": " + _client->getErrorMessage();

    return TRI_ERROR_REPLICATION_NO_RESPONSE;
  }
@@ -1372,10 +1379,9 @@ int InitialSyncer::syncChunkRocksDB(SingleCollectionTransaction* trx,
  TRI_ASSERT(response != nullptr);

  if (response->wasHttpError()) {
-    errorMsg = "got invalid response from master at " +
-               _masterInfo._endpoint + ": HTTP " +
-               StringUtils::itoa(response->getHttpReturnCode()) + ": " +
-               response->getHttpReturnMessage();
+    errorMsg = "got invalid response from master at " + _masterInfo._endpoint +
+               ": HTTP " + StringUtils::itoa(response->getHttpReturnCode()) +
+               ": " + response->getHttpReturnMessage();

    return TRI_ERROR_REPLICATION_MASTER_ERROR;
  }
@@ -1384,16 +1390,16 @@ int InitialSyncer::syncChunkRocksDB(SingleCollectionTransaction* trx,
  int res = parseResponse(builder, response.get());

  if (res != TRI_ERROR_NO_ERROR) {
-    errorMsg = "got invalid response from master at " +
-               _masterInfo._endpoint + ": response is no array";
+    errorMsg = "got invalid response from master at " + _masterInfo._endpoint +
+               ": response is no array";

    return TRI_ERROR_REPLICATION_INVALID_RESPONSE;
  }

  VPackSlice const responseBody = builder->slice();
  if (!responseBody.isArray()) {
-    errorMsg = "got invalid response from master at " +
-               _masterInfo._endpoint + ": response is no array";
+    errorMsg = "got invalid response from master at " + _masterInfo._endpoint +
+               ": response is no array";

    return TRI_ERROR_REPLICATION_INVALID_RESPONSE;
  }
@@ -1427,11 +1433,10 @@ int InitialSyncer::syncChunkRocksDB(SingleCollectionTransaction* trx,
  size_t nextStart = 0;

  for (VPackSlice const& pair : VPackArrayIterator(responseBody)) {
-
    if (!pair.isArray() || pair.length() != 2) {
      errorMsg = "got invalid response from master at " +
-          _masterInfo._endpoint +
-          ": response key pair is no valid array";
+                 _masterInfo._endpoint +
+                 ": response key pair is no valid array";

      return TRI_ERROR_REPLICATION_INVALID_RESPONSE;
    }
@@ -1440,7 +1445,7 @@ int InitialSyncer::syncChunkRocksDB(SingleCollectionTransaction* trx,
    VPackSlice const keySlice = pair.at(0);
    if (!keySlice.isString()) {
      errorMsg = "got invalid response from master at " +
-          _masterInfo._endpoint + ": response key is no string";
+                 _masterInfo._endpoint + ": response key is no string";

      return TRI_ERROR_REPLICATION_INVALID_RESPONSE;
    }
@@ -1491,7 +1496,6 @@ int InitialSyncer::syncChunkRocksDB(SingleCollectionTransaction* trx,
    i++;
  }

-
  if (!toFetch.empty()) {
    VPackBuilder keysBuilder;
    keysBuilder.openArray();
@@ -1501,22 +1505,21 @@ int InitialSyncer::syncChunkRocksDB(SingleCollectionTransaction* trx,
    keysBuilder.close();

    std::string url = baseUrl + "/" + keysId + "?type=docs&chunk=" +
-        std::to_string(chunkId) + "&chunkSize=" +
-        std::to_string(chunkSize);
-    progress = "fetching documents chunk " +
-        std::to_string(chunkId) + " for collection '" +
-        collectionName + "' from " + url;
+                      std::to_string(chunkId) + "&chunkSize=" +
+                      std::to_string(chunkSize);
+    progress = "fetching documents chunk " + std::to_string(chunkId) +
+               " for collection '" + collectionName + "' from " + url;
    setProgress(progress);

    std::string const keyJsonString(keysBuilder.slice().toJson());

    std::unique_ptr<SimpleHttpResult> response(
-      _client->retryRequest(rest::RequestType::PUT, url,
-                            keyJsonString.c_str(), keyJsonString.size()));
+        _client->retryRequest(rest::RequestType::PUT, url,
+                              keyJsonString.c_str(), keyJsonString.size()));

    if (response == nullptr || !response->isComplete()) {
      errorMsg = "could not connect to master at " + _masterInfo._endpoint +
-          ": " + _client->getErrorMessage();
+                 ": " + _client->getErrorMessage();

      return TRI_ERROR_REPLICATION_NO_RESPONSE;
    }
@@ -1525,9 +1528,9 @@ int InitialSyncer::syncChunkRocksDB(SingleCollectionTransaction* trx,

    if (response->wasHttpError()) {
      errorMsg = "got invalid response from master at " +
-          _masterInfo._endpoint + ": HTTP " +
-          StringUtils::itoa(response->getHttpReturnCode()) + ": " +
-          response->getHttpReturnMessage();
+                 _masterInfo._endpoint + ": HTTP " +
+                 StringUtils::itoa(response->getHttpReturnCode()) + ": " +
+                 response->getHttpReturnMessage();

      return TRI_ERROR_REPLICATION_MASTER_ERROR;
    }
@@ -1537,8 +1540,7 @@ int InitialSyncer::syncChunkRocksDB(SingleCollectionTransaction* trx,

    if (res != TRI_ERROR_NO_ERROR) {
      errorMsg = "got invalid response from master at " +
-          std::string(_masterInfo._endpoint) +
-          ": response is no array";
+                 std::string(_masterInfo._endpoint) + ": response is no array";

      return TRI_ERROR_REPLICATION_INVALID_RESPONSE;
    }
@@ -1546,7 +1548,7 @@ int InitialSyncer::syncChunkRocksDB(SingleCollectionTransaction* trx,
    VPackSlice const slice = builder->slice();
    if (!slice.isArray()) {
      errorMsg = "got invalid response from master at " +
-          _masterInfo._endpoint + ": response is no array";
+                 _masterInfo._endpoint + ": response is no array";

      return TRI_ERROR_REPLICATION_INVALID_RESPONSE;
    }
@@ -1554,7 +1556,7 @@ int InitialSyncer::syncChunkRocksDB(SingleCollectionTransaction* trx,
    for (auto const& it : VPackArrayIterator(slice)) {
      if (!it.isObject()) {
        errorMsg = "got invalid response from master at " +
-            _masterInfo._endpoint + ": document is no object";
+                   _masterInfo._endpoint + ": document is no object";

        return TRI_ERROR_REPLICATION_INVALID_RESPONSE;
      }
@@ -1563,7 +1565,7 @@ int InitialSyncer::syncChunkRocksDB(SingleCollectionTransaction* trx,

      if (!keySlice.isString()) {
        errorMsg = "got invalid response from master at " +
-            _masterInfo._endpoint + ": document key is invalid";
+                   _masterInfo._endpoint + ": document key is invalid";

        return TRI_ERROR_REPLICATION_INVALID_RESPONSE;
      }
@@ -1572,7 +1574,7 @@ int InitialSyncer::syncChunkRocksDB(SingleCollectionTransaction* trx,

      if (!revSlice.isString()) {
        errorMsg = "got invalid response from master at " +
-            _masterInfo._endpoint + ": document revision is invalid";
+                   _masterInfo._endpoint + ": document revision is invalid";

        return TRI_ERROR_REPLICATION_INVALID_RESPONSE;
      }
@@ -1602,12 +1604,13 @@ int InitialSyncer::syncChunkRocksDB(SingleCollectionTransaction* trx,
////////////////////////////////////////////////////////////////////////////////

int InitialSyncer::handleSyncKeysMMFiles(arangodb::LogicalCollection* col,
-    std::string const& keysId, std::string const& cid,
-    std::string const& collectionName, TRI_voc_tick_t maxTick,
-    std::string& errorMsg) {
-
+                                         std::string const& keysId,
+                                         std::string const& cid,
+                                         std::string const& collectionName,
+                                         TRI_voc_tick_t maxTick,
+                                         std::string& errorMsg) {
  std::string progress =
-    "collecting local keys for collection '" + collectionName + "'";
+      "collecting local keys for collection '" + collectionName + "'";
  setProgress(progress);

  // fetch all local keys from primary index
@@ -1618,19 +1621,22 @@ int InitialSyncer::handleSyncKeysMMFiles(arangodb::LogicalCollection* col,
  // acquire a replication ditch so no datafiles are thrown away from now on
  // note: the ditch also protects against unloading the collection
  {
-    SingleCollectionTransaction trx(transaction::StandaloneContext::Create(_vocbase), col->cid(), AccessMode::Type::READ);
+    SingleCollectionTransaction trx(
+        transaction::StandaloneContext::Create(_vocbase), col->cid(),
+        AccessMode::Type::READ);

    Result res = trx.begin();

    if (!res.ok()) {
-      errorMsg = std::string("unable to start transaction: ") + res.errorMessage();
-      res.reset(res.errorNumber(),errorMsg);
+      errorMsg =
+          std::string("unable to start transaction: ") + res.errorMessage();
+      res.reset(res.errorNumber(), errorMsg);
      return res.errorNumber();
    }

    ditch = arangodb::MMFilesCollection::toMMFilesCollection(col)
-        ->ditches()
-        ->createMMFilesDocumentDitch(false, __FILE__, __LINE__);
+                ->ditches()
+                ->createMMFilesDocumentDitch(false, __FILE__, __LINE__);

    if (ditch == nullptr) {
      return TRI_ERROR_OUT_OF_MEMORY;
@@ -1640,17 +1646,20 @@ int InitialSyncer::handleSyncKeysMMFiles(arangodb::LogicalCollection* col,
  TRI_ASSERT(ditch != nullptr);

  TRI_DEFER(arangodb::MMFilesCollection::toMMFilesCollection(col)
-      ->ditches()
-      ->freeDitch(ditch));
+                ->ditches()
+                ->freeDitch(ditch));

  {
-    SingleCollectionTransaction trx(transaction::StandaloneContext::Create(_vocbase), col->cid(), AccessMode::Type::READ);
+    SingleCollectionTransaction trx(
+        transaction::StandaloneContext::Create(_vocbase), col->cid(),
+        AccessMode::Type::READ);

    Result res = trx.begin();

    if (!res.ok()) {
-      errorMsg = std::string("unable to start transaction: ") + res.errorMessage();
-      res.reset(res.errorNumber(),errorMsg);
+      errorMsg =
+          std::string("unable to start transaction: ") + res.errorMessage();
+      res.reset(res.errorNumber(), errorMsg);
      return res.errorNumber();
    }
@@ -1662,18 +1671,20 @@ int InitialSyncer::handleSyncKeysMMFiles(arangodb::LogicalCollection* col,

    uint64_t iterations = 0;
    ManagedDocumentResult mmdr;
-    trx.invokeOnAllElements(trx.name(), [this, &trx, &mmdr, &markers, &iterations](DocumentIdentifierToken const& token) {
-      if (trx.documentCollection()->readDocument(&trx, token, mmdr)) {
-        markers.emplace_back(mmdr.vpack());
-
-        if (++iterations % 10000 == 0) {
-          if (checkAborted()) {
-            return false;
-          }
-        }
-      }
-      return true;
-    });
+    trx.invokeOnAllElements(
+        trx.name(), [this, &trx, &mmdr, &markers,
+                     &iterations](DocumentIdentifierToken const& token) {
+          if (trx.documentCollection()->readDocument(&trx, token, mmdr)) {
+            markers.emplace_back(mmdr.vpack());
+
+            if (++iterations % 10000 == 0) {
+              if (checkAborted()) {
+                return false;
+              }
+            }
+          }
+          return true;
+        });

    if (checkAborted()) {
      return TRI_ERROR_REPLICATION_APPLIER_STOPPED;
@@ -1683,35 +1694,35 @@ int InitialSyncer::handleSyncKeysMMFiles(arangodb::LogicalCollection* col,
    sendExtendBarrier();

    std::string progress = "sorting " + std::to_string(markers.size()) +
-        " local key(s) for collection '" + collectionName +
-        "'";
+                           " local key(s) for collection '" + collectionName +
+                           "'";
    setProgress(progress);

    // sort all our local keys
-    std::sort(
-        markers.begin(), markers.end(),
-        [](uint8_t const* lhs, uint8_t const* rhs) -> bool {
-          VPackSlice const l(lhs);
-          VPackSlice const r(rhs);
+    std::sort(markers.begin(), markers.end(), [](uint8_t const* lhs,
+                                                 uint8_t const* rhs) -> bool {
+      VPackSlice const l(lhs);
+      VPackSlice const r(rhs);

-          VPackValueLength lLength, rLength;
-          char const* lKey = l.get(StaticStrings::KeyString).getString(lLength);
-          char const* rKey = r.get(StaticStrings::KeyString).getString(rLength);
+      VPackValueLength lLength, rLength;
+      char const* lKey = l.get(StaticStrings::KeyString).getString(lLength);
+      char const* rKey = r.get(StaticStrings::KeyString).getString(rLength);

-          size_t const length = static_cast<size_t>(lLength < rLength ? lLength : rLength);
-          int res = memcmp(lKey, rKey, length);
+      size_t const length =
+          static_cast<size_t>(lLength < rLength ? lLength : rLength);
+      int res = memcmp(lKey, rKey, length);

-          if (res < 0) {
-            // left is smaller than right
-            return true;
-          }
-          if (res == 0 && lLength < rLength) {
-            // left is equal to right, but of shorter length
-            return true;
-          }
+      if (res < 0) {
+        // left is smaller than right
+        return true;
+      }
+      if (res == 0 && lLength < rLength) {
+        // left is equal to right, but of shorter length
+        return true;
+      }

-          return false;
-        });
+      return false;
+    });
  }

  if (checkAborted()) {
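The comparator above is byte-wise lexicographic order on the _key strings: memcmp over the common prefix, and on a tie the shorter key sorts first — the same ordering std::string's operator< produces. A self-contained equivalent, with hypothetical names:

    #include <cstring>
    #include <string>

    // lexicographic byte order; equivalent to l < r for std::string
    bool keyLess(std::string const& l, std::string const& r) {
      size_t const length = l.size() < r.size() ? l.size() : r.size();
      int res = std::memcmp(l.data(), r.data(), length);
      return res < 0 || (res == 0 && l.size() < r.size());
    }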
@@ -1727,18 +1738,17 @@ int InitialSyncer::handleSyncKeysMMFiles(arangodb::LogicalCollection* col,
  std::string const baseUrl = BaseUrl + "/keys";

  std::string url =
-    baseUrl + "/" + keysId + "?chunkSize=" + std::to_string(chunkSize);
+      baseUrl + "/" + keysId + "?chunkSize=" + std::to_string(chunkSize);
  progress = "fetching remote keys chunks for collection '" + collectionName +
-    "' from " + url;
+             "' from " + url;
  setProgress(progress);

  std::unique_ptr<SimpleHttpResult> response(
-    _client->retryRequest(rest::RequestType::GET, url, nullptr, 0));
+      _client->retryRequest(rest::RequestType::GET, url, nullptr, 0));

  if (response == nullptr || !response->isComplete()) {
-    errorMsg = "could not connect to master at " +
-               _masterInfo._endpoint + ": " +
-               _client->getErrorMessage();
+    errorMsg = "could not connect to master at " + _masterInfo._endpoint +
+               ": " + _client->getErrorMessage();

    return TRI_ERROR_REPLICATION_NO_RESPONSE;
  }
@@ -1746,10 +1756,9 @@ int InitialSyncer::handleSyncKeysMMFiles(arangodb::LogicalCollection* col,
  TRI_ASSERT(response != nullptr);

  if (response->wasHttpError()) {
-    errorMsg = "got invalid response from master at " +
-               _masterInfo._endpoint + ": HTTP " +
-               StringUtils::itoa(response->getHttpReturnCode()) + ": " +
-               response->getHttpReturnMessage();
+    errorMsg = "got invalid response from master at " + _masterInfo._endpoint +
+               ": HTTP " + StringUtils::itoa(response->getHttpReturnCode()) +
+               ": " + response->getHttpReturnMessage();

    return TRI_ERROR_REPLICATION_MASTER_ERROR;
  }
@@ -1759,8 +1768,8 @@ int InitialSyncer::handleSyncKeysMMFiles(arangodb::LogicalCollection* col,

  if (res != TRI_ERROR_NO_ERROR) {
    errorMsg = "got invalid response from master at " +
-        std::string(_masterInfo._endpoint) +
-        ": invalid response is no array";
+               std::string(_masterInfo._endpoint) +
+               ": invalid response is no array";

    return TRI_ERROR_REPLICATION_INVALID_RESPONSE;
  }
@@ -1768,8 +1777,8 @@ int InitialSyncer::handleSyncKeysMMFiles(arangodb::LogicalCollection* col,
  VPackSlice const slice = builder->slice();

  if (!slice.isArray()) {
-    errorMsg = "got invalid response from master at " +
-               _masterInfo._endpoint + ": response is no array";
+    errorMsg = "got invalid response from master at " + _masterInfo._endpoint +
+               ": response is no array";

    return TRI_ERROR_REPLICATION_INVALID_RESPONSE;
  }
@@ -1785,13 +1794,16 @@ int InitialSyncer::handleSyncKeysMMFiles(arangodb::LogicalCollection* col,
  // remove all keys that are below first remote key or beyond last remote key
  if (n > 0) {
    // first chunk
-    SingleCollectionTransaction trx(transaction::StandaloneContext::Create(_vocbase), col->cid(), AccessMode::Type::WRITE);
+    SingleCollectionTransaction trx(
+        transaction::StandaloneContext::Create(_vocbase), col->cid(),
+        AccessMode::Type::WRITE);

    Result res = trx.begin();

    if (!res.ok()) {
-      errorMsg = std::string("unable to start transaction: ") + res.errorMessage();
-      res.reset(res.errorNumber(),errorMsg);
+      errorMsg =
+          std::string("unable to start transaction: ") + res.errorMessage();
+      res.reset(res.errorNumber(), errorMsg);
      return res.errorNumber();
    }
@@ -1857,17 +1869,20 @@ int InitialSyncer::handleSyncKeysMMFiles(arangodb::LogicalCollection* col,
      return TRI_ERROR_REPLICATION_APPLIER_STOPPED;
    }

-    SingleCollectionTransaction trx(transaction::StandaloneContext::Create(_vocbase), col->cid(), AccessMode::Type::WRITE);
+    SingleCollectionTransaction trx(
+        transaction::StandaloneContext::Create(_vocbase), col->cid(),
+        AccessMode::Type::WRITE);

    Result res = trx.begin();

    if (!res.ok()) {
-      errorMsg = std::string("unable to start transaction: ") + res.errorMessage();
-      res.reset(res.errorNumber(),res.errorMessage());
+      errorMsg =
+          std::string("unable to start transaction: ") + res.errorMessage();
+      res.reset(res.errorNumber(), res.errorMessage());
      return res.errorNumber();
    }

-    trx.pinData(col->cid()); // will throw when it fails
+    trx.pinData(col->cid());  // will throw when it fails

    // We do not take responsibility for the index.
    // The LogicalCollection is protected by trx.
@@ -1875,12 +1890,12 @@ int InitialSyncer::handleSyncKeysMMFiles(arangodb::LogicalCollection* col,

    // TODO Move to MMFiles
    auto physical = static_cast<MMFilesCollection*>(
-      trx.documentCollection()->getPhysical());
+        trx.documentCollection()->getPhysical());
    auto idx = physical->primaryIndex();

    size_t const currentChunkId = i;
    progress = "processing keys chunk " + std::to_string(currentChunkId) +
-      " for collection '" + collectionName + "'";
+               " for collection '" + collectionName + "'";
    setProgress(progress);

    sendExtendBatch();
@@ -1891,7 +1906,7 @@ int InitialSyncer::handleSyncKeysMMFiles(arangodb::LogicalCollection* col,

    if (!chunk.isObject()) {
      errorMsg = "got invalid response from master at " +
-          _masterInfo._endpoint + ": chunk is no object";
+                 _masterInfo._endpoint + ": chunk is no object";

      return TRI_ERROR_REPLICATION_INVALID_RESPONSE;
    }
@@ -1903,8 +1918,8 @@ int InitialSyncer::handleSyncKeysMMFiles(arangodb::LogicalCollection* col,
    if (!lowSlice.isString() || !highSlice.isString() ||
        !hashSlice.isString()) {
      errorMsg = "got invalid response from master at " +
-          _masterInfo._endpoint +
-          ": chunks in response have an invalid format";
+                 _masterInfo._endpoint +
+                 ": chunks in response have an invalid format";

      return TRI_ERROR_REPLICATION_INVALID_RESPONSE;
    }
@@ -1914,8 +1929,7 @@ int InitialSyncer::handleSyncKeysMMFiles(arangodb::LogicalCollection* col,

    size_t localFrom;
    size_t localTo;
-    bool match =
-        FindRange(markers, lowString, highString, localFrom, localTo);
+    bool match = FindRange(markers, lowString, highString, localFrom, localTo);

    if (match) {
      // now must hash the range
@@ -1940,19 +1954,18 @@ int InitialSyncer::handleSyncKeysMMFiles(arangodb::LogicalCollection* col,
      // no match
      // must transfer keys for non-matching range
      std::string url = baseUrl + "/" + keysId + "?type=keys&chunk=" +
-          std::to_string(i) + "&chunkSize=" +
-          std::to_string(chunkSize);
+                        std::to_string(i) + "&chunkSize=" +
+                        std::to_string(chunkSize);
      progress = "fetching keys chunk " + std::to_string(currentChunkId) +
-          " for collection '" + collectionName + "' from " + url;
+                 " for collection '" + collectionName + "' from " + url;
      setProgress(progress);

-      std::unique_ptr<SimpleHttpResult> response(_client->retryRequest(
-          rest::RequestType::PUT, url, nullptr, 0));
+      std::unique_ptr<SimpleHttpResult> response(
+          _client->retryRequest(rest::RequestType::PUT, url, nullptr, 0));

      if (response == nullptr || !response->isComplete()) {
-        errorMsg = "could not connect to master at " +
-                   _masterInfo._endpoint + ": " +
-                   _client->getErrorMessage();
+        errorMsg = "could not connect to master at " + _masterInfo._endpoint +
+                   ": " + _client->getErrorMessage();

        return TRI_ERROR_REPLICATION_NO_RESPONSE;
      }
@@ -1961,9 +1974,9 @@ int InitialSyncer::handleSyncKeysMMFiles(arangodb::LogicalCollection* col,

      if (response->wasHttpError()) {
        errorMsg = "got invalid response from master at " +
-            _masterInfo._endpoint + ": HTTP " +
-            StringUtils::itoa(response->getHttpReturnCode()) + ": " +
-            response->getHttpReturnMessage();
+                   _masterInfo._endpoint + ": HTTP " +
+                   StringUtils::itoa(response->getHttpReturnCode()) + ": " +
+                   response->getHttpReturnMessage();

        return TRI_ERROR_REPLICATION_MASTER_ERROR;
      }
@@ -1973,8 +1986,7 @@ int InitialSyncer::handleSyncKeysMMFiles(arangodb::LogicalCollection* col,

      if (res != TRI_ERROR_NO_ERROR) {
        errorMsg = "got invalid response from master at " +
-            _masterInfo._endpoint +
-            ": response is no array";
+                   _masterInfo._endpoint + ": response is no array";

        return TRI_ERROR_REPLICATION_INVALID_RESPONSE;
      }
@@ -1982,17 +1994,16 @@ int InitialSyncer::handleSyncKeysMMFiles(arangodb::LogicalCollection* col,
      VPackSlice const slice = builder->slice();
      if (!slice.isArray()) {
        errorMsg = "got invalid response from master at " +
-            _masterInfo._endpoint +
-            ": response is no array";
+                   _masterInfo._endpoint + ": response is no array";

        return TRI_ERROR_REPLICATION_INVALID_RESPONSE;
      }

      // delete all keys at start of the range
      while (nextStart < markers.size()) {
        VPackSlice const keySlice(markers[nextStart]);
-        std::string const localKey(keySlice.get(StaticStrings::KeyString).copyString());
+        std::string const localKey(
+            keySlice.get(StaticStrings::KeyString).copyString());

        if (localKey.compare(lowString) < 0) {
          // we have a local key that is not present remotely
@@ -2018,8 +2029,8 @@ int InitialSyncer::handleSyncKeysMMFiles(arangodb::LogicalCollection* col,

        if (!pair.isArray() || pair.length() != 2) {
          errorMsg = "got invalid response from master at " +
-              _masterInfo._endpoint +
-              ": response key pair is no valid array";
+                     _masterInfo._endpoint +
+                     ": response key pair is no valid array";

          return TRI_ERROR_REPLICATION_INVALID_RESPONSE;
        }
@@ -2029,8 +2040,7 @@ int InitialSyncer::handleSyncKeysMMFiles(arangodb::LogicalCollection* col,

        if (!keySlice.isString()) {
          errorMsg = "got invalid response from master at " +
-              _masterInfo._endpoint +
-              ": response key is no string";
+                     _masterInfo._endpoint + ": response key is no string";

          return TRI_ERROR_REPLICATION_INVALID_RESPONSE;
        }
@@ -2046,7 +2056,8 @@ int InitialSyncer::handleSyncKeysMMFiles(arangodb::LogicalCollection* col,

        while (nextStart < markers.size()) {
          VPackSlice const localKeySlice(markers[nextStart]);
-          std::string const localKey(localKeySlice.get(StaticStrings::KeyString).copyString());
+          std::string const localKey(
+              localKeySlice.get(StaticStrings::KeyString).copyString());

          int res = localKey.compare(keyString);
@@ -2070,7 +2081,8 @@ int InitialSyncer::handleSyncKeysMMFiles(arangodb::LogicalCollection* col,
        if (!element) {
          // key not found locally
          toFetch.emplace_back(i);
-        } else if (TRI_RidToString(element.revisionId()) != pair.at(1).copyString()) {
+        } else if (TRI_RidToString(element.revisionId()) !=
+                   pair.at(1).copyString()) {
          // key found, but revision id differs
          toFetch.emplace_back(i);
          ++nextStart;
@@ -2086,7 +2098,8 @@ int InitialSyncer::handleSyncKeysMMFiles(arangodb::LogicalCollection* col,

        while (nextStart < markers.size()) {
          VPackSlice const localKeySlice(markers[nextStart]);
-          std::string const localKey(localKeySlice.get(StaticStrings::KeyString).copyString());
+          std::string const localKey(
+              localKeySlice.get(StaticStrings::KeyString).copyString());

          int res = localKey.compare(highString);
@@ -2107,23 +2120,22 @@ int InitialSyncer::handleSyncKeysMMFiles(arangodb::LogicalCollection* col,
        keysBuilder.close();

        std::string url = baseUrl + "/" + keysId + "?type=docs&chunk=" +
-            std::to_string(currentChunkId) + "&chunkSize=" +
-            std::to_string(chunkSize);
+                          std::to_string(currentChunkId) + "&chunkSize=" +
+                          std::to_string(chunkSize);
        progress = "fetching documents chunk " +
-            std::to_string(currentChunkId) + " for collection '" +
-            collectionName + "' from " + url;
+                   std::to_string(currentChunkId) + " for collection '" +
+                   collectionName + "' from " + url;
        setProgress(progress);

        std::string const keyJsonString(keysBuilder.slice().toJson());

        std::unique_ptr<SimpleHttpResult> response(
-          _client->retryRequest(rest::RequestType::PUT, url,
-                                keyJsonString.c_str(), keyJsonString.size()));
+            _client->retryRequest(rest::RequestType::PUT, url,
+                                  keyJsonString.c_str(), keyJsonString.size()));

        if (response == nullptr || !response->isComplete()) {
-          errorMsg = "could not connect to master at " +
-                     _masterInfo._endpoint + ": " +
-                     _client->getErrorMessage();
+          errorMsg = "could not connect to master at " + _masterInfo._endpoint +
+                     ": " + _client->getErrorMessage();

          return TRI_ERROR_REPLICATION_NO_RESPONSE;
        }
@@ -2132,9 +2144,9 @@ int InitialSyncer::handleSyncKeysMMFiles(arangodb::LogicalCollection* col,

        if (response->wasHttpError()) {
          errorMsg = "got invalid response from master at " +
-              _masterInfo._endpoint + ": HTTP " +
-              StringUtils::itoa(response->getHttpReturnCode()) + ": " +
-              response->getHttpReturnMessage();
+                     _masterInfo._endpoint + ": HTTP " +
+                     StringUtils::itoa(response->getHttpReturnCode()) + ": " +
+                     response->getHttpReturnMessage();

          return TRI_ERROR_REPLICATION_MASTER_ERROR;
        }
@@ -2144,8 +2156,8 @@ int InitialSyncer::handleSyncKeysMMFiles(arangodb::LogicalCollection* col,

        if (res != TRI_ERROR_NO_ERROR) {
          errorMsg = "got invalid response from master at " +
-              std::string(_masterInfo._endpoint) +
-              ": response is no array";
+                     std::string(_masterInfo._endpoint) +
+                     ": response is no array";

          return TRI_ERROR_REPLICATION_INVALID_RESPONSE;
        }
@@ -2153,8 +2165,7 @@ int InitialSyncer::handleSyncKeysMMFiles(arangodb::LogicalCollection* col,
        VPackSlice const slice = builder->slice();
        if (!slice.isArray()) {
          errorMsg = "got invalid response from master at " +
-              _masterInfo._endpoint +
-              ": response is no array";
+                     _masterInfo._endpoint + ": response is no array";

          return TRI_ERROR_REPLICATION_INVALID_RESPONSE;
        }
@@ -2162,8 +2173,7 @@ int InitialSyncer::handleSyncKeysMMFiles(arangodb::LogicalCollection* col,
        for (auto const& it : VPackArrayIterator(slice)) {
          if (!it.isObject()) {
            errorMsg = "got invalid response from master at " +
-                _masterInfo._endpoint +
-                ": document is no object";
+                       _masterInfo._endpoint + ": document is no object";

            return TRI_ERROR_REPLICATION_INVALID_RESPONSE;
          }
@@ -2172,8 +2182,7 @@ int InitialSyncer::handleSyncKeysMMFiles(arangodb::LogicalCollection* col,

          if (!keySlice.isString()) {
            errorMsg = "got invalid response from master at " +
-                _masterInfo._endpoint +
-                ": document key is invalid";
+                       _masterInfo._endpoint + ": document key is invalid";

            return TRI_ERROR_REPLICATION_INVALID_RESPONSE;
          }
@@ -2182,8 +2191,7 @@ int InitialSyncer::handleSyncKeysMMFiles(arangodb::LogicalCollection* col,

          if (!revSlice.isString()) {
            errorMsg = "got invalid response from master at " +
-                _masterInfo._endpoint +
-                ": document revision is invalid";
+                       _masterInfo._endpoint + ": document revision is invalid";

            return TRI_ERROR_REPLICATION_INVALID_RESPONSE;
          }
@@ -23,6 +23,7 @@
/// @author Jan Christoph Uhde
////////////////////////////////////////////////////////////////////////////////

+#include "Basics/StringRef.h"
#include "RocksDBEngine/RocksDBCommon.h"
#include "RocksDBEngine/RocksDBComparator.h"
#include "RocksDBEngine/RocksDBEngine.h"
@@ -35,6 +36,7 @@
#include <rocksdb/comparator.h>
#include <rocksdb/convenience.h>
#include <rocksdb/utilities/transaction_db.h>
+#include <velocypack/Iterator.h>
#include "Logger/Logger.h"

namespace arangodb {
@@ -127,6 +129,68 @@ void uint64ToPersistent(std::string& p, uint64_t value) {
  } while (++len < sizeof(uint64_t));
}

+bool hasObjectIds(VPackSlice const& inputSlice) {
+  bool rv = false;
+  if (inputSlice.isObject()) {
+    for (auto const& objectPair :
+         arangodb::velocypack::ObjectIterator(inputSlice)) {
+      if (arangodb::StringRef(objectPair.key) == "objectId") {
+        return true;
+      }
+      rv = hasObjectIds(objectPair.value);
+      if (rv) {
+        return rv;
+      }
+    }
+  } else if (inputSlice.isArray()) {
+    for (auto const& slice : arangodb::velocypack::ArrayIterator(inputSlice)) {
+      if (rv) {
+        return rv;
+      }
+      rv = hasObjectIds(slice);
+    }
+  }
+  return rv;
+}
+
+VPackBuilder& stripObjectIdsImpl(VPackBuilder& builder, VPackSlice const& inputSlice) {
+  if (inputSlice.isObject()) {
+    builder.openObject();
+    for (auto const& objectPair :
+         arangodb::velocypack::ObjectIterator(inputSlice)) {
+      if (arangodb::StringRef(objectPair.key) == "objectId") {
+        continue;
+      }
+      builder.add(objectPair.key);
+      stripObjectIdsImpl(builder, objectPair.value);
+    }
+    builder.close();
+  } else if (inputSlice.isArray()) {
+    builder.openArray();
+    for (auto const& slice : arangodb::velocypack::ArrayIterator(inputSlice)) {
+      stripObjectIdsImpl(builder, slice);
+    }
+    builder.close();
+  } else {
+    builder.add(inputSlice);
+  }
+  return builder;
+}
+
+std::pair<VPackSlice, std::unique_ptr<VPackBuffer<uint8_t>>> stripObjectIds(
+    VPackSlice const& inputSlice, bool checkBeforeCopy) {
+  std::unique_ptr<VPackBuffer<uint8_t>> buffer = nullptr;
+  if (checkBeforeCopy) {
+    if (!hasObjectIds(inputSlice)) {
+      return {inputSlice, std::move(buffer)};
+    }
+  }
+  buffer.reset(new VPackBuffer<uint8_t>);
+  VPackBuilder builder(*buffer);
+  stripObjectIdsImpl(builder, inputSlice);
+  return {VPackSlice(buffer->data()), std::move(buffer)};
+}
+
RocksDBTransactionState* toRocksTransactionState(transaction::Methods* trx) {
  TRI_ASSERT(trx != nullptr);
  TransactionState* state = trx->state();
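stripObjectIds recursively copies a VelocyPack value, dropping every "objectId" attribute at any depth; hasObjectIds lets it skip the copy entirely when nothing needs stripping. That is why the function returns both a slice and the buffer that owns it: when a copy was made, the slice points into the returned buffer, so callers must keep the pair alive while they use the slice. A hedged usage sketch (the JSON input and Parser usage are illustrative; the namespace matches the rocksutils declarations in the header hunk below):

    #include <velocypack/Builder.h>
    #include <velocypack/Parser.h>
    #include <velocypack/Slice.h>

    void example() {
      auto parsed = arangodb::velocypack::Parser::fromJson(
          "{\"name\":\"test\",\"objectId\":\"1234\","
          "\"indexes\":[{\"objectId\":\"5678\"}]}");

      // pair.first aliases either the input or pair.second's buffer;
      // keep `pair` in scope for as long as the slice is used.
      auto pair = arangodb::rocksutils::stripObjectIds(parsed->slice());
      arangodb::velocypack::Slice clean = pair.first;
      // clean now has no "objectId" attributes at any depth
      (void)clean;
    }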
@@ -88,6 +88,10 @@ arangodb::Result convertStatus(rocksdb::Status const&,
uint64_t uint64FromPersistent(char const* p);
void uint64ToPersistent(char* p, uint64_t value);
void uint64ToPersistent(std::string& out, uint64_t value);

+std::pair<VPackSlice, std::unique_ptr<VPackBuffer<uint8_t>>> stripObjectIds(
+    VPackSlice const& inputSlice, bool checkBeforeCopy = true);
+
RocksDBTransactionState* toRocksTransactionState(transaction::Methods* trx);
rocksdb::TransactionDB* globalRocksDB();
RocksDBEngine* globalRocksEngine();
@@ -251,7 +251,7 @@ int RocksDBEdgeIndex::remove(transaction::Methods* trx,
void RocksDBEdgeIndex::batchInsert(
    transaction::Methods* trx,
    std::vector<std::pair<TRI_voc_rid_t, VPackSlice>> const& documents,
-    arangodb::basics::LocalTaskQueue* queue) {
+    std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) {
  // acquire rocksdb transaction
  RocksDBTransactionState* state = rocksutils::toRocksTransactionState(trx);
  rocksdb::Transaction* rtrx = state->rocksTransaction();
@ -111,7 +111,7 @@ class RocksDBEdgeIndex final : public RocksDBIndex {
  void batchInsert(
      transaction::Methods*,
      std::vector<std::pair<TRI_voc_rid_t, arangodb::velocypack::Slice>> const&,
      arangodb::basics::LocalTaskQueue* queue = nullptr) override;
      std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) override;

  int drop() override;

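The batchInsert signature change above (a raw pointer with a nullptr default becomes a mandatory std::shared_ptr) gives queued index-filler tasks shared ownership of the queue they report back to, so the queue cannot be destroyed while tasks are still in flight. A simplified sketch of that ownership pattern, using placeholder types rather than ArangoDB's real LocalTaskQueue API:

#include <memory>
#include <utility>

// Placeholder stand-ins for the real queue/task classes.
struct Queue {
  // dispatch, join, and status-reporting machinery elided
};

struct Task {
  std::shared_ptr<Queue> queue;  // keeps the queue alive while the task runs
  explicit Task(std::shared_ptr<Queue> q) : queue(std::move(q)) {}
  void run() { /* do work, then report back through *queue */ }
};

int main() {
  auto queue = std::make_shared<Queue>();
  Task task(queue);  // e.g. handed off to a worker thread
  queue.reset();     // creator drops its reference...
  task.run();        // ...but the task still holds a valid queue
}
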
@ -30,12 +30,14 @@
#include "Basics/StaticStrings.h"
#include "Basics/Thread.h"
#include "Basics/VelocyPackHelper.h"
#include "Basics/build.h"
#include "GeneralServer/RestHandlerFactory.h"
#include "Logger/Logger.h"
#include "ProgramOptions/ProgramOptions.h"
#include "ProgramOptions/Section.h"
#include "RestHandler/RestHandlerCreator.h"
#include "RestServer/DatabasePathFeature.h"
#include "RestServer/ServerIdFeature.h"
#include "RestServer/ViewTypesFeature.h"
#include "RocksDBEngine/RocksDBBackgroundThread.h"
#include "RocksDBEngine/RocksDBCollection.h"
@ -54,6 +56,7 @@
#include "RocksDBEngine/RocksDBV8Functions.h"
#include "RocksDBEngine/RocksDBValue.h"
#include "RocksDBEngine/RocksDBView.h"
#include "VocBase/replication-applier.h"
#include "VocBase/ticks.h"

#include <rocksdb/convenience.h>
@ -300,7 +303,6 @@ void RocksDBEngine::getDatabases(arangodb::velocypack::Builder& result) {

  rocksdb::ReadOptions readOptions;
  std::unique_ptr<rocksdb::Iterator> iter(_db->NewIterator(readOptions));

  result.openArray();
  auto rSlice = rocksDBSlice(RocksDBEntryType::Database);
  for (iter->Seek(rSlice); iter->Valid() && iter->key().starts_with(rSlice);
@ -849,6 +851,58 @@ std::pair<TRI_voc_tick_t, TRI_voc_cid_t> RocksDBEngine::mapObjectToCollection(
  return it->second;
}

Result RocksDBEngine::createLoggerState(TRI_vocbase_t* vocbase, VPackBuilder& builder) {
  Result res;

  rocksdb::Status status = _db->GetBaseDB()->SyncWAL();
  if (!status.ok()) {
    res = rocksutils::convertStatus(status).errorNumber();
    return res;
  }

  builder.add(VPackValue(VPackValueType::Object));  // Base
  rocksdb::SequenceNumber lastTick = _db->GetLatestSequenceNumber();

  // "state" part
  builder.add("state", VPackValue(VPackValueType::Object));  // open
  builder.add("running", VPackValue(true));
  builder.add("lastLogTick", VPackValue(std::to_string(lastTick)));
  builder.add("lastUncommittedLogTick", VPackValue(std::to_string(lastTick)));
  builder.add("totalEvents", VPackValue(0));  // s.numEvents + s.numEventsSync
  builder.add("time", VPackValue(utilities::timeString()));
  builder.close();

  // "server" part
  builder.add("server", VPackValue(VPackValueType::Object));  // open
  builder.add("version", VPackValue(ARANGODB_VERSION));
  builder.add("serverId", VPackValue(std::to_string(ServerIdFeature::getId())));
  builder.close();

  // "clients" part
  builder.add("clients", VPackValue(VPackValueType::Array));  // open
  if (vocbase != nullptr) {  // add clients
    auto allClients = vocbase->getReplicationClients();
    for (auto& it : allClients) {
      // One client
      builder.add(VPackValue(VPackValueType::Object));
      builder.add("serverId", VPackValue(std::to_string(std::get<0>(it))));

      char buffer[21];
      TRI_GetTimeStampReplication(std::get<1>(it), &buffer[0], sizeof(buffer));
      builder.add("time", VPackValue(buffer));

      builder.add("lastServedTick", VPackValue(std::to_string(std::get<2>(it))));

      builder.close();
    }
  }
  builder.close();  // clients

  builder.close();  // base

  return res;
}

Result RocksDBEngine::dropDatabase(TRI_voc_tick_t id) {
  using namespace rocksutils;
  Result res;

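A sketch of how the new createLoggerState might be driven by a caller; the engine and vocbase variables are assumed context for illustration. Per the implementation above, the builder ends up holding one object with "state", "server", and "clients" parts:

// Assumed context: 'engine' is a RocksDBEngine*, 'vocbase' may be nullptr.
VPackBuilder builder;
Result res = engine->createLoggerState(vocbase, builder);
if (res.ok()) {
  // builder.slice() now holds:
  // { "state":   { "running": true, "lastLogTick": "...",
  //                "lastUncommittedLogTick": "...", "totalEvents": 0, "time": "..." },
  //   "server":  { "version": "...", "serverId": "..." },
  //   "clients": [ /* one object per replication client, if vocbase was given */ ] }
}
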
@ -248,6 +248,8 @@ class RocksDBEngine final : public StorageEngine {
  void addCollectionMapping(uint64_t, TRI_voc_tick_t, TRI_voc_cid_t);
  std::pair<TRI_voc_tick_t, TRI_voc_cid_t> mapObjectToCollection(uint64_t);

  Result createLoggerState(TRI_vocbase_t* vocbase, VPackBuilder& builder);

 private:
  Result dropDatabase(TRI_voc_tick_t);
  bool systemDatabaseExists();

@ -48,6 +48,7 @@ double const RocksDBReplicationContext::DefaultTTL = 30 * 60.0;
RocksDBReplicationContext::RocksDBReplicationContext()
    : _id(TRI_NewTickServer()),
      _lastTick(0),
      _currentTick(0),
      _trx(),
      _collection(nullptr),
      _iter(),
@ -77,21 +78,26 @@ uint64_t RocksDBReplicationContext::count() const {

// creates new transaction/snapshot
void RocksDBReplicationContext::bind(TRI_vocbase_t* vocbase) {
  releaseDumpingResources();
  _trx = createTransaction(vocbase);
  if ((_trx.get() == nullptr) || (_trx->vocbase() != vocbase)) {
    releaseDumpingResources();
    _trx = createTransaction(vocbase);
  }
}

int RocksDBReplicationContext::bindCollection(
    std::string const& collectionName) {
  if ((_collection == nullptr) || _collection->name() != collectionName) {
  if ((_collection == nullptr) ||
      ((_collection->name() != collectionName) &&
       std::to_string(_collection->cid()) != collectionName)) {
    _collection = _trx->vocbase()->lookupCollection(collectionName);

    if (_collection == nullptr) {
      return TRI_ERROR_BAD_PARAMETER;
    }

    _trx->addCollectionAtRuntime(collectionName);
    _iter = _collection->getAllIterator(_trx.get(), &_mdr,
                                        false);  // _mdr is not used nor updated
    _currentTick = 1;
    _hasMore = true;
  }
  return TRI_ERROR_NO_ERROR;
@ -174,13 +180,19 @@ RocksDBReplicationResult RocksDBReplicationContext::dump(
    try {
      _hasMore = _iter->next(cb, 10);  // TODO: adjust limit?
    } catch (std::exception const& ex) {
      _hasMore = false;
      return RocksDBReplicationResult(TRI_ERROR_INTERNAL, _lastTick);
    } catch (RocksDBReplicationResult const& ex) {
      _hasMore = false;
      return ex;
    }
  }

  return RocksDBReplicationResult(TRI_ERROR_NO_ERROR, _lastTick);
  if (_hasMore) {
    _currentTick++;
  }

  return RocksDBReplicationResult(TRI_ERROR_NO_ERROR, _currentTick);
}

arangodb::Result RocksDBReplicationContext::dumpKeyChunks(VPackBuilder& b,

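The dump changes above make the returned tick a per-batch counter (_currentTick) rather than the last WAL tick, advancing only while another batch remains so a client can resume at the right offset. A self-contained sketch of that resumable-batch idea, using plain types instead of the actual replication classes:

#include <cstddef>
#include <cstdint>
#include <vector>

// Sketch: produce one batch per call and advance the tick only while
// more data remains, so a client can resume from the returned tick.
struct DumpResult {
  int errorCode;   // 0 == ok
  uint64_t tick;   // offset to pass into the next call
};

DumpResult dumpBatch(std::vector<int> const& data, uint64_t& currentTick,
                     std::size_t batchSize) {
  std::size_t offset = static_cast<std::size_t>(currentTick) * batchSize;
  bool hasMore = offset + batchSize < data.size();
  // ... emit data[offset, offset + batchSize) to the client here ...
  if (hasMore) {
    ++currentTick;
  }
  return {0, currentTick};
}
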
@ -106,6 +106,7 @@ class RocksDBReplicationContext {
 private:
  TRI_voc_tick_t _id;
  uint64_t _lastTick;
  uint64_t _currentTick;
  std::unique_ptr<transaction::Methods> _trx;
  LogicalCollection* _collection;
  std::unique_ptr<IndexIterator> _iter;

@ -328,57 +328,12 @@ bool RocksDBRestReplicationHandler::isCoordinatorError() {

void RocksDBRestReplicationHandler::handleCommandLoggerState() {
  VPackBuilder builder;
  builder.add(VPackValue(VPackValueType::Object));  // Base

  // MMFilesLogfileManager::instance()->waitForSync(10.0);
  // MMFilesLogfileManagerState const s =
  // MMFilesLogfileManager::instance()->state();
  rocksdb::TransactionDB* db =
      static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE)->db();
  rocksdb::Status status = db->GetBaseDB()->SyncWAL();
  if (!status.ok()) {
    Result res = rocksutils::convertStatus(status).errorNumber();
  auto res = globalRocksEngine()->createLoggerState(_vocbase, builder);
  if (res.fail()) {
    generateError(rest::ResponseCode::BAD, res.errorNumber(),
                  res.errorMessage());
    return;
  }
  rocksdb::SequenceNumber lastTick = latestSequenceNumber();
  // "state" part
  builder.add("state", VPackValue(VPackValueType::Object));
  builder.add("running", VPackValue(true));
  builder.add("lastLogTick", VPackValue(StringUtils::itoa(lastTick)));
  builder.add("lastUncommittedLogTick",
              VPackValue(StringUtils::itoa(lastTick + 1)));
  builder.add("totalEvents", VPackValue(0));  // s.numEvents + s.numEventsSync
  builder.add("time", VPackValue(utilities::timeString()));
  builder.close();

  // "server" part
  builder.add("server", VPackValue(VPackValueType::Object));
  builder.add("version", VPackValue(ARANGODB_VERSION));
  builder.add("serverId", VPackValue(std::to_string(ServerIdFeature::getId())));
  builder.close();

  // "clients" part
  builder.add("clients", VPackValue(VPackValueType::Array));
  auto allClients = _vocbase->getReplicationClients();
  for (auto& it : allClients) {
    // One client
    builder.add(VPackValue(VPackValueType::Object));
    builder.add("serverId", VPackValue(std::to_string(std::get<0>(it))));

    char buffer[21];
    TRI_GetTimeStampReplication(std::get<1>(it), &buffer[0], sizeof(buffer));
    builder.add("time", VPackValue(buffer));

    builder.add("lastServedTick", VPackValue(std::to_string(std::get<2>(it))));

    builder.close();
  }
  builder.close();  // clients

  builder.close();  // base

  generateResult(rest::ResponseCode::OK, builder.slice());
}

@ -871,7 +826,8 @@ void RocksDBRestReplicationHandler::handleCommandRestoreCollection() {
                  "invalid JSON");
    return;
  }
  VPackSlice const slice = parsedRequest->slice();
  auto pair = stripObjectIds(parsedRequest->slice());
  VPackSlice const slice = pair.first;

  bool overwrite = false;

@ -1775,14 +1731,15 @@ int RocksDBRestReplicationHandler::processRestoreCollectionCoordinator(
  if (dropExisting) {
    int res = ci->dropCollectionCoordinator(dbName, col->cid_as_string(),
                                            errorMsg, 0.0);
    if (res == TRI_ERROR_FORBIDDEN) {
    if (res == TRI_ERROR_FORBIDDEN ||
        res == TRI_ERROR_CLUSTER_MUST_NOT_DROP_COLL_OTHER_DISTRIBUTESHARDSLIKE) {
      // some collections must not be dropped
      res = truncateCollectionOnCoordinator(dbName, name);
      if (res != TRI_ERROR_NO_ERROR) {
        errorMsg =
            "unable to truncate collection (dropping is forbidden): " + name;
        return res;
      }
      return res;
    }

    if (res != TRI_ERROR_NO_ERROR) {

@ -63,46 +63,44 @@ static void JS_StateLoggerReplication(
  v8::HandleScope scope(isolate);

  std::string engineName = EngineSelectorFeature::ENGINE->typeName();
  v8::Handle<v8::Object> result = v8::Object::New(isolate);

  v8::Handle<v8::Object> state = v8::Object::New(isolate);
  state->Set(TRI_V8_ASCII_STRING("running"), v8::True(isolate));

  if (engineName == "mmfiles") {
    v8::Handle<v8::Object> state = v8::Object::New(isolate);
    MMFilesLogfileManagerState const s = MMFilesLogfileManager::instance()->state();
    state->Set(TRI_V8_ASCII_STRING("running"), v8::True(isolate));
    state->Set(TRI_V8_ASCII_STRING("lastLogTick"),
               TRI_V8UInt64String<TRI_voc_tick_t>(isolate, s.lastCommittedTick));
    state->Set(TRI_V8_ASCII_STRING("lastUncommittedLogTick"), TRI_V8UInt64String<TRI_voc_tick_t>(isolate, s.lastAssignedTick));
    state->Set(TRI_V8_ASCII_STRING("totalEvents"),
               v8::Number::New(isolate, static_cast<double>(s.numEvents + s.numEventsSync)));
    state->Set(TRI_V8_ASCII_STRING("time"), TRI_V8_STD_STRING(s.timeString));
    result->Set(TRI_V8_ASCII_STRING("state"), state);

    v8::Handle<v8::Object> server = v8::Object::New(isolate);
    server->Set(TRI_V8_ASCII_STRING("version"),
                TRI_V8_ASCII_STRING(ARANGODB_VERSION));
    server->Set(TRI_V8_ASCII_STRING("serverId"),
                TRI_V8_STD_STRING(StringUtils::itoa(ServerIdFeature::getId())));
    result->Set(TRI_V8_ASCII_STRING("server"), server);

    v8::Handle<v8::Object> clients = v8::Object::New(isolate);
    result->Set(TRI_V8_ASCII_STRING("clients"), clients);
  } else if (engineName == "rocksdb") {
    rocksdb::TransactionDB* db = rocksutils::globalRocksDB();
    uint64_t lastTick = db->GetLatestSequenceNumber();
    state->Set(TRI_V8_ASCII_STRING("lastLogTick"),
               TRI_V8UInt64String<TRI_voc_tick_t>(isolate, lastTick));
    state->Set(TRI_V8_ASCII_STRING("lastUncommittedLogTick"),
               TRI_V8UInt64String<TRI_voc_tick_t>(isolate, lastTick));
    state->Set(TRI_V8_ASCII_STRING("totalEvents"),
               v8::Number::New(isolate, static_cast<double>(0)));  // s.numEvents + s.numEventsSync)));
    state->Set(TRI_V8_ASCII_STRING("time"), TRI_V8_STD_STRING(utilities::timeString()));
    VPackBuilder builder;
    auto res = rocksutils::globalRocksEngine()->createLoggerState(nullptr, builder);
    if (res.fail()) {
      TRI_V8_THROW_EXCEPTION(res);
      return;
    }
    v8::Handle<v8::Value> resultValue = TRI_VPackToV8(isolate, builder.slice());
    result = v8::Handle<v8::Object>::Cast(resultValue);
  } else {
    TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid storage engine");
    return;
  }

  v8::Handle<v8::Object> result = v8::Object::New(isolate);
  result->Set(TRI_V8_ASCII_STRING("state"), state);

  v8::Handle<v8::Object> server = v8::Object::New(isolate);
  server->Set(TRI_V8_ASCII_STRING("version"),
              TRI_V8_ASCII_STRING(ARANGODB_VERSION));
  server->Set(TRI_V8_ASCII_STRING("serverId"),
              TRI_V8_STD_STRING(StringUtils::itoa(ServerIdFeature::getId())));
  result->Set(TRI_V8_ASCII_STRING("server"), server);

  v8::Handle<v8::Object> clients = v8::Object::New(isolate);
  result->Set(TRI_V8_ASCII_STRING("clients"), clients);

  TRI_V8_RETURN(result);
  TRI_V8_TRY_CATCH_END
}

@ -1,160 +0,0 @@
# - Check which parts of the C++11 standard the compiler supports
#
# When found it will set the following variables
#
# CXX11_COMPILER_FLAGS - the compiler flags needed to get C++11 features
#
# HAS_CXX11_AUTO - auto keyword
# HAS_CXX11_AUTO_RET_TYPE - function declaration with deduced return types
# HAS_CXX11_CLASS_OVERRIDE - override and final keywords for classes and methods
# HAS_CXX11_CONSTEXPR - constexpr keyword
# HAS_CXX11_CSTDINT_H - cstdint header
# HAS_CXX11_DECLTYPE - decltype keyword
# HAS_CXX11_FUNC - __func__ preprocessor constant
# HAS_CXX11_INITIALIZER_LIST - initializer list
# HAS_CXX11_LAMBDA - lambdas
# HAS_CXX11_LIB_REGEX - regex library
# HAS_CXX11_LONG_LONG - long long signed & unsigned types
# HAS_CXX11_NULLPTR - nullptr
# HAS_CXX11_RVALUE_REFERENCES - rvalue references
# HAS_CXX11_SIZEOF_MEMBER - sizeof() non-static members
# HAS_CXX11_STATIC_ASSERT - static_assert()
# HAS_CXX11_VARIADIC_TEMPLATES - variadic templates
# HAS_CXX11_SHARED_PTR - Shared Pointer
# HAS_CXX11_THREAD - thread
# HAS_CXX11_MUTEX - mutex
# HAS_CXX11_NOEXCEPT - noexcept
# HAS_CXX11_CONDITIONAL - conditional type definitions

#=============================================================================
# Copyright 2011,2012 Rolf Eike Beer <eike@sf-mail.de>
# Copyright 2012 Andreas Weis
# Copyright 2014 Kaveh Vahedipour <kaveh@codeare.org>
#
# Distributed under the OSI-approved BSD License (the "License");
# see accompanying file Copyright.txt for details.
#
# This software is distributed WITHOUT ANY WARRANTY; without even the
# implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the License for more information.
#=============================================================================
# (To distribute this file outside of CMake, substitute the full
# License text for the above reference.)

#
# Each feature may have up to 3 checks, every one of them in its own file
# FEATURE.cpp - example that must build and return 0 when run
# FEATURE_fail.cpp - example that must build, but may not return 0 when run
# FEATURE_fail_compile.cpp - example that must fail compilation
#
# The first one is mandatory, the latter 2 are optional and do not depend on
# each other (i.e. only one may be present).
#
# Modification for std::thread (Kaveh Vahedipour, Forschungszentrum Juelich)
#

if (NOT CMAKE_CXX_COMPILER_LOADED)
  message(FATAL_ERROR "CheckCXX11Features module only works if language CXX is enabled")
endif ()

cmake_minimum_required(VERSION 2.8.3)

#
### Check for needed compiler flags
#
include(CheckCXXCompilerFlag)
check_cxx_compiler_flag("-std=c++11" _HAS_CXX11_FLAG)
if (NOT _HAS_CXX11_FLAG)
  check_cxx_compiler_flag("-std=c++0x" _HAS_CXX0X_FLAG)
endif ()

if (_HAS_CXX11_FLAG)
  set(CXX11_COMPILER_FLAGS "-std=c++11")
elseif (_HAS_CXX0X_FLAG)
  set(CXX11_COMPILER_FLAGS "-std=c++0x")
endif ()

function(cxx11_check_feature FEATURE_NAME RESULT_VAR)
  if (NOT DEFINED ${RESULT_VAR})
    set(_bindir "${CMAKE_CURRENT_BINARY_DIR}/cxx11/${FEATURE_NAME}")

    set(_SRCFILE_BASE ${CMAKE_CURRENT_LIST_DIR}/CheckCXX11Features/cxx11-test-${FEATURE_NAME})
    set(_LOG_NAME "\"${FEATURE_NAME}\"")
    message(STATUS "Checking C++11 support for ${_LOG_NAME}")

    set(_SRCFILE "${_SRCFILE_BASE}.cpp")
    set(_SRCFILE_FAIL "${_SRCFILE_BASE}_fail.cpp")
    set(_SRCFILE_FAIL_COMPILE "${_SRCFILE_BASE}_fail_compile.cpp")

    if (CROSS_COMPILING)
      try_compile(${RESULT_VAR} "${_bindir}" "${_SRCFILE}"
                  COMPILE_DEFINITIONS "${CXX11_COMPILER_FLAGS}")
      if (${RESULT_VAR} AND EXISTS ${_SRCFILE_FAIL})
        try_compile(${RESULT_VAR} "${_bindir}_fail" "${_SRCFILE_FAIL}"
                    COMPILE_DEFINITIONS "${CXX11_COMPILER_FLAGS}")
      endif (${RESULT_VAR} AND EXISTS ${_SRCFILE_FAIL})
    else (CROSS_COMPILING)
      try_run(_RUN_RESULT_VAR _COMPILE_RESULT_VAR
              "${_bindir}" "${_SRCFILE}"
              COMPILE_DEFINITIONS "${CXX11_COMPILER_FLAGS}")
      if (_COMPILE_RESULT_VAR AND NOT _RUN_RESULT_VAR)
        set(${RESULT_VAR} TRUE)
      else (_COMPILE_RESULT_VAR AND NOT _RUN_RESULT_VAR)
        set(${RESULT_VAR} FALSE)
      endif (_COMPILE_RESULT_VAR AND NOT _RUN_RESULT_VAR)
      if (${RESULT_VAR} AND EXISTS ${_SRCFILE_FAIL})
        try_run(_RUN_RESULT_VAR _COMPILE_RESULT_VAR
                "${_bindir}_fail" "${_SRCFILE_FAIL}"
                COMPILE_DEFINITIONS "${CXX11_COMPILER_FLAGS}")
        if (_COMPILE_RESULT_VAR AND _RUN_RESULT_VAR)
          set(${RESULT_VAR} TRUE)
        else (_COMPILE_RESULT_VAR AND _RUN_RESULT_VAR)
          set(${RESULT_VAR} FALSE)
        endif (_COMPILE_RESULT_VAR AND _RUN_RESULT_VAR)
      endif (${RESULT_VAR} AND EXISTS ${_SRCFILE_FAIL})
    endif (CROSS_COMPILING)
    if (${RESULT_VAR} AND EXISTS ${_SRCFILE_FAIL_COMPILE})
      try_compile(_TMP_RESULT "${_bindir}_fail_compile" "${_SRCFILE_FAIL_COMPILE}"
                  COMPILE_DEFINITIONS "${CXX11_COMPILER_FLAGS}")
      if (_TMP_RESULT)
        set(${RESULT_VAR} FALSE)
      else (_TMP_RESULT)
        set(${RESULT_VAR} TRUE)
      endif (_TMP_RESULT)
    endif (${RESULT_VAR} AND EXISTS ${_SRCFILE_FAIL_COMPILE})

    if (${RESULT_VAR})
      message(STATUS "Checking C++11 support for ${_LOG_NAME}: works")
    else (${RESULT_VAR})
      message(FATAL_ERROR "Checking C++11 support for ${_LOG_NAME}: not supported")
    endif (${RESULT_VAR})
    set(${RESULT_VAR} ${${RESULT_VAR}} CACHE INTERNAL "C++11 support for ${_LOG_NAME}")
  endif (NOT DEFINED ${RESULT_VAR})
endfunction(cxx11_check_feature)

cxx11_check_feature("__func__" HAS_CXX11_FUNC)
cxx11_check_feature("auto" HAS_CXX11_AUTO)
cxx11_check_feature("auto_ret_type" HAS_CXX11_AUTO_RET_TYPE)
#cxx11_check_feature("atomic_uint_fast16_t" HAS_CXX11_ATOMIC_UINT_FAST16_T)
cxx11_check_feature("class_override_final" HAS_CXX11_CLASS_OVERRIDE)
cxx11_check_feature("constexpr" HAS_CXX11_CONSTEXPR)
cxx11_check_feature("conditional" HAS_CXX11_CONDITIONAL)
#cxx11_check_feature("cstdint" HAS_CXX11_CSTDINT_H)
cxx11_check_feature("decltype" HAS_CXX11_DECLTYPE)
cxx11_check_feature("initializer_list" HAS_CXX11_INITIALIZER_LIST)
cxx11_check_feature("lambda" HAS_CXX11_LAMBDA)
cxx11_check_feature("range_based_for_loop" HAS_CXX11_RANGE_BASED_FOR_LOOP)
#cxx11_check_feature("long_long" HAS_CXX11_LONG_LONG)
cxx11_check_feature("nullptr" HAS_CXX11_NULLPTR)
cxx11_check_feature("tuple" HAS_CXX11_TUPLE)
cxx11_check_feature("regex" HAS_CXX11_LIB_REGEX)
cxx11_check_feature("rvalue-references" HAS_CXX11_RVALUE_REFERENCES)
cxx11_check_feature("sizeof_member" HAS_CXX11_SIZEOF_MEMBER)
cxx11_check_feature("static_assert" HAS_CXX11_STATIC_ASSERT)
cxx11_check_feature("variadic_templates" HAS_CXX11_VARIADIC_TEMPLATES)
cxx11_check_feature("shared_ptr" HAS_CXX11_SHARED_PTR)
cxx11_check_feature("unique_ptr" HAS_CXX11_UNIQUE_PTR)
cxx11_check_feature("weak_ptr" HAS_CXX11_WEAK_PTR)
cxx11_check_feature("thread" HAS_CXX11_THREAD)
cxx11_check_feature("mutex" HAS_CXX11_MUTEX)
cxx11_check_feature("noexcept" HAS_CXX11_NOEXCEPT)

@ -1,8 +0,0 @@
int main(void)
{
  if (!__func__)
    return 1;
  if (!(*__func__))
    return 1;
  return 0;
}

@ -1,6 +0,0 @@
#include <atomic>

int main () {
  std::atomic_uint_fast16_t a;
  return 0;
}

@ -1,12 +0,0 @@

int main()
{
  auto i = 5;
  auto f = 3.14159f;
  auto d = 3.14159;
  bool ret = (
    (sizeof(f) < sizeof(d)) &&
    (sizeof(i) == sizeof(int))
  );
  return ret ? 0 : 1;
}

@ -1,7 +0,0 @@
int main(void)
{
  // must fail because there is no initializer
  auto i;

  return 0;
}

@ -1,8 +0,0 @@
auto foo(int i) -> int {
  return i - 1;
}

int main()
{
  return foo(1);
}

@ -1,28 +0,0 @@
class base {
public:
  virtual int foo(int a)
  { return 4 + a; }
  int bar(int a)
  { return a - 2; }
};

class sub final : public base {
public:
  virtual int foo(int a) override
  { return 8 + 2 * a; };
};

class sub2 final : public base {
public:
  virtual int foo(int a) override final
  { return 8 + 2 * a; };
};

int main(void)
{
  base b;
  sub s;
  sub2 t;

  return (b.foo(2) * 2 == s.foo(2) && b.foo(2) * 2 == t.foo(2)) ? 0 : 1;
}

@ -1,25 +0,0 @@
class base {
public:
  virtual int foo(int a)
  { return 4 + a; }
  virtual int bar(int a) final
  { return a - 2; }
};

class sub final : public base {
public:
  virtual int foo(int a) override
  { return 8 + 2 * a; };
  virtual int bar(int a)
  { return a; }
};

class impossible : public sub { };

int main(void)
{
  base b;
  sub s;

  return 1;
}

@ -1,17 +0,0 @@
#include <type_traits>
#include <string>

template<class T> class A {
public:
  typedef typename std::conditional<false, const std::string, std::string>::type StringType;
  A() : s(""), t(0) {}
  virtual ~A () {}
private:
  StringType s;
  T t;
};

int main() {
  A<float> a;
  return 0;
}

@ -1,19 +0,0 @@
constexpr int square(int x)
{
  return x*x;
}

constexpr int the_answer()
{
  return 42;
}

int main()
{
  int test_arr[square(3)];
  bool ret = (
    (square(the_answer()) == 1764) &&
    (sizeof(test_arr)/sizeof(test_arr[0]) == 9)
  );
  return ret ? 0 : 1;
}

@ -1,11 +0,0 @@
#include <cstdint>

int main()
{
  bool test =
    (sizeof(int8_t) == 1) &&
    (sizeof(int16_t) == 2) &&
    (sizeof(int32_t) == 4) &&
    (sizeof(int64_t) == 8);
  return test ? 0 : 1;
}

@ -1,10 +0,0 @@
bool check_size(int i)
{
  return sizeof(int) == sizeof(decltype(i));
}

int main()
{
  bool ret = check_size(42);
  return ret ? 0 : 1;
}

@ -1,27 +0,0 @@
#include <vector>

class seq {
public:
  seq(std::initializer_list<int> list);

  int length() const;
private:
  std::vector<int> m_v;
};

seq::seq(std::initializer_list<int> list)
  : m_v(list)
{
}

int seq::length() const
{
  return m_v.size();
}

int main(void)
{
  seq a = {18, 20, 2, 0, 4, 7};

  return (a.length() == 6) ? 0 : 1;
}

@ -1,5 +0,0 @@
int main()
{
  int ret = 0;
  return ([&ret]() -> int { return ret; })();
}

@ -1,7 +0,0 @@
int main(void)
{
  long long l;
  unsigned long long ul;

  return ((sizeof(l) >= 8) && (sizeof(ul) >= 8)) ? 0 : 1;
}

@ -1,6 +0,0 @@
#include <mutex>

int main() {
  std::mutex _mutex;
  return 0;
}

@ -1,8 +0,0 @@
volatile void dummy () noexcept {
  int a = 0;
}

int main () {
  dummy();
  return 0;
}

@ -1,6 +0,0 @@
int main(void)
{
  void *v = nullptr;

  return v ? 1 : 0;
}

@ -1,6 +0,0 @@
int main(void)
{
  int i = nullptr;

  return 1;
}

@ -1,15 +0,0 @@


int main() {
  int my_array[5] = {1, 2, 3, 4, 5};

  for (int &x : my_array) {
    x *= 2;
  }

  for (auto &x : my_array) {
    x *= 2;
  }

}

@ -1,26 +0,0 @@
#include <algorithm>
#include <regex>

int parse_line(std::string const& line)
{
  std::string tmp;
  if (std::regex_search(line, std::regex("(\\s)+(-)?(\\d)+//(-)?(\\d)+(\\s)+"))) {
    tmp = std::regex_replace(line, std::regex("(-)?(\\d)+//(-)?(\\d)+"), std::string("V"));
  } else if (std::regex_search(line, std::regex("(\\s)+(-)?(\\d)+/(-)?(\\d)+(\\s)+"))) {
    tmp = std::regex_replace(line, std::regex("(-)?(\\d)+/(-)?(\\d)+"), std::string("V"));
  } else if (std::regex_search(line, std::regex("(\\s)+(-)?(\\d)+/(-)?(\\d)+/(-)?(\\d)+(\\s)+"))) {
    tmp = std::regex_replace(line, std::regex("(-)?(\\d)+/(-)?(\\d)+/(-)?(\\d)+"), std::string("V"));
  } else {
    tmp = std::regex_replace(line, std::regex("(-)?(\\d)+"), std::string("V"));
  }
  return static_cast<int>(std::count(tmp.begin(), tmp.end(), 'V'));
}

int main()
{
  bool test = (parse_line("f 7/7/7 -3/3/-3 2/-2/2") == 3) &&
              (parse_line("f 7//7 3//-3 -2//2") == 3) &&
              (parse_line("f 7/7 3/-3 -2/2") == 3) &&
              (parse_line("f 7 3 -2") == 3);
  return test ? 0 : 1;
}

@ -1,57 +0,0 @@
#include <cassert>

class rvmove {
public:
  void *ptr;
  char *array;

  rvmove()
    : ptr(0),
      array(new char[10])
  {
    ptr = this;
  }

  rvmove(rvmove &&other)
    : ptr(other.ptr),
      array(other.array)
  {
    other.array = 0;
    other.ptr = 0;
  }

  ~rvmove()
  {
    assert(((ptr != 0) && (array != 0)) || ((ptr == 0) && (array == 0)));
    delete[] array;
  }

  rvmove &operator=(rvmove &&other)
  {
    delete[] array;
    ptr = other.ptr;
    array = other.array;
    other.array = 0;
    other.ptr = 0;
    return *this;
  }

  static rvmove create()
  {
    return rvmove();
  }
private:
  rvmove(const rvmove &);
  rvmove &operator=(const rvmove &);
};

int main()
{
  rvmove mine;
  if (mine.ptr != &mine)
    return 1;
  mine = rvmove::create();
  if (mine.ptr == &mine)
    return 1;
  return 0;
}

@ -1,6 +0,0 @@
#include <memory>

int main() {
  std::shared_ptr<int> test;
  return 0;
}

@ -1,14 +0,0 @@
struct foo {
  char bar;
  int baz;
};

int main(void)
{
  bool ret = (
    (sizeof(foo::bar) == 1) &&
    (sizeof(foo::baz) >= sizeof(foo::bar)) &&
    (sizeof(foo) >= sizeof(foo::bar) + sizeof(foo::baz))
  );
  return ret ? 0 : 1;
}

@ -1,9 +0,0 @@
struct foo {
  int baz;
  double bar;
};

int main(void)
{
  return (sizeof(foo::bar) == 4) ? 0 : 1;
}

@ -1,5 +0,0 @@
int main(void)
{
  static_assert(0 < 1, "your ordering of integers is screwed");
  return 0;
}

@ -1,5 +0,0 @@
int main(void)
{
  static_assert(1 < 0, "your ordering of integers is screwed");
  return 0;
}

@ -1,6 +0,0 @@
#include <thread>

int main() {
  std::thread test;
  return 0;
}

@ -1,10 +0,0 @@
#include <tuple>

int main () {
  typedef std::tuple <int, double, long &, const char *> test_tuple;
  long lengthy = 12;
  test_tuple proof (18, 6.5, lengthy, "Ciao!");
  lengthy = std::get<0>(proof);
  std::get<3>(proof) = " Beautiful!";
  return 0;
}

@ -1,6 +0,0 @@
#include <memory>

int main() {
  std::unique_ptr<int> test;
  return 0;
}

@ -1,23 +0,0 @@
int Accumulate()
{
  return 0;
}

template<typename T, typename... Ts>
int Accumulate(T v, Ts... vs)
{
  return v + Accumulate(vs...);
}

template<int... Is>
int CountElements()
{
  return sizeof...(Is);
}

int main()
{
  int acc = Accumulate(1, 2, 3, 4, -5);
  int count = CountElements<1,2,3,4,5>();
  return ((acc == 5) && (count == 5)) ? 0 : 1;
}

@ -1,6 +0,0 @@
#include <memory>

int main() {
  std::weak_ptr<int> test;
  return 0;
}

@ -74,7 +74,9 @@ macro (install_readme input output)
  if (MSVC)
    set(CRLFSTYLE "CRLF")
  endif ()
  configure_file(${PROJECT_SOURCE_DIR}/${input} "${PROJECT_BINARY_DIR}/${output}" NEWLINE_STYLE ${CRLFSTYLE})

  install(
    CODE "configure_file(${PROJECT_SOURCE_DIR}/${input} \"${PROJECT_BINARY_DIR}/${output}\" NEWLINE_STYLE ${CRLFSTYLE})")
  install(
    FILES "${PROJECT_BINARY_DIR}/${output}"
    DESTINATION "${where}"

@ -64,10 +64,13 @@ function collectionRepresentation(collection, showProperties, showCount, showFig
    result.indexBuckets = properties.indexBuckets;

    if (cluster.isCoordinator()) {
      result.shardKeys = properties.shardKeys;
      result.avoidServers = properties.avoidServers;
      result.distributeShardsLike = properties.distributeShardsLike;
      result.numberOfShards = properties.numberOfShards;
      result.replicationFactor = properties.replicationFactor;
      result.avoidServers = properties.avoidServers;
      result.distributeShardsLike = properties.distributeShardsLike;
      result.shardKeys = properties.shardKeys;
    }
  }

@ -39,6 +39,7 @@
      'nodes': 'nodes',
      'shards': 'shards',
      'node/:name': 'node',
      'nodeInfo/:id': 'nodeInfo',
      'logs': 'logger',
      'helpus': 'helpUs',
      'graph/:name': 'graph',
@ -327,16 +328,40 @@
        return;
      }

      if (!this.nodeView) {
        this.nodeView = new window.NodeView({
          coordname: name,
          coordinators: this.coordinatorCollection,
          dbServers: this.dbServers
        });
      if (this.nodeView) {
        this.nodeView.remove();
      }
      this.nodeView = new window.NodeView({
        coordname: name,
        coordinators: this.coordinatorCollection,
        dbServers: this.dbServers
      });
      this.nodeView.render();
    },

    nodeInfo: function (id, initialized) {
      this.checkUser();
      if (!initialized || this.isCluster === undefined) {
        this.waitForInit(this.nodeInfo.bind(this), id);
        return;
      }
      if (this.isCluster === false) {
        this.routes[''] = 'dashboard';
        this.navigate('#dashboard', {trigger: true});
        return;
      }

      if (this.nodeInfoView) {
        this.nodeInfoView.remove();
      }
      this.nodeInfoView = new window.NodeInfoView({
        nodeId: id,
        coordinators: this.coordinatorCollection,
        dbServers: this.dbServers[0]
      });
      this.nodeInfoView.render();
    },

    shards: function (initialized) {
      this.checkUser();
      if (!initialized || this.isCluster === undefined) {
@ -367,10 +392,11 @@
        this.navigate('#dashboard', {trigger: true});
        return;
      }
      if (!this.nodesView) {
        this.nodesView = new window.NodesView({
        });
      if (this.nodesView) {
        this.nodesView.remove();
      }
      this.nodesView = new window.NodesView({
      });
      this.nodesView.render();
    },

@ -0,0 +1,27 @@
<script id="nodeInfoView.ejs" type="text/template">

  <div class="nodeInfoView">
    <div class="modal-body">
      <table id="serverInfoTable" class="arango-table">
        <tbody>
          <% _.each(entries, function (entry, name) { %>
            <tr>
              <th class="collectionInfoTh2"><%=name%></th>
              <th class="collectionInfoTh">
                <div id="server-<%=name%>" class="modal-text"><%=entry%></div>
              </th>
              <% if (entry.description) { %>
                <th class="tooltipInfoTh">
                  <span class="tippy" title="<%=entry.description%>"></span>
                </th>
              <% } %>
            </tr>
          <% }); %>
        </tbody>
      </table>
    </div>
  </div>

</script>

@ -47,10 +47,10 @@
<div class="pure-g cluster-nodes-title pure-table pure-table-header pure-title" style="clear: both">
  <div class="pure-table-row">
    <div class="pure-u-9-24 left">Name</div>
    <div class="pure-u-8-24 left">Endpoint</div>
    <div class="pure-u-3-24 mid hide-small">Heartbeat</div>
    <div class="pure-u-3-24 mid">Status</div>
    <div class="pure-u-1-24 mid"></div>
    <div class="pure-u-9-24 left">Endpoint</div>
    <div class="pure-u-2-24 mid hide-small">Since</div>
    <div class="pure-u-2-24 mid">Info</div>
    <div class="pure-u-2-24 mid">Status</div>
  </div>
</div>

@ -67,16 +67,17 @@
      <i class="fa fa-trash-o"></i>
    <% } %>
  </div>
  <div class="pure-u-8-24 left"><%= node.Endpoint %></div>
  <div class="pure-u-9-24 left"><%= node.Endpoint %></div>

  <% var formatted = (node.LastHeartbeatAcked).substr(11, 18).slice(0, -1); %>
  <div class="pure-u-3-24 hide-small mid"><%= formatted %></div>
  <div class="pure-u-3-24 mid"><%= node.LastHeartbeatStatus %></div>
  <div class="pure-u-2-24 hide-small mid"><%= formatted %></div>

  <div class="pure-u-2-24 mid"><i class="fa fa-info-circle"></i></div>

  <% if (node.Status === 'GOOD') { %>
    <div class="pure-u-1-24 mid state"><i class="fa fa-check-circle"></i></div>
    <div class="pure-u-2-24 mid state"><i class="fa fa-check-circle tippy" title="<%= node.LastHeartbeatStatus %>"></i></div>
  <% } else { %>
    <div class="pure-u-1-24 mid state"><i class="fa fa-exclamation-circle"></i></div>
    <div class="pure-u-2-24 mid state"><i class="fa fa-exclamation-circle"></i></div>
  <% } %>

</div>

@ -128,10 +129,10 @@
<div class="pure-g cluster-nodes-title pure-table pure-table-header pure-title">
  <div class="pure-table-row">
    <div class="pure-u-9-24 left">Name</div>
    <div class="pure-u-8-24 left">Endpoint</div>
    <div class="pure-u-3-24 mid hide-small">Heartbeat</div>
    <div class="pure-u-3-24 mid">Status</div>
    <div class="pure-u-1-24 mid"></div>
    <div class="pure-u-9-24 left">Endpoint</div>
    <div class="pure-u-2-24 mid hide-small">Since</div>
    <div class="pure-u-2-24 mid">Info</div>
    <div class="pure-u-2-24 mid">Status</div>
  </div>
</div>
<% } %>

@ -143,16 +144,17 @@
<div class="pure-table-row <%= disabled %>" id="<%= id %>">

  <div class="pure-u-9-24 left"><%= node.ShortName %></div>
  <div class="pure-u-8-24 left"><%= node.Endpoint %></div>
  <div class="pure-u-9-24 left"><%= node.Endpoint %></div>

  <% var formatted = (node.LastHeartbeatAcked).substr(11, 18).slice(0, -1); %>
  <div class="pure-u-3-24 mid hide-small"><%= formatted %></div>
  <div class="pure-u-3-24 mid"><%= node.LastHeartbeatStatus %></div>
  <div class="pure-u-2-24 mid hide-small"><%= formatted %></div>

  <div class="pure-u-2-24 mid"><i class="fa fa-info-circle"></i></div>

  <% if (node.Status === 'GOOD') { %>
    <div class="pure-u-1-24 mid state"><i class="fa fa-check-circle"></i></div>
    <div class="pure-u-2-24 mid state"><i class="fa fa-check-circle tippy" title="<%= node.LastHeartbeatStatus %>"></i></div>
  <% } else { %>
    <div class="pure-u-1-24 mid state"><i class="fa fa-exclamation-circle"></i></div>
    <div class="pure-u-2-24 mid state"><i class="fa fa-exclamation-circle"></i></div>
  <% } %>

</div>

@ -0,0 +1,108 @@
/* jshint browser: true */
/* jshint unused: false */
/* global arangoHelper, $, Backbone, templateEngine, window */
(function () {
  'use strict';

  window.NodeInfoView = Backbone.View.extend({
    el: '#content',

    template: templateEngine.createTemplate('nodeInfoView.ejs'),

    initialize: function (options) {
      if (window.App.isCluster) {
        this.nodeId = options.nodeId;
        this.dbServers = options.dbServers;
        this.coordinators = options.coordinators;
      }
    },

    remove: function () {
      this.$el.empty().off(); /* off to unbind the events */
      this.stopListening();
      this.unbind();
      delete this.el;
      return this;
    },

    render: function () {
      this.$el.html(this.template.render({entries: []}));

      var callback = function () {
        this.continueRender();
        this.breadcrumb(arangoHelper.getCoordinatorShortName(this.nodeId));
        $(window).trigger('resize');
      }.bind(this);

      if (!this.initCoordDone) {
        this.waitForCoordinators();
      }

      if (!this.initDBDone) {
        this.waitForDBServers(callback);
      } else {
        this.nodeId = window.location.hash.split('/')[1];
        this.coordinator = this.coordinators.findWhere({name: this.coordname});
        callback();
      }
    },

    continueRender: function () {
      var model;
      if (this.coordinator) {
        model = this.coordinator.toJSON();
      } else {
        model = this.dbServer.toJSON();
      }

      var renderObj = {};
      renderObj.Name = model.name;
      renderObj.Address = model.address;
      renderObj.Status = model.status;
      renderObj.Protocol = model.protocol;
      renderObj.Role = model.role;
      this.$el.html(this.template.render({entries: renderObj}));
    },

    breadcrumb: function (name) {
      $('#subNavigationBar .breadcrumb').html('Node: ' + name);
    },

    waitForCoordinators: function (callback) {
      var self = this;

      window.setTimeout(function () {
        if (self.coordinators.length === 0) {
          self.waitForCoordinators(callback);
        } else {
          self.coordinator = self.coordinators.findWhere({name: self.nodeId});
          self.initCoordDone = true;
          if (callback) {
            callback();
          }
        }
      }, 200);
    },

    waitForDBServers: function (callback) {
      var self = this;

      window.setTimeout(function () {
        if (self.dbServers.length === 0) {
          self.waitForDBServers(callback);
        } else {
          self.initDBDone = true;

          self.dbServers.each(function (model) {
            if (model.get('id') === self.nodeId) {
              self.dbServer = model;
            }
          });

          callback();
        }
      }, 200);
    }

  });
}());

@ -30,6 +30,14 @@
      }
    },

    remove: function () {
      this.$el.empty().off(); /* off to unbind the events */
      this.stopListening();
      this.unbind();
      delete this.el;
      return this;
    },

    breadcrumb: function (name) {
      $('#subNavigationBar .breadcrumb').html('Node: ' + name);
    },

@ -22,6 +22,14 @@
      'keyup #plannedDBs': 'checkKey'
    },

    remove: function () {
      this.$el.empty().off(); /* off to unbind the events */
      this.stopListening();
      this.unbind();
      delete this.el;
      return this;
    },

    checkKey: function (e) {
      if (e.keyCode === 13) {
        var self = this;
@ -121,11 +129,16 @@
    },

    navigateToNode: function (elem) {
      var name = $(elem.currentTarget).attr('node').slice(0, -5);

      if ($(elem.target).hasClass('fa-info-circle')) {
        window.App.navigate('#nodeInfo/' + encodeURIComponent(name), {trigger: true});
        return;
      }
      if ($(elem.currentTarget).hasClass('noHover')) {
        return;
      }

      var name = $(elem.currentTarget).attr('node').slice(0, -5);
      window.App.navigate('#node/' + encodeURIComponent(name), {trigger: true});
    },

@ -33,8 +33,9 @@

.pure-table-body {
  .fa-check-circle,
  .fa-info-circle,
  .fa-exclamation-circle {
    font-size: 15pt;
    font-size: 13pt;
  }
}

@ -80,6 +80,15 @@ function analyzeCoreDump (instanceInfo, options, storeArangodPath, pid) {
  executeExternalAndWait('/bin/bash', args);
  GDB_OUTPUT = fs.read(gdbOutputFile);
  print(GDB_OUTPUT);

  command = 'gdb ' + storeArangodPath + ' ';

  if (options.coreDirectory === '') {
    command += 'core';
  } else {
    command += options.coreDirectory;
  }
  return command;
}

// //////////////////////////////////////////////////////////////////////////////

@ -112,6 +121,7 @@ function analyzeCoreDumpMac (instanceInfo, options, storeArangodPath, pid) {
  executeExternalAndWait('/bin/bash', args);
  GDB_OUTPUT = fs.read(lldbOutputFile);
  print(GDB_OUTPUT);
  return 'lldb ' + storeArangodPath + ' -c /cores/core.' + pid;
}

// //////////////////////////////////////////////////////////////////////////////

@ -144,6 +154,8 @@ function analyzeCoreDumpWindows (instanceInfo) {

  print('running cdb ' + JSON.stringify(args));
  executeExternalAndWait('cdb', args);

  return 'cdb ' + args.join(' ');
}

// //////////////////////////////////////////////////////////////////////////////

@ -189,24 +201,19 @@ function analyzeCrash (binary, arangod, options, checkStr) {
               yaml.safeDump(arangod) +
               'marking build as crashy.' + RESET);

  let corePath = (options.coreDirectory === '')
    ? 'core'
    : options.coreDirectory;

  arangod.exitStatus.gdbHint = 'Run debugger with "gdb ' +
    storeArangodPath + ' ' + corePath;

  let hint = '';
  if (platform.substr(0, 3) === 'win') {
    // Windows: wait for procdump to do its job...
    statusExternal(arangod.monitor, true);
    analyzeCoreDumpWindows(arangod);
    hint = analyzeCoreDumpWindows(arangod);
  } else if (platform === 'darwin') {
    fs.copyFile(binary, storeArangodPath);
    analyzeCoreDumpMac(arangod, options, storeArangodPath, arangod.pid);
    hint = analyzeCoreDumpMac(arangod, options, storeArangodPath, arangod.pid);
  } else {
    fs.copyFile(binary, storeArangodPath);
    analyzeCoreDump(arangod, options, storeArangodPath, arangod.pid);
    hint = analyzeCoreDump(arangod, options, storeArangodPath, arangod.pid);
  }
  arangod.exitStatus.gdbHint = 'Run debugger with "' + hint + '"';

  print(RESET);
}

@ -790,6 +790,7 @@ function shutdownInstance (instanceInfo, options, forceTerminate) {
    } else if (arangod.exitStatus.status !== 'TERMINATED') {
      if (arangod.exitStatus.hasOwnProperty('signal')) {
        analyzeServerCrash(arangod, options, 'instance Shutdown - ' + arangod.exitStatus.signal);
        serverCrashed = true;
      }
    } else {
      print('Server shutdown: Success: pid', arangod.pid);

@ -347,8 +347,8 @@ function printTraversalDetails (traversals) {
        maxEdgeCollectionNameStrLen = node.edgeCollectionNameStrLen;
      }
    }
    if (node.hasOwnProperty('traversalFlags')) {
      var opts = optify(node.traversalFlags);
    if (node.hasOwnProperty('options')) {
      var opts = optify(node.options);
      if (opts.length > maxOptionsLen) {
        maxOptionsLen = opts.length;
      }
@ -384,8 +384,8 @@ function printTraversalDetails (traversals) {
      line += pad(1 + maxEdgeCollectionNameStrLen) + ' ';
    }

    if (traversals[i].hasOwnProperty('traversalFlags')) {
      line += optify(traversals[i].traversalFlags, true) + pad(1 + maxOptionsLen - optify(traversals[i].traversalFlags, false).length) + ' ';
    if (traversals[i].hasOwnProperty('options')) {
      line += optify(traversals[i].options, true) + pad(1 + maxOptionsLen - optify(traversals[i].options, false).length) + ' ';
    } else {
      line += pad(1 + maxOptionsLen) + ' ';
    }
@ -856,7 +856,7 @@ function processQuery (query, explain) {
      return keyword('FOR') + ' ' + variableName(node.outVariable) + ' ' + keyword('IN') + ' ' + collection(node.collection) + ' ' + annotation('/* ' + (node.reverse ? 'reverse ' : '') + node.index.type + ' index scan */');

    case 'TraversalNode':
      node.minMaxDepth = node.traversalFlags.minDepth + '..' + node.traversalFlags.maxDepth;
      node.minMaxDepth = node.options.minDepth + '..' + node.options.maxDepth;
      node.minMaxDepthLen = node.minMaxDepth.length;

      rc = keyword('FOR ');

@ -644,6 +644,10 @@ var checkIfMayBeDropped = function (colName, graphName, graphs) {
  var result = true;
  graphs.forEach(
    function (graph) {
      if (result === false) {
        // Short circuit
        return;
      }
      if (graph._key === graphName) {
        return;
      }
@ -2008,44 +2012,47 @@ exports._drop = function (graphId, dropCollections) {

  if (dropCollections === true) {
    graphs = exports._listObjects();
    // Here we collect all collections
    // that are leading for distribution
    var initialCollections = new Set();
    let dropColCB = (name) => {
      if (checkIfMayBeDropped(name, graph._key, graphs)) {
        try {
          let colObj = db[name];
          if (colObj !== undefined) {
            // If it is undefined the collection is gone already
            if (colObj.properties().distributeShardsLike !== undefined) {
              db._drop(name);
            } else {
              initialCollections.add(name);
            }
          }
        } catch (ignore) {}
      }
    };
    // drop orphans
    if (!graph.orphanCollections) {
      graph.orphanCollections = [];
    }
    graph.orphanCollections.forEach(dropColCB);
    var edgeDefinitions = graph.edgeDefinitions;
    edgeDefinitions.forEach(
      function (edgeDefinition) {
        var from = edgeDefinition.from;
        var to = edgeDefinition.to;
        var collection = edgeDefinition.collection;
        if (checkIfMayBeDropped(collection, graph._key, graphs)) {
          db._drop(collection);
        }
        from.forEach(
          function (col) {
            if (checkIfMayBeDropped(col, graph._key, graphs)) {
              db._drop(col);
            }
          }
        );
        to.forEach(
          function (col) {
            if (checkIfMayBeDropped(col, graph._key, graphs)) {
              db._drop(col);
            }
          }
        );
        dropColCB(edgeDefinition.collection);
        from.forEach(dropColCB);
        to.forEach(dropColCB);
      }
    );
    // drop orphans
    if (!graph.orphanCollections) {
      graph.orphanCollections = [];
    for (let c of initialCollections) {
      try {
        db._drop(c);
      } catch (e) {
        console.error("Failed to Drop: '" + c + "' reason: " + e.message);
      }
    }
    graph.orphanCollections.forEach(
      function (oC) {
        if (checkIfMayBeDropped(oC, graph._key, graphs)) {
          try {
            db._drop(oC);
          } catch (ignore) {}
        }
      }
    );
  }

  gdb.remove(graphId);

@ -404,4 +404,3 @@ function dumpTestSuite () {
jsunity.run(dumpTestSuite);

return jsunity.done();

@ -0,0 +1,308 @@
/*jshint globalstrict:false, strict:false, maxlen : 4000 */
/*global assertEqual, assertTrue, assertFalse */

////////////////////////////////////////////////////////////////////////////////
/// @brief tests for dump/reload
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2010-2012 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is triAGENS GmbH, Cologne, Germany
///
/// @author Jan Steemann
/// @author Copyright 2012, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////

var internal = require("internal");
var jsunity = require("jsunity");

////////////////////////////////////////////////////////////////////////////////
/// @brief test suite
////////////////////////////////////////////////////////////////////////////////

function dumpTestSuite () {
  'use strict';
  var db = internal.db;

  return {

////////////////////////////////////////////////////////////////////////////////
/// @brief set up
////////////////////////////////////////////////////////////////////////////////

    setUp : function () {
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief tear down
////////////////////////////////////////////////////////////////////////////////

    tearDown : function () {
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief test the empty collection
////////////////////////////////////////////////////////////////////////////////

    testEmpty : function () {
      var c = db._collection("UnitTestsDumpEmpty");
      var p = c.properties();

      assertEqual(2, c.type()); // document
      assertTrue(p.waitForSync);

      assertEqual(1, c.getIndexes().length); // just primary index
      assertEqual("primary", c.getIndexes()[0].type);
      assertEqual(0, c.count());
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief test the collection with many documents
////////////////////////////////////////////////////////////////////////////////

    testMany : function () {
      var c = db._collection("UnitTestsDumpMany");
      var p = c.properties();

      assertEqual(2, c.type()); // document
      assertFalse(p.waitForSync);

      assertEqual(1, c.getIndexes().length); // just primary index
      assertEqual("primary", c.getIndexes()[0].type);
      assertEqual(100000, c.count());

      // test all documents
      var r = db._query(`FOR d IN ${c.name()} RETURN d`).toArray();
      var rr = new Map();
      for (let i = 0; i < r.length; ++i) {
        rr.set(r[i]._key, r[i]);
      }
      for (let i = 0; i < 100000; ++i) {
        var doc = rr.get("test" + i);
        assertEqual(i, doc.value1);
        assertEqual("this is a test", doc.value2);
        assertEqual("test" + i, doc.value3);
      }
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief test the edges collection
////////////////////////////////////////////////////////////////////////////////

    testEdges : function () {
      var c = db._collection("UnitTestsDumpEdges");
      var p = c.properties();

      assertEqual(3, c.type()); // edges
      assertFalse(p.waitForSync);

      assertEqual(2, c.getIndexes().length); // primary index + edges index
      assertEqual("primary", c.getIndexes()[0].type);
      assertEqual("edge", c.getIndexes()[1].type);
      assertEqual(10, c.count());

      // test all documents
      for (var i = 0; i < 10; ++i) {
        var doc = c.document("test" + i);
        assertEqual("test" + i, doc._key);
        assertEqual("UnitTestsDumpMany/test" + i, doc._from);
        assertEqual("UnitTestsDumpMany/test" + (i + 1), doc._to);
        assertEqual(i + "->" + (i + 1), doc.what);
      }
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief test the order of documents
////////////////////////////////////////////////////////////////////////////////

    testOrder : function () {
      var c = db._collection("UnitTestsDumpOrder");
      var p = c.properties();

      assertEqual(2, c.type()); // document
      assertFalse(p.waitForSync);

      assertEqual(1, c.getIndexes().length); // just primary index
      assertEqual("primary", c.getIndexes()[0].type);
      assertEqual(3, c.count());
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief test document removal & update
////////////////////////////////////////////////////////////////////////////////

    testRemoved : function () {
      var c = db._collection("UnitTestsDumpRemoved");
      var p = c.properties();

      assertEqual(2, c.type()); // document
      assertFalse(p.waitForSync);

      assertEqual(1, c.getIndexes().length); // just primary index
      assertEqual("primary", c.getIndexes()[0].type);
      assertEqual(9000, c.count());

      var i;
      for (i = 0; i < 10000; ++i) {
        if (i % 10 === 0) {
          assertFalse(c.exists("test" + i));
        }
        else {
          var doc = c.document("test" + i);
          assertEqual(i, doc.value1);

          if (i < 1000) {
            assertEqual(i + 1, doc.value2);
          }
        }
      }
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief test indexes
////////////////////////////////////////////////////////////////////////////////

    testIndexes : function () {
      var c = db._collection("UnitTestsDumpIndexes");
      var p = c.properties();

      assertEqual(2, c.type()); // document
      assertFalse(p.waitForSync);

      assertEqual(7, c.getIndexes().length);
      assertEqual("primary", c.getIndexes()[0].type);

      assertEqual("hash", c.getIndexes()[1].type);
      assertTrue(c.getIndexes()[1].unique);
      assertFalse(c.getIndexes()[1].sparse);
      assertEqual([ "a_uc" ], c.getIndexes()[1].fields);

      assertEqual("skiplist", c.getIndexes()[2].type);
      assertFalse(c.getIndexes()[2].unique);
      assertFalse(c.getIndexes()[2].sparse);
      assertEqual([ "a_s1", "a_s2" ], c.getIndexes()[2].fields);

      assertEqual("hash", c.getIndexes()[3].type);
      assertFalse(c.getIndexes()[3].unique);
      assertFalse(c.getIndexes()[3].sparse);
      assertEqual([ "a_h1", "a_h2" ], c.getIndexes()[3].fields);

      assertEqual("skiplist", c.getIndexes()[4].type);
      assertTrue(c.getIndexes()[4].unique);
      assertFalse(c.getIndexes()[4].sparse);
      assertEqual([ "a_su" ], c.getIndexes()[4].fields);

      assertEqual("hash", c.getIndexes()[5].type);
      assertFalse(c.getIndexes()[5].unique);
      assertTrue(c.getIndexes()[5].sparse);
      assertEqual([ "a_hs1", "a_hs2" ], c.getIndexes()[5].fields);

      assertEqual("skiplist", c.getIndexes()[6].type);
      assertFalse(c.getIndexes()[6].unique);
      assertTrue(c.getIndexes()[6].sparse);
      assertEqual([ "a_ss1", "a_ss2" ], c.getIndexes()[6].fields);

      assertEqual(0, c.count());
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief test truncate
////////////////////////////////////////////////////////////////////////////////

    testTruncated : function () {
      var c = db._collection("UnitTestsDumpTruncated");
      var p = c.properties();

      assertEqual(2, c.type()); // document
      assertFalse(p.waitForSync);

      assertEqual(1, c.getIndexes().length); // just primary index
      assertEqual("primary", c.getIndexes()[0].type);
      assertEqual(0, c.count());
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief test shards
////////////////////////////////////////////////////////////////////////////////

    testShards : function () {
      var c = db._collection("UnitTestsDumpShards");
      var p = c.properties();

      assertEqual(2, c.type()); // document
      assertFalse(p.waitForSync);
      assertEqual(9, p.numberOfShards);

      assertEqual(1, c.getIndexes().length); // just primary index
      assertEqual("primary", c.getIndexes()[0].type);
      assertEqual(1000, c.count());

      for (var i = 0; i < 1000; ++i) {
        var doc = c.document(String(7 + (i * 42)));
|
||||
|
||||
assertEqual(String(7 + (i * 42)), doc._key);
|
||||
assertEqual(i, doc.value);
|
||||
assertEqual({ value: [ i, i ] }, doc.more);
|
||||
}
|
||||
},
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief test strings
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
testStrings : function () {
|
||||
var c = db._collection("UnitTestsDumpStrings");
|
||||
var p = c.properties();
|
||||
|
||||
assertEqual(2, c.type()); // document
|
||||
assertFalse(p.waitForSync);
|
||||
|
||||
assertEqual(1, c.getIndexes().length); // just primary index
|
||||
assertEqual("primary", c.getIndexes()[0].type);
|
||||
assertEqual(8, c.count());
|
||||
|
||||
var texts = [
|
||||
"big. Really big. He moment. Magrathea! - insisted Arthur, - I do you can sense no further because it doesn't fit properly. In my the denies faith, and the atmosphere beneath You are not cheap He was was his satchel. He throughout Magrathea. - He pushed a tore the ecstatic crowd. Trillian sat down the time, the existence is it? And he said, - What they don't want this airtight hatchway. - it's we you shooting people would represent their Poet Master Grunthos is in his mind.",
|
||||
"Ultimo cadere chi sedete uso chiuso voluto ora. Scotendosi portartela meraviglia ore eguagliare incessante allegrezza per. Pensava maestro pungeva un le tornano ah perduta. Fianco bearmi storia soffio prende udi poteva una. Cammino fascino elisire orecchi pollici mio cui sai sul. Chi egli sino sei dita ben. Audace agonie groppa afa vai ultima dentro scossa sii. Alcuni mia blocco cerchi eterno andare pagine poi. Ed migliore di sommesso oh ai angoscia vorresti.",
|
||||
"Νέο βάθος όλα δομές της χάσει. Μέτωπο εγώ συνάμα τρόπος και ότι όσο εφόδιο κόσμου. Προτίμηση όλη διάφορους του όλο εύθραυστη συγγραφής. Στα άρα ένα μία οποία άλλων νόημα. Ένα αποβαίνει ρεαλισμού μελετητές θεόσταλτο την. Ποντιακών και rites κοριτσάκι παπούτσια παραμύθια πει κυρ.",
|
||||
"Mody laty mnie ludu pole rury Białopiotrowiczowi. Domy puer szczypię jemy pragnął zacność czytając ojca lasy Nowa wewnątrz klasztoru. Chce nóg mego wami. Zamku stał nogą imion ludzi ustaw Białopiotrowiczem. Kwiat Niesiołowskiemu nierostrzygniony Staje brał Nauka dachu dumę Zamku Kościuszkowskie zagon. Jakowaś zapytać dwie mój sama polu uszakach obyczaje Mój. Niesiołowski książkowéj zimny mały dotychczasowa Stryj przestraszone Stolnikównie wdał śmiertelnego. Stanisława charty kapeluszach mięty bratem każda brząknął rydwan.",
|
||||
"Мелких против летают хижину тмится. Чудесам возьмет звездна Взжигай. . Податель сельские мучитель сверкает очищаясь пламенем. Увы имя меч Мое сия. Устранюсь воздушных Им от До мысленные потушатся Ко Ея терпеньем.",
|
||||
"dotyku. Výdech spalin bude položen záplavový detekční kabely 1x UPS Newave Conceptpower DPA 5x 40kVA bude ukončen v samostatné strojovně. Samotné servery mají pouze lokalita Ústí nad zdvojenou podlahou budou zakončené GateWayí HiroLink - Monitoring rozvaděče RTN na jednotlivých záplavových zón na soustrojí resp. technologie jsou označeny SA-MKx.y. Jejich výstupem je zajištěn přestupem dat z jejich provoz. Na dveřích vylepené výstražné tabulky. Kabeláž z okruhů zálohovaných obvodů v R.MON-I. Monitoring EZS, EPS, ... možno zajistit funkčností FireWallů na strukturovanou kabeláží vedenou v měrných jímkách zapuštěných v každém racku budou zakončeny v R.MON-NrNN. Monitoring motorgenerátorů: řídící systém bude zakončena v modulu",
|
||||
"ramien mu zrejme vôbec niekto je už presne čo mám tendenciu prispôsobiť dych jej páčil, čo chce. Hmm... Včera sa mi pozdava, len dočkali, ale keďže som na uz boli u jej nezavrela. Hlava jej to ve městě nepotká, hodně mi to tí vedci pri hre, keď je tu pre Designiu. Pokiaľ viete o odbornejšie texty. Prvým z tmavých uličiek, každý to niekedy, zrovnávať krok s obrovským batohom na okraj vane a temné úmysly, tak rozmýšľam, aký som si hromady mailov, čo chcem a neraz sa pokúšal o filmovém klubu v budúcnosti rozhodne uniesť mladú maliarku (Linda Rybová), ktorú so",
|
||||
" 復讐者」. 復讐者」. 伯母さん 復讐者」. 復讐者」. 復讐者」. 復讐者」. 第九章 第五章 第六章 第七章 第八章. 復讐者」 伯母さん. 復讐者」 伯母さん. 第十一章 第十九章 第十四章 第十八章 第十三章 第十五章. 復讐者」 . 第十四章 第十一章 第十二章 第十五章 第十七章 手配書. 第十四章 手配書 第十八章 第十七章 第十六章 第十三章. 第十一章 第十三章 第十八章 第十四章 手配書. 復讐者」."
|
||||
];
|
||||
|
||||
texts.forEach(function (t, i) {
|
||||
var doc = c.document("text" + i);
|
||||
|
||||
assertEqual(t, doc.value);
|
||||
});
|
||||
|
||||
}
|
||||
|
||||
};
|
||||
}
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief executes the test suite
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
jsunity.run(dumpTestSuite);
|
||||
|
||||
return jsunity.done();
|
||||
|
|
@@ -185,7 +185,7 @@ function dumpTestSuite () {
       assertFalse(p.waitForSync);
       assertFalse(p.isVolatile);
 
-      assertEqual(9, c.getIndexes().length);
+      assertEqual(7, c.getIndexes().length);
       assertEqual("primary", c.getIndexes()[0].type);
 
       assertEqual("hash", c.getIndexes()[1].type);
@@ -292,7 +292,7 @@ class AssocMulti {
   void batchInsert(std::function<void*()> const& contextCreator,
                    std::function<void(void*)> const& contextDestroyer,
                    std::shared_ptr<std::vector<Element> const> data,
-                   LocalTaskQueue* queue) {
+                   std::shared_ptr<LocalTaskQueue> queue) {
     if (data->empty()) {
       // nothing to do
       return;
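
This hunk, and the ones that follow, change every task-queue parameter from a raw LocalTaskQueue* to std::shared_ptr<LocalTaskQueue>, so that each inserter/partitioner task co-owns the queue it reports back to instead of merely borrowing it. The following is a minimal sketch of that ownership pattern; TaskQueue and InserterTask are hypothetical stand-ins (not ArangoDB's real classes), and the single-threaded dispatch loop is a deliberate simplification:

#include <functional>
#include <iostream>
#include <memory>
#include <vector>

// hypothetical stand-in for LocalTaskQueue: collects jobs, then runs them
class TaskQueue {
 public:
  void enqueue(std::function<void()> job) { _jobs.push_back(std::move(job)); }
  void dispatchAndWait() {
    for (auto& job : _jobs) {
      job();  // run everything (single-threaded sketch)
    }
    _jobs.clear();
  }

 private:
  std::vector<std::function<void()>> _jobs;
};

// hypothetical stand-in for an inserter task: holds the queue by shared_ptr,
// so the queue cannot be destroyed while any task still references it
class InserterTask {
 public:
  InserterTask(std::shared_ptr<TaskQueue> queue, int value)
      : _queue(std::move(queue)), _value(value) {}
  void run() { std::cout << "inserting " << _value << "\n"; }

 private:
  std::shared_ptr<TaskQueue> _queue;  // shared ownership, as in the diff
  int _value;
};

int main() {
  auto queue = std::make_shared<TaskQueue>();
  for (int i = 0; i < 3; ++i) {
    auto task = std::make_shared<InserterTask>(queue, i);
    queue->enqueue([task] { task->run(); });  // lambda co-owns the task
  }
  queue->dispatchAndWait();
  return 0;
}

With a raw pointer, a task that outlives the caller's queue would dereference freed memory; with shared ownership the queue is destroyed only when the last holder, whether caller or task, releases it.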
@@ -94,7 +94,7 @@ class MultiInserterTask : public LocalTask {
 
  public:
   MultiInserterTask(
-      LocalTaskQueue* queue, std::function<void(void*)> contextDestroyer,
+      std::shared_ptr<LocalTaskQueue> queue, std::function<void(void*)> contextDestroyer,
       std::vector<Bucket>* buckets,
       std::function<Element(void*, Element const&, uint64_t, Bucket&,
                             bool const, bool const)>
@@ -168,7 +168,7 @@ class MultiPartitionerTask : public LocalTask {
 
  public:
   MultiPartitionerTask(
-      LocalTaskQueue* queue,
+      std::shared_ptr<LocalTaskQueue> queue,
       std::function<uint64_t(void*, Element const&, bool)> hashElement,
       std::function<void(void*)> const& contextDestroyer,
       std::shared_ptr<std::vector<Element> const> data, size_t lower,
@@ -553,7 +553,7 @@ class AssocUnique {
   void batchInsert(std::function<void*()> const& contextCreator,
                    std::function<void(void*)> const& contextDestroyer,
                    std::shared_ptr<std::vector<Element> const> data,
-                   arangodb::basics::LocalTaskQueue* queue) {
+                   std::shared_ptr<arangodb::basics::LocalTaskQueue> queue) {
     TRI_ASSERT(queue != nullptr);
     if (data->empty()) {
       // nothing to do
@@ -70,7 +70,7 @@ class UniqueInserterTask : public LocalTask {
 
  public:
   UniqueInserterTask(
-      LocalTaskQueue* queue, std::function<void(void*)> contextDestroyer,
+      std::shared_ptr<LocalTaskQueue> queue, std::function<void(void*)> contextDestroyer,
       std::vector<Bucket>* buckets,
       std::function<int(void*, Element const&, Bucket&, uint64_t)> doInsert,
       std::function<bool(void*, Bucket&, uint64_t)> checkResize, size_t i,
@@ -140,7 +140,7 @@ class UniquePartitionerTask : public LocalTask {
 
  public:
   UniquePartitionerTask(
-      LocalTaskQueue* queue,
+      std::shared_ptr<LocalTaskQueue> queue,
       std::function<uint64_t(void*, Element const&)> hashElement,
       std::function<void(void*)> const& contextDestroyer,
       std::shared_ptr<std::vector<Element> const> data, size_t lower,
@@ -34,7 +34,7 @@ using namespace arangodb::basics;
 /// @brief create a task tied to the specified queue
 ////////////////////////////////////////////////////////////////////////////////
 
-LocalTask::LocalTask(LocalTaskQueue* queue) : _queue(queue) {}
+LocalTask::LocalTask(std::shared_ptr<LocalTaskQueue> queue) : _queue(queue) {}
 
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief dispatch this task to the underlying io_service
@@ -58,7 +58,7 @@ void LocalTask::dispatch() {
 /// @brief create a callback task tied to the specified queue
 ////////////////////////////////////////////////////////////////////////////////
 
-LocalCallbackTask::LocalCallbackTask(LocalTaskQueue* queue,
+LocalCallbackTask::LocalCallbackTask(std::shared_ptr<LocalTaskQueue> queue,
                                      std::function<void()> cb)
     : _queue(queue), _cb(cb) {}
 
@@ -43,7 +43,7 @@ class LocalTask : public std::enable_shared_from_this<LocalTask> {
   LocalTask(LocalTask const&) = delete;
   LocalTask& operator=(LocalTask const&) = delete;
 
-  explicit LocalTask(LocalTaskQueue* queue);
+  explicit LocalTask(std::shared_ptr<LocalTaskQueue> queue);
   virtual ~LocalTask() {}
 
   virtual void run() = 0;
@@ -54,7 +54,7 @@ class LocalTask : public std::enable_shared_from_this<LocalTask> {
   /// @brief the underlying queue
   //////////////////////////////////////////////////////////////////////////////
 
-  LocalTaskQueue* _queue;
+  std::shared_ptr<LocalTaskQueue> _queue;
 };
 
 class LocalCallbackTask
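
Here the _queue member itself becomes a shared_ptr. Since LocalTask derives from std::enable_shared_from_this<LocalTask> (visible in the hunk header), a queued task is typically kept alive by capturing shared_from_this(); holding the queue by shared_ptr extends the same lifetime discipline to the queue. A sketch of that idiom follows, using hypothetical Queue/Task classes and a hypothetical dispatch(), not ArangoDB's actual implementation:

#include <functional>
#include <iostream>
#include <memory>
#include <queue>

class Queue;  // forward declaration, completed below

// tasks derive from enable_shared_from_this, mirroring the hunk header
class Task : public std::enable_shared_from_this<Task> {
 public:
  explicit Task(std::shared_ptr<Queue> queue) : _queue(std::move(queue)) {}
  virtual ~Task() = default;
  virtual void run() = 0;
  void dispatch();  // defined once Queue is complete

 protected:
  std::shared_ptr<Queue> _queue;  // keeps the queue alive as long as the task
};

class Queue {
 public:
  void post(std::function<void()> job) { _jobs.push(std::move(job)); }
  void drain() {
    while (!_jobs.empty()) {
      _jobs.front()();
      _jobs.pop();
    }
  }

 private:
  std::queue<std::function<void()>> _jobs;
};

void Task::dispatch() {
  // capture shared_from_this() so the task outlives the caller's handle
  auto self = shared_from_this();
  _queue->post([self] { self->run(); });
}

struct PrintTask : Task {
  using Task::Task;
  void run() override { std::cout << "task ran\n"; }
};

int main() {
  auto queue = std::make_shared<Queue>();
  std::make_shared<PrintTask>(queue)->dispatch();  // temporary handle is fine
  queue->drain();
  return 0;
}

The symmetry is the point of the change: tasks keep themselves alive while queued, and now they keep their queue alive too, so neither side can observe a dangling reference regardless of which one is released first.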
@@ -64,7 +64,7 @@ class LocalCallbackTask
   LocalCallbackTask(LocalCallbackTask const&) = delete;
   LocalCallbackTask& operator=(LocalCallbackTask const&) = delete;
 
-  LocalCallbackTask(LocalTaskQueue* queue, std::function<void()> cb);
+  LocalCallbackTask(std::shared_ptr<LocalTaskQueue> queue, std::function<void()> cb);
   virtual ~LocalCallbackTask() {}
 
   virtual void run();
@@ -75,7 +75,7 @@ class LocalCallbackTask
   /// @brief the underlying queue
   //////////////////////////////////////////////////////////////////////////////
 
-  LocalTaskQueue* _queue;
+  std::shared_ptr<LocalTaskQueue> _queue;
 
   //////////////////////////////////////////////////////////////////////////////
   /// @brief the callback executed by run() (any exceptions will be caught and
@@ -179,12 +179,13 @@ Thread::~Thread() {
     }
 
     _state.store(ThreadState::DETACHED);
     return;
   }
 
   state = _state.load();
 
   if (state != ThreadState::DETACHED && state != ThreadState::CREATED) {
-    LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "thread is not detached but " << stringify(state)
+    LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "thread '" << _name << "' is not detached but " << stringify(state)
                                               << ". shutting down hard";
     FATAL_ERROR_ABORT();
   }
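
This last hunk only adds the thread's name to the fatal message, which makes it possible to tell which thread was destroyed without being shut down or detached. A minimal sketch of the same destructor-time guard, assuming a hypothetical GuardedThread class and plain std::cerr/std::abort() in place of ArangoDB's LOG_TOPIC and FATAL_ERROR_ABORT macros:

#include <atomic>
#include <cstdlib>
#include <iostream>
#include <string>

enum class ThreadState { CREATED, STARTED, STOPPED, DETACHED };

class GuardedThread {
 public:
  explicit GuardedThread(std::string name)
      : _name(std::move(name)), _state(ThreadState::CREATED) {}

  ~GuardedThread() {
    ThreadState state = _state.load();
    // refuse to silently destroy a thread that was never shut down
    if (state != ThreadState::DETACHED && state != ThreadState::CREATED) {
      // naming the thread makes the failure diagnosable, as in the diff
      std::cerr << "thread '" << _name
                << "' is not detached. shutting down hard\n";
      std::abort();
    }
  }

  void markDetached() { _state.store(ThreadState::DETACHED); }

 private:
  std::string _name;
  std::atomic<ThreadState> _state;
};

int main() {
  GuardedThread t("worker");
  t.markDetached();  // without this, the destructor would abort
  return 0;
}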