
Merge branch 'devel' of github.com:arangodb/arangodb into devel

Author: Andreas Streichardt
Date:   2017-01-11 15:10:39 +01:00
Commit: 1b730c2acc
16 changed files with 192 additions and 150 deletions

View File

@@ -25,9 +25,6 @@
#include "ApplicationFeatures/ApplicationFeature.h"
-#include <openssl/ssl.h>
-#include <boost/asio/ssl.hpp>
#include "Actions/RestActionHandler.h"
-#include "Basics/asio-helper.h"

View File

@@ -825,7 +825,7 @@ bool MMFilesCollection::iterateDatafilesVector(std::vector<TRI_datafile_t*> cons
return false;
}
-if (datafile->isPhysical() && datafile->_isSealed) {
+if (datafile->isPhysical() && datafile->isSealed()) {
datafile->randomAccess();
}
}
@@ -879,7 +879,7 @@ void MMFilesCollection::figures(std::shared_ptr<arangodb::velocypack::Builder>&
size_t sizeDatafiles = 0;
builder->add("datafiles", VPackValue(VPackValueType::Object));
for (auto const& it : _datafiles) {
-sizeDatafiles += it->_initSize;
+sizeDatafiles += it->initSize();
}
builder->add("count", VPackValue(_datafiles.size()));
@@ -888,7 +888,7 @@ void MMFilesCollection::figures(std::shared_ptr<arangodb::velocypack::Builder>&
size_t sizeJournals = 0;
for (auto const& it : _journals) {
-sizeJournals += it->_initSize;
+sizeJournals += it->initSize();
}
builder->add("journals", VPackValue(VPackValueType::Object));
builder->add("count", VPackValue(_journals.size()));
@@ -897,7 +897,7 @@ void MMFilesCollection::figures(std::shared_ptr<arangodb::velocypack::Builder>&
size_t sizeCompactors = 0;
for (auto const& it : _compactors) {
-sizeCompactors += it->_initSize;
+sizeCompactors += it->initSize();
}
builder->add("compactors", VPackValue(VPackValueType::Object));
builder->add("count", VPackValue(_compactors.size()));
@@ -970,11 +970,11 @@ bool MMFilesCollection::applyForTickRange(TRI_voc_tick_t dataMin, TRI_voc_tick_t
CONDITIONAL_READ_LOCKER(readLocker, _filesLock, e._isJournal);
if (!e._isJournal) {
-TRI_ASSERT(datafile->_isSealed);
+TRI_ASSERT(datafile->isSealed());
}
char const* ptr = datafile->_data;
-char const* end = ptr + datafile->_currentSize;
+char const* end = ptr + datafile->currentSize();
while (ptr < end) {
auto const* marker = reinterpret_cast<TRI_df_marker_t const*>(ptr);
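The change above is the pattern for the whole commit: direct reads of TRI_datafile_t's public members (_isSealed, _initSize, _currentSize, _maximalSize) become accessor calls so the members can be moved into the private section of the struct (see the datafile.h hunks below). A minimal sketch of the idiom, with simplified stand-in names rather than the real type:

    #include <cstddef>

    // Sketch only; the real type is TRI_datafile_t in the datafile header below.
    struct Datafile {
      bool isSealed() const { return _isSealed; }               // replaces direct _isSealed reads
      std::size_t currentSize() const { return _currentSize; }  // replaces direct _currentSize reads

     private:                        // call sites can no longer touch the members directly
      bool _isSealed = false;        // true once the footer has been written
      std::size_t _currentSize = 0;  // current size of the datafile
    };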

View File

@@ -686,7 +686,7 @@ bool MMFilesCompactorThread::compactCollection(LogicalCollection* collection, bo
break;
}
-if (!doCompact && df->_maximalSize < smallDatafileSize() && i < n - 1) {
+if (!doCompact && df->maximalSize() < smallDatafileSize() && i < n - 1) {
// very small datafile and not the last one. let's compact it so it's
// merged with others
doCompact = true;
@@ -710,7 +710,7 @@ bool MMFilesCompactorThread::compactCollection(LogicalCollection* collection, bo
} else if (dfi.sizeDead > 0 &&
(((double)dfi.sizeDead /
((double)dfi.sizeDead + (double)dfi.sizeAlive) >= deadShare()) ||
-((double)dfi.sizeDead / (double)df->_maximalSize >= deadShare()))) {
+((double)dfi.sizeDead / (double)df->maximalSize() >= deadShare()))) {
// the size of dead objects is above some share
doCompact = true;
reason = ReasonDeadSizeShare;
@@ -741,7 +741,7 @@ bool MMFilesCompactorThread::compactCollection(LogicalCollection* collection, bo
// size of the resulting file
if (reason != ReasonOnlyDeletions) {
if (!toCompact.empty() &&
-totalSize + (uint64_t)df->_maximalSize >= maxSize &&
+totalSize + (uint64_t)df->maximalSize() >= maxSize &&
(toCompact.size() != 1 || reason != ReasonDatafileSmall)) {
// found enough files to compact (in terms of cumulated size)
// there's one exception to this: if we're merging multiple datafiles,

View File

@@ -1925,7 +1925,7 @@ int MMFilesEngine::openCollection(TRI_vocbase_t* vocbase, LogicalCollection* col
// file is a journal
if (filetype == "journal") {
-if (datafile->_isSealed) {
+if (datafile->isSealed()) {
if (datafile->state() != TRI_DF_STATE_READ) {
LOG_TOPIC(WARN, Logger::DATAFILES)
<< "strange, journal '" << filename
@@ -1946,7 +1946,7 @@ int MMFilesEngine::openCollection(TRI_vocbase_t* vocbase, LogicalCollection* col
// file is a datafile (or was a compaction file)
else if (filetype == "datafile" || filetype == "compaction") {
-if (!datafile->_isSealed) {
+if (!datafile->isSealed()) {
LOG_TOPIC(ERR, Logger::DATAFILES)
<< "datafile '" << filename
<< "' is not sealed, this should never happen";

View File

@@ -1896,7 +1896,15 @@ void LogicalCollection::addIndex(std::shared_ptr<arangodb::Index> idx) {
TRI_ASSERT(idx->type() != arangodb::Index::TRI_IDX_TYPE_PRIMARY_INDEX ||
_indexes.empty());
-TRI_UpdateTickServer(static_cast<TRI_voc_tick_t>(idx->id()));
+auto const id = idx->id();
+for (auto const& it : _indexes) {
+if (it->id() == id) {
+// already have this particular index. do not add it again
+return;
+}
+}
+TRI_UpdateTickServer(static_cast<TRI_voc_tick_t>(id));
_indexes.emplace_back(idx);
@@ -1911,6 +1919,15 @@ void LogicalCollection::addIndex(std::shared_ptr<arangodb::Index> idx) {
void LogicalCollection::addIndexCoordinator(
std::shared_ptr<arangodb::Index> idx, bool distribute) {
+auto const id = idx->id();
+for (auto const& it : _indexes) {
+if (it->id() == id) {
+// already have this particular index. do not add it again
+return;
+}
+}
_indexes.emplace_back(idx);
if (distribute) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_NOT_IMPLEMENTED);
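Both addIndex and addIndexCoordinator now carry the same guard: scan _indexes for the incoming index id and return early on a match, so registering an index is idempotent, and TRI_UpdateTickServer only fires for genuinely new indexes. A generalized sketch of the guard; the helper name and template are illustrative, not ArangoDB API:

    #include <memory>
    #include <utility>
    #include <vector>

    // A linear scan suffices: a collection only ever holds a handful of indexes.
    template <typename Index>
    void addUnique(std::vector<std::shared_ptr<Index>>& indexes,
                   std::shared_ptr<Index> idx) {
      for (auto const& it : indexes) {
        if (it->id() == idx->id()) {
          return;  // already registered; adding again would create a duplicate
        }
      }
      indexes.emplace_back(std::move(idx));
    }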

View File

@@ -44,6 +44,7 @@ using namespace arangodb::basics;
+namespace {
/// @brief create a hex string representation of the value
static std::string hexValue(uint64_t value) {
static const uint64_t Bits[] = { 56, 48, 40, 32, 24, 16, 8, 0 };
@@ -60,7 +61,7 @@ static std::string hexValue(uint64_t value) {
return line;
}
-/// @brief check if a marker appears to be created by ArangoDB 28
+/// @brief calculate a CRC value the same way as ArangoDB 2.8 did
static TRI_voc_crc_t Crc28(TRI_voc_crc_t crc, void const* data, size_t length) {
static TRI_voc_crc_t const CrcPolynomial = 0xEDB88320;
unsigned char* current = (unsigned char*) data;
@@ -78,6 +79,7 @@ static TRI_voc_crc_t Crc28(TRI_voc_crc_t crc, void const* data, size_t length) {
return crc;
}
+/// @brief check if a marker appears to be created by ArangoDB 2.8
static bool IsMarker28(void const* marker) {
struct Marker28 {
TRI_voc_size_t _size;
@@ -269,6 +271,48 @@ static TRI_datafile_t* CreatePhysicalDatafile(std::string const& filename,
}
}
+/// @brief whether or not a datafile is empty
+int TRI_datafile_t::judge(std::string const& filename) {
+off_t filesize = basics::FileUtils::size(filename);
+if (filesize == 0) {
+// empty logfile
+return TRI_ERROR_ARANGO_DATAFILE_EMPTY;
+}
+if (filesize < static_cast<off_t>(256 * sizeof(uint64_t))) {
+// too small
+return TRI_ERROR_ARANGO_DATAFILE_UNREADABLE;
+}
+int fd = TRI_OPEN(filename.c_str(), O_RDONLY | TRI_O_CLOEXEC);
+if (fd < 0) {
+return TRI_ERROR_ARANGO_DATAFILE_UNREADABLE;
+}
+uint64_t buffer[256];
+if (!TRI_ReadPointer(fd, &buffer, 256 * sizeof(uint64_t))) {
+TRI_CLOSE(fd);
+return TRI_ERROR_ARANGO_DATAFILE_UNREADABLE;
+}
+uint64_t* ptr = buffer;
+uint64_t* end = buffer + 256;
+while (ptr < end) {
+if (*ptr != 0) {
+TRI_CLOSE(fd);
+return TRI_ERROR_NO_ERROR;
+}
+++ptr;
+}
+TRI_CLOSE(fd);
+return TRI_ERROR_ARANGO_DATAFILE_EMPTY;
+}
/// @brief creates either an anonymous or a physical datafile
TRI_datafile_t* TRI_datafile_t::create(std::string const& filename, TRI_voc_fid_t fid,
TRI_voc_size_t maximalSize,
@@ -312,7 +356,7 @@ TRI_datafile_t* TRI_datafile_t::create(std::string const& filename, TRI_voc_fid_
if (res != TRI_ERROR_NO_ERROR) {
LOG(ERR) << "cannot write header to datafile '" << datafile->getName() << "'";
-TRI_UNMMFile(datafile->_data, datafile->maximalSize(), datafile->fd(),
+TRI_UNMMFile(const_cast<char*>(datafile->data()), datafile->initSize(), datafile->fd(),
&datafile->_mmHandle);
datafile->close();
@@ -461,8 +505,7 @@ int TRI_datafile_t::reserveElement(TRI_voc_size_t size, TRI_df_marker_t** positi
TRI_ASSERT(*position != nullptr);
-_next += size;
-_currentSize += size;
+advanceWritePosition(size);
return TRI_ERROR_NO_ERROR;
}
@@ -629,8 +672,8 @@ bool TRI_IterateDatafile(TRI_datafile_t* datafile,
LOG(TRACE) << "iterating over datafile '" << datafile->getName() << "', fid: " << datafile->fid();
-char const* ptr = datafile->_data;
-char const* end = datafile->_data + datafile->_currentSize;
+char const* ptr = datafile->data();
+char const* end = ptr + datafile->currentSize();
if (datafile->state() != TRI_DF_STATE_READ &&
datafile->state() != TRI_DF_STATE_WRITE) {
@@ -673,8 +716,8 @@
std::function<bool(TRI_df_marker_t const*, TRI_datafile_t*)> const& cb) {
LOG(TRACE) << "iterating over datafile '" << datafile->getName() << "', fid: " << datafile->fid();
-char const* ptr = datafile->_data;
-char const* end = datafile->_data + datafile->_currentSize;
+char const* ptr = datafile->data();
+char const* end = ptr + datafile->currentSize();
if (datafile->state() != TRI_DF_STATE_READ &&
datafile->state() != TRI_DF_STATE_WRITE) {
@@ -926,6 +969,9 @@ TRI_datafile_t::TRI_datafile_t(std::string const& filename, int fd, void* mmHand
_maximalSize(maximalSize),
_currentSize(currentSize),
_footerSize(sizeof(TRI_df_footer_marker_t)),
+_full(false),
+_isSealed(false),
+_lockedInMemory(false),
_data(data),
_next(data + currentSize),
_tickMin(0),
@@ -933,9 +979,6 @@
_dataMin(0),
_dataMax(0),
_lastError(TRI_ERROR_NO_ERROR),
-_full(false),
-_isSealed(false),
-_lockedInMemory(false),
_synced(data),
_written(nullptr) {
// filename is a string for physical datafiles, and NULL for anonymous regions
@@ -1182,7 +1225,7 @@ bool TRI_datafile_t::check(bool ignoreFailures) {
LOG(TRACE) << "checking markers in datafile '" << getName() << "'";
char const* ptr = _data;
-char const* end = _data + _currentSize;
+char const* end = ptr + _currentSize;
char const* lastGood = nullptr;
TRI_voc_size_t currentSize = 0;
@@ -1718,8 +1761,8 @@ TRI_datafile_t* TRI_datafile_t::open(std::string const& filename, bool ignoreFai
bool ok = datafile->check(ignoreFailures);
if (!ok) {
-TRI_UNMMFile(datafile->_data, datafile->_maximalSize, datafile->_fd, &datafile->_mmHandle);
-TRI_CLOSE(datafile->_fd);
+TRI_UNMMFile(const_cast<char*>(datafile->data()), datafile->initSize(), datafile->fd(), &datafile->_mmHandle);
+TRI_CLOSE(datafile->fd());
LOG(ERR) << "datafile '" << datafile->getName() << "' is corrupt";
// must free datafile here
@@ -1733,7 +1776,7 @@
LOG(ERR) << "unable to change memory protection for memory backed by datafile '" << datafile->getName() << "'. please check file permissions and mount options.";
return nullptr;
}
-datafile->_state = TRI_DF_STATE_WRITE;
+datafile->setState(TRI_DF_STATE_WRITE);
}
// Advise on sequential use:
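The judge function added above (moved from Logfile::judge, which is deleted further down) inspects the first 256 * sizeof(uint64_t) = 2048 bytes of a file and reports empty, unreadable/too small, or contains-data. A minimal sketch of how a caller branches on the result, mirroring the LogfileManager::inspectLogfiles hunk below:

    // Sketch: classifying a file on disk via the new static helper.
    int res = TRI_datafile_t::judge(filename);
    if (res == TRI_ERROR_ARANGO_DATAFILE_EMPTY) {
      // the inspected prefix is all zero: the file can be recycled or removed
    } else if (res == TRI_ERROR_ARANGO_DATAFILE_UNREADABLE) {
      // missing, too small, or not readable: needs repair or disposal
    } else {
      // TRI_ERROR_NO_ERROR: the file holds data and must be opened and scanned
    }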

View File

@@ -197,6 +197,9 @@ struct TRI_datafile_t {
/// @brief truncates a datafile and seals it, only called by arango-dfdd
static int truncate(std::string const& path, TRI_voc_size_t position);
+/// @brief whether or not a datafile is empty
+static int judge(std::string const& filename);
/// @brief creates either an anonymous or a physical datafile
static TRI_datafile_t* create(std::string const& filename, TRI_voc_fid_t fid,
TRI_voc_size_t maximalSize,
@@ -253,6 +256,17 @@
void setState(TRI_df_state_e state) { _state = state; }
bool isSealed() const { return _isSealed; }
+char* advanceWritePosition(size_t size) {
+char* old = _next;
+_next += size;
+_currentSize += static_cast<TRI_voc_size_t>(size);
+return old;
+}
private:
/// @brief returns information about the datafile
DatafileScan scanHelper();
@@ -275,7 +289,7 @@
bool tryRepair();
void printMarker(TRI_df_marker_t const* marker, TRI_voc_size_t size, char const* begin, char const* end) const;
private:
std::string _filename; // underlying filename
TRI_voc_fid_t const _fid; // datafile identifier
@@ -284,12 +298,17 @@
void* _mmHandle; // underlying memory map object handle (windows only)
-public:
TRI_voc_size_t const _initSize; // initial size of the datafile (constant)
TRI_voc_size_t _maximalSize; // maximal size of the datafile (may be adjusted/reduced at runtime)
TRI_voc_size_t _currentSize; // current size of the datafile
TRI_voc_size_t _footerSize; // size of the final footer
+bool _full; // at least one request was rejected because there is not enough
+// room
+bool _isSealed; // true, if footer has been written
+bool _lockedInMemory; // whether or not the datafile is locked in memory (mlock)
+public:
char* _data; // start of the data array
char* _next; // end of the current data
@@ -299,10 +318,6 @@
TRI_voc_tick_t _dataMax; // maximum tick value of document/edge marker
int _lastError; // last (critical) error
-bool _full; // at least one request was rejected because there is not enough
-// room
-bool _isSealed; // true, if footer has been written
-bool _lockedInMemory; // whether or not the datafile is locked in memory (mlock)
// .............................................................................
// access to the following attributes must be protected by a _lock
// .............................................................................
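advanceWritePosition is a bump-pointer reservation: it returns the previous write position and advances _next and _currentSize in one place, which is what TRI_datafile_t::reserveElement (above) and Logfile::reserve (below) now delegate to. A usage sketch, assuming a valid TRI_datafile_t* df with sufficient free space already verified; markerSize is a placeholder for an already-aligned byte count:

    // Reserve room for a marker; the returned pointer is where the marker goes.
    char* pos = df->advanceWritePosition(markerSize);
    auto* marker = reinterpret_cast<TRI_df_marker_t*>(pos);
    // ... initialize *marker in place; _currentSize already accounts for the bytes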

View File

@@ -77,7 +77,7 @@ Logfile* Logfile::openExisting(std::string const& filename, Logfile::IdType id,
StatusType status = StatusType::OPEN;
-if (df->_isSealed) {
+if (df->isSealed()) {
status = StatusType::SEALED;
}
@@ -91,57 +91,8 @@
return logfile;
}
-/// @brief whether or not a logfile is empty
-int Logfile::judge(std::string const& filename) {
-off_t filesize = basics::FileUtils::size(filename);
-if (filesize == 0) {
-// empty logfile
-return TRI_ERROR_ARANGO_DATAFILE_EMPTY;
-}
-if (filesize < static_cast<off_t>(256 * sizeof(uint64_t))) {
-// too small
-return TRI_ERROR_ARANGO_DATAFILE_UNREADABLE;
-}
-int fd = TRI_OPEN(filename.c_str(), O_RDWR | TRI_O_CLOEXEC);
-if (fd < 0) {
-return TRI_ERROR_ARANGO_DATAFILE_UNREADABLE;
-}
-uint64_t buffer[256];
-if (!TRI_ReadPointer(fd, &buffer, 256 * sizeof(uint64_t))) {
-TRI_CLOSE(fd);
-return TRI_ERROR_ARANGO_DATAFILE_UNREADABLE;
-}
-uint64_t* ptr = buffer;
-uint64_t* end = buffer + 256;
-while (ptr < end) {
-if (*ptr != 0) {
-TRI_CLOSE(fd);
-return TRI_ERROR_NO_ERROR;
-}
-++ptr;
-}
-TRI_CLOSE(fd);
-return TRI_ERROR_ARANGO_DATAFILE_EMPTY;
-}
/// @brief reserve space and update the current write position
char* Logfile::reserve(size_t size) {
-size = DatafileHelper::AlignedSize<size_t>(size);
-char* result = _df->_next;
-_df->_next += size;
-_df->_currentSize += static_cast<TRI_voc_size_t>(size);
-return result;
+return _df->advanceWritePosition(DatafileHelper::AlignedSize<size_t>(size));
}

View File

@@ -68,9 +68,6 @@
/// @brief open an existing logfile
static Logfile* openExisting(std::string const&, Logfile::IdType, bool, bool);
-/// @brief whether or not a logfile is empty
-static int judge(std::string const&);
int lockInMemory() {
return _df->lockInMemory();
}

View File

@@ -2077,7 +2077,7 @@ int LogfileManager::inspectLogfiles() {
TRI_ASSERT((*it).second == nullptr);
-int res = Logfile::judge(filename);
+int res = TRI_datafile_t::judge(filename);
if (res == TRI_ERROR_ARANGO_DATAFILE_EMPTY) {
_recoverState->emptyLogfiles.push_back(filename);

View File

@@ -473,7 +473,7 @@ void Slots::getActiveLogfileRegion(Logfile* logfile, char const*& begin,
TRI_datafile_t* datafile = logfile->df();
begin = datafile->_data;
-end = begin + datafile->_currentSize;
+end = begin + datafile->currentSize();
}
/// @brief get the current tick range of a logfile

View File

@@ -269,9 +269,9 @@
model = JSON.stringify(model);
if (this.type._from && this.type._to) {
-var callbackE = function (error) {
+var callbackE = function (error, data) {
if (error) {
-arangoHelper.arangoError('Error', 'Could not save edge.');
+arangoHelper.arangoError('Error', data.responseJSON.errorMessage);
} else {
this.successConfirmation();
this.disableSaveButton();
@@ -280,9 +280,9 @@
this.collection.saveEdge(this.colid, this.docid, this.type._from, this.type._to, model, callbackE);
} else {
-var callback = function (error) {
+var callback = function (error, data) {
if (error) {
-arangoHelper.arangoError('Error', 'Could not save document.');
+arangoHelper.arangoError('Error', data.responseJSON.errorMessage);
} else {
this.successConfirmation();
this.disableSaveButton();

View File

@@ -606,9 +606,9 @@
var key = $('.modal-body #new-edge-key-attr').last().val();
var url;
-var callback = function (error, data) {
+var callback = function (error, data, msg) {
if (error) {
-arangoHelper.arangoError('Error', 'Could not create edge');
+arangoHelper.arangoError('Error', msg.errorMessage);
} else {
window.modalView.hide();
data = data._id.split('/');
@@ -635,9 +635,9 @@
var key = $('.modal-body #new-document-key-attr').last().val();
var url;
-var callback = function (error, data) {
+var callback = function (error, data, msg) {
if (error) {
-arangoHelper.arangoError('Error', 'Could not create document');
+arangoHelper.arangoError('Error', msg.errorMessage);
} else {
window.modalView.hide();
data = data.split('/');

View File

@@ -73,6 +73,7 @@
truncateCollection: function () {
this.model.truncateCollection();
$('.modal-delete-confirmation').hide();
+window.modalView.hide();
},

View File

@@ -1368,7 +1368,7 @@ function startInstanceCluster (instanceInfo, protocol, options,
++count;
-if (count % 60 === 0) {
+if (count % 180 === 0) {
if (!checkArangoAlive(arangod, options)) {
throw new Error('startup failed! bailing out!');
}

View File

@@ -66,15 +66,15 @@ function MovingShardsSuite () {
console.info("Waiting for synchronous replication to settle...");
global.ArangoClusterInfo.flush();
for (var i = 0; i < c.length; ++i) {
-var cinfo = global.ArangoClusterInfo.getCollectionInfo(database,
-c[i].name());
+var cinfo = global.ArangoClusterInfo.getCollectionInfo(
+database, c[i].name());
var shards = Object.keys(cinfo.shards);
var replFactor = cinfo.shards[shards[0]].length;
var count = 0;
while (++count <= 120) {
var ccinfo = shards.map(
-s => global.ArangoClusterInfo.getCollectionInfoCurrent(database,
-c[i].name(), s)
+s => global.ArangoClusterInfo.getCollectionInfoCurrent(
+database, c[i].name(), s)
);
let replicas = ccinfo.map(s => s.servers.length);
if (_.every(replicas, x => x === replFactor)) {
@@ -96,13 +96,19 @@
////////////////////////////////////////////////////////////////////////////////
function getCleanedOutServers() {
-var coordEndpoint = global.ArangoClusterInfo.getServerEndpoint("Coordinator0001");
+var coordEndpoint =
+global.ArangoClusterInfo.getServerEndpoint("Coordinator0001");
var request = require("@arangodb/request");
var endpointToURL = require("@arangodb/cluster").endpointToURL;
var url = endpointToURL(coordEndpoint);
-var res = request({ method: "GET",
-url: url + "/_admin/cluster/numberOfServers"});
+try {
+var res = request(
+{ method: "GET", url: url + "/_admin/cluster/numberOfServers" });
+} catch (err) {
+console.error(
+"Exception for GET /_admin/cluster/numberOfServers:", err.stack);
+return [];
+}
var body = res.body;
if (typeof body === "string") {
body = JSON.parse(body);
@@ -179,14 +185,21 @@
////////////////////////////////////////////////////////////////////////////////
function cleanOutServer(id) {
-var coordEndpoint = global.ArangoClusterInfo.getServerEndpoint("Coordinator0001");
+var coordEndpoint =
+global.ArangoClusterInfo.getServerEndpoint("Coordinator0001");
var request = require("@arangodb/request");
var endpointToURL = require("@arangodb/cluster").endpointToURL;
var url = endpointToURL(coordEndpoint);
var body = {"server": id};
-return request({ method: "POST",
-url: url + "/_admin/cluster/cleanOutServer",
-body: JSON.stringify(body) });
+try {
+return request({ method: "POST",
+url: url + "/_admin/cluster/cleanOutServer",
+body: JSON.stringify(body) });
+} catch (err) {
+console.error(
+"Exception for POST /_admin/cluster/cleanOutServer:", err.stack);
+return false;
+}
}
////////////////////////////////////////////////////////////////////////////////
@@ -194,14 +207,21 @@
////////////////////////////////////////////////////////////////////////////////
function shrinkCluster(toNum) {
-var coordEndpoint = global.ArangoClusterInfo.getServerEndpoint("Coordinator0001");
+var coordEndpoint =
+global.ArangoClusterInfo.getServerEndpoint("Coordinator0001");
var request = require("@arangodb/request");
var endpointToURL = require("@arangodb/cluster").endpointToURL;
var url = endpointToURL(coordEndpoint);
var body = {"numberOfDBServers":toNum};
-return request({ method: "PUT",
-url: url + "/_admin/cluster/numberOfServers",
-body: JSON.stringify(body) });
+try {
+return request({ method: "PUT",
+url: url + "/_admin/cluster/numberOfServers",
+body: JSON.stringify(body) });
+} catch (err) {
+console.error(
+"Exception for PUT /_admin/cluster/numberOfServers:", err.stack);
+return false;
+}
}
////////////////////////////////////////////////////////////////////////////////
@@ -209,7 +229,8 @@
////////////////////////////////////////////////////////////////////////////////
function resetCleanedOutServers() {
-var coordEndpoint = global.ArangoClusterInfo.getServerEndpoint("Coordinator0001");
+var coordEndpoint =
+global.ArangoClusterInfo.getServerEndpoint("Coordinator0001");
var request = require("@arangodb/request");
var endpointToURL = require("@arangodb/cluster").endpointToURL;
var url = endpointToURL(coordEndpoint);
@@ -217,13 +238,13 @@
var body = {"cleanedServers":[], "numberOfDBServers":numberOfDBServers};
try {
var res = request({ method: "PUT",
-url: url + "/_admin/cluster/numberOfServers",
-body: JSON.stringify(body) });
+url: url + "/_admin/cluster/numberOfServers",
+body: JSON.stringify(body) });
return res;
}
catch (err) {
console.error("Exception for PUT /_admin/cluster/numberOfServers:",
err.stack);
console.error(
"Exception for PUT /_admin/cluster/numberOfServers:", err.stack);
return false;
}
}
@@ -233,14 +254,21 @@
////////////////////////////////////////////////////////////////////////////////
function moveShard(database, collection, shard, fromServer, toServer) {
-var coordEndpoint = global.ArangoClusterInfo.getServerEndpoint("Coordinator0001");
+var coordEndpoint =
+global.ArangoClusterInfo.getServerEndpoint("Coordinator0001");
var request = require("@arangodb/request");
var endpointToURL = require("@arangodb/cluster").endpointToURL;
var url = endpointToURL(coordEndpoint);
var body = {database, collection, shard, fromServer, toServer};
-return request({ method: "POST",
-url: url + "/_admin/cluster/moveShard",
-body: JSON.stringify(body) });
+try {
+return request({ method: "POST",
+url: url + "/_admin/cluster/moveShard",
+body: JSON.stringify(body) });
+} catch (err) {
+console.error(
+"Exception for POST /_admin/cluster/moveShard:", err.stack);
+return false;
+}
}
////////////////////////////////////////////////////////////////////////////////
@@ -277,16 +305,9 @@
////////////////////////////////////////////////////////////////////////////////
function findServerNotOnList(list) {
-var count = 0;
-/*var str = "" + count;
-var pad = "0000";
-var ans = pad.substring(0, pad.length - str.length) + str;
-var name = "DBServer" + ans;*/
var count = 0;
while (list.indexOf(dbservers[count]) >= 0) {
count += 1;
}
return dbservers[count];
}
@@ -355,18 +376,18 @@
////////////////////////////////////////////////////////////////////////////////
/// @brief cleaning out collection with one shard without replication
////////////////////////////////////////////////////////////////////////////////
testShrinkNoReplication : function() {
assertTrue(waitForSynchronousReplication("_system"));
var _dbservers = dbservers;
_dbservers.sort();
-shrinkCluster(4);
+assertTrue(shrinkCluster(4));
assertTrue(testServerEmpty(dbservers[4], true));
assertTrue(waitForSupervision());
-shrinkCluster(3);
+assertTrue(shrinkCluster(3));
assertTrue(testServerEmpty(dbservers[3], true));
assertTrue(waitForSupervision());
-shrinkCluster(2);
+assertTrue(shrinkCluster(2));
assertTrue(testServerEmpty(dbservers[2], true));
assertTrue(waitForSupervision());
},
@@ -383,7 +404,7 @@
var cinfo = global.ArangoClusterInfo.getCollectionInfo(
"_system", c[0].name());
var shard = Object.keys(cinfo.shards)[0];
-moveShard("_system", c[0]._id, shard, fromServer, toServer);
+assertTrue(moveShard("_system", c[0]._id, shard, fromServer, toServer));
assertTrue(testServerEmpty(fromServer), false);
assertTrue(waitForSupervision());
},
@@ -400,7 +421,7 @@
var cinfo = global.ArangoClusterInfo.getCollectionInfo(
"_system", c[0].name());
var shard = Object.keys(cinfo.shards)[0];
-moveShard("_system", c[0]._id, shard, fromServer, toServer);
+assertTrue(moveShard("_system", c[0]._id, shard, fromServer, toServer));
assertTrue(testServerEmpty(fromServer), false);
assertTrue(waitForSupervision());
},
@@ -418,7 +439,7 @@
var cinfo = global.ArangoClusterInfo.getCollectionInfo(
"_system", c[1].name());
var shard = Object.keys(cinfo.shards)[0];
-moveShard("_system", c[1]._id, shard, fromServer, toServer);
+assertTrue(moveShard("_system", c[1]._id, shard, fromServer, toServer));
assertTrue(testServerEmpty(fromServer, false, 1, 1));
assertTrue(waitForSupervision());
},
@@ -436,7 +457,7 @@
var cinfo = global.ArangoClusterInfo.getCollectionInfo(
"_system", c[1].name());
var shard = Object.keys(cinfo.shards)[0];
-moveShard("_system", c[1]._id, shard, fromServer, toServer);
+assertTrue(moveShard("_system", c[1]._id, shard, fromServer, toServer));
assertTrue(testServerEmpty(fromServer, false, 1, 1));
assertTrue(waitForSupervision());
},
@@ -454,7 +475,7 @@
var cinfo = global.ArangoClusterInfo.getCollectionInfo(
"_system", c[1].name());
var shard = Object.keys(cinfo.shards)[0];
-moveShard("_system", c[1]._id, shard, fromServer, toServer);
+assertTrue(moveShard("_system", c[1]._id, shard, fromServer, toServer));
assertTrue(testServerEmpty(fromServer, false, 1, 1));
assertTrue(waitForSupervision());
},
@@ -472,7 +493,7 @@
var cinfo = global.ArangoClusterInfo.getCollectionInfo(
"_system", c[1].name());
var shard = Object.keys(cinfo.shards)[0];
-moveShard("_system", c[1]._id, shard, fromServer, toServer);
+assertTrue(moveShard("_system", c[1]._id, shard, fromServer, toServer));
assertTrue(testServerEmpty(fromServer, false, 1, 1));
assertTrue(waitForSupervision());
},
@@ -485,7 +506,7 @@
assertTrue(waitForSynchronousReplication("_system"));
var servers = findCollectionServers("_system", c[0].name());
var toClean = servers[1];
-cleanOutServer(toClean);
+assertTrue(cleanOutServer(toClean));
assertTrue(testServerEmpty(toClean, true));
assertTrue(waitForSupervision());
},
@@ -498,7 +519,7 @@
assertTrue(waitForSynchronousReplication("_system"));
var servers = findCollectionServers("_system", c[0].name());
var toClean = servers[0];
-cleanOutServer(toClean);
+assertTrue(cleanOutServer(toClean));
assertTrue(testServerEmpty(toClean, true));
assertTrue(waitForSupervision());
},
@@ -512,7 +533,7 @@
assertTrue(waitForSynchronousReplication("_system"));
var servers = findCollectionServers("_system", c[1].name());
var toClean = servers[0];
-cleanOutServer(toClean);
+assertTrue(cleanOutServer(toClean));
assertTrue(testServerEmpty(toClean, true));
assertTrue(waitForSupervision());
},
@@ -526,7 +547,7 @@
assertTrue(waitForSynchronousReplication("_system"));
var servers = findCollectionServers("_system", c[1].name());
var toClean = servers[0];
-cleanOutServer(toClean);
+assertTrue(cleanOutServer(toClean));
assertTrue(testServerEmpty(toClean, true));
assertTrue(waitForSupervision());
},
@@ -540,7 +561,7 @@
assertTrue(waitForSynchronousReplication("_system"));
var servers = findCollectionServers("_system", c[1].name());
var toClean = servers[1];
-cleanOutServer(toClean);
+assertTrue(cleanOutServer(toClean));
assertTrue(testServerEmpty(toClean, true));
assertTrue(waitForSupervision());
},
@@ -554,7 +575,7 @@
assertTrue(waitForSynchronousReplication("_system"));
var servers = findCollectionServers("_system", c[1].name());
var toClean = servers[0];
-cleanOutServer(toClean);
+assertTrue(cleanOutServer(toClean));
assertTrue(testServerEmpty(toClean, true));
assertTrue(waitForSupervision());
},