mirror of https://gitee.com/bigwinds/arangodb

commit 15bbfc7b0c
Merge branch 'engine-api' of github.com:arangodb/arangodb into engine-api
@@ -18,11 +18,11 @@ done
 in progress
 -----------
 - Rename OperationCursor->getMoreMptr => getMoreTokens, "returns" std::vector<TOKEN>&
+- move engine-specific parts of transaction.cpp into engine
+- transaction API
 
 to do
 -----
-- move engine-specific parts of transaction.cpp into engine
-- transaction API
 - check for illegal includes
 - fix includes during API conversion
 - DML API
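Note on the first in-progress item: the cursor rename could end up looking roughly like the standalone sketch below. This is an illustrative reconstruction, not part of the patch — the TOKEN alias, the batchSize parameter, and all cursor state are assumptions; only the names OperationCursor, getMoreMptr, and getMoreTokens come from the list above.

#include <cstddef>
#include <cstdint>
#include <vector>

using TOKEN = std::uint64_t;  // assumption: an opaque per-document token

class OperationCursor {
 public:
  // formerly getMoreMptr(); "returns" the next batch by reference to an
  // internal buffer, so no std::vector<TOKEN> is copied on every call
  std::vector<TOKEN>& getMoreTokens(std::size_t batchSize = 1000) {
    _buffer.clear();
    for (std::size_t i = 0; i < batchSize && _next < _end; ++i) {
      _buffer.push_back(_next++);  // stand-in for reading from the engine
    }
    return _buffer;
  }

 private:
  std::vector<TOKEN> _buffer;
  TOKEN _next = 0;     // fake cursor state, just for the sketch
  TOKEN _end = 10000;
};

int main() {
  OperationCursor cursor;
  auto& tokens = cursor.getMoreTokens(3);  // first batch: {0, 1, 2}
  return tokens.size() == 3 ? 0 : 1;
}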
@@ -23,6 +23,8 @@
 ////////////////////////////////////////////////////////////////////////////////
 
 #include "AgencyComm.h"
+#include "ApplicationFeatures/ApplicationServer.h"
+#include "RestServer/ServerFeature.h"
 
 #include <thread>
 #ifdef DEBUG_SYNC_REPLICATION
@@ -1322,6 +1324,18 @@ AgencyCommResult AgencyComm::sendWithFailover(
   // timeout exit strategy
   if (std::chrono::steady_clock::now() < timeOut) {
     if (tries > 0) {
+      auto serverFeature =
+          application_features::ApplicationServer::getFeature<ServerFeature>(
+              "Server");
+      if (serverFeature->isStopping()) {
+        LOG_TOPIC(INFO, Logger::AGENCYCOMM)
+            << "Unsuccessful AgencyComm: Timeout because of shutdown "
+            << "errorCode: " << result.errorCode()
+            << " errorMessage: " << result.errorMessage()
+            << " errorDetails: " << result.errorDetails();
+        return result;
+      }
+
       std::this_thread::sleep_for(waitUntil-std::chrono::steady_clock::now());
       if (waitInterval.count() == 0.0) {
         waitInterval = std::chrono::duration<double>(0.25);
@@ -3517,6 +3517,11 @@ void RestReplicationHandler::handleCommandHoldReadLockCollection() {
     return;
   }
 
+  {
+    CONDITION_LOCKER(locker, _condVar);
+    _holdReadLockJobs.emplace(id, false);
+  }
+
   auto trxContext = StandaloneTransactionContext::Create(_vocbase);
   SingleCollectionTransaction trx(trxContext, col->cid(), TRI_TRANSACTION_READ);
   trx.addHint(TRI_TRANSACTION_HINT_LOCK_ENTIRELY, false);
@@ -3530,7 +3535,16 @@ void RestReplicationHandler::handleCommandHoldReadLockCollection() {
 
   {
     CONDITION_LOCKER(locker, _condVar);
-    _holdReadLockJobs.insert(id);
+    auto it = _holdReadLockJobs.find(id);
+    if (it == _holdReadLockJobs.end()) {
+      // Entry has been removed since, so we cancel the whole thing
+      // right away and generate an error:
+      generateError(rest::ResponseCode::SERVER_ERROR,
+                    TRI_ERROR_TRANSACTION_INTERNAL,
+                    "read transaction was cancelled");
+      return;
+    }
+    it->second = true;  // mark the read lock as acquired
   }
 
   double now = TRI_microtime();
@@ -3588,6 +3602,8 @@ void RestReplicationHandler::handleCommandCheckHoldReadLockCollection() {
   }
   std::string id = idSlice.copyString();
 
+  bool lockHeld = false;
+
   {
     CONDITION_LOCKER(locker, _condVar);
     auto it = _holdReadLockJobs.find(id);
@@ -3596,12 +3612,17 @@ void RestReplicationHandler::handleCommandCheckHoldReadLockCollection() {
                     "no hold read lock job found for 'id'");
       return;
     }
+    if (it->second) {
+      lockHeld = true;
+    }
+
   }
 
   VPackBuilder b;
   {
     VPackObjectBuilder bb(&b);
     b.add("error", VPackValue(false));
+    b.add("lockHeld", VPackValue(lockHeld));
   }
 
   generateResult(rest::ResponseCode::OK, b.slice());
@@ -3633,11 +3654,19 @@ void RestReplicationHandler::handleCommandCancelHoldReadLockCollection() {
   }
   std::string id = idSlice.copyString();
 
+  bool lockHeld = false;
   {
     CONDITION_LOCKER(locker, _condVar);
     auto it = _holdReadLockJobs.find(id);
     if (it != _holdReadLockJobs.end()) {
+      // Note that this approach works if the lock has been acquired
+      // as well as if we still wait for the read lock, in which case
+      // it will eventually be acquired but immediately released:
+      if (it->second) {
+        lockHeld = true;
+      }
       _holdReadLockJobs.erase(it);
+      _condVar.broadcast();
     }
   }
 
@@ -3645,6 +3674,7 @@ void RestReplicationHandler::handleCommandCancelHoldReadLockCollection() {
   {
     VPackObjectBuilder bb(&b);
     b.add("error", VPackValue(false));
+    b.add("lockHeld", VPackValue(lockHeld));
   }
 
   generateResult(rest::ResponseCode::OK, b.slice());
@@ -3677,4 +3707,4 @@ arangodb::basics::ConditionVariable RestReplicationHandler::_condVar;
 /// the flag is set for the ID of a job, the job is cancelled
 //////////////////////////////////////////////////////////////////////////////
 
-std::unordered_set<std::string> RestReplicationHandler::_holdReadLockJobs;
+std::unordered_map<std::string, bool> RestReplicationHandler::_holdReadLockJobs;
@@ -366,13 +366,18 @@ class RestReplicationHandler : public RestVocbaseBaseHandler {
   static arangodb::basics::ConditionVariable _condVar;
 
   //////////////////////////////////////////////////////////////////////////////
-  /// @brief global set of ids of holdReadLockCollection jobs, if
-  /// an id is removed here (under the protection of the mutex of
-  /// condVar) and a broadcast is sent, the job with that id is
-  /// terminated.
+  /// @brief global set of ids of holdReadLockCollection jobs, an
+  /// id mapping to false here indicates that a request to get the
+  /// read lock has been started, the bool is changed to true once
+  /// this read lock is acquired. To cancel the read lock, remove
+  /// the entry here (under the protection of the mutex of
+  /// condVar) and send a broadcast to the condition variable,
+  /// the job with that id is terminated. If it times out, then
+  /// the read lock is released automatically and the entry here
+  /// is deleted.
   //////////////////////////////////////////////////////////////////////////////
 
-  static std::unordered_set<std::string> _holdReadLockJobs;
+  static std::unordered_map<std::string, bool> _holdReadLockJobs;
 
 };
 }
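To make the new bookkeeping concrete, here is a minimal standalone sketch of the entry lifecycle that the doc comment above describes. Illustrative only: in the real handlers every step happens under CONDITION_LOCKER on _condVar, which this sketch omits, and the POST/PUT/DELETE mapping follows the replication handlers and the cluster test code in this commit.

#include <cassert>
#include <string>
#include <unordered_map>

int main() {
  // same container type as the new _holdReadLockJobs
  std::unordered_map<std::string, bool> jobs;

  jobs.emplace("job-1", false);  // POST registered the job, lock not yet held
  assert(!jobs.at("job-1"));     // PUT would report lockHeld == false

  jobs["job-1"] = true;          // the read lock has been acquired
  assert(jobs.at("job-1"));      // PUT now reports lockHeld == true

  jobs.erase("job-1");           // DELETE cancels; the real code then
                                 // broadcasts on _condVar to end the job
  assert(jobs.find("job-1") == jobs.end());
  return 0;
}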
@@ -190,6 +190,7 @@ void ServerFeature::beginShutdown() {
   std::string msg =
       ArangoGlobalContext::CONTEXT->binaryName() + " [shutting down]";
   TRI_SetProcessTitle(msg.c_str());
+  _isStopping = true;
 }
 
 void ServerFeature::waitForHeartbeat() {
@@ -47,6 +47,7 @@ class ServerFeature final : public application_features::ApplicationFeature {
   void validateOptions(std::shared_ptr<options::ProgramOptions>) override final;
   void start() override final;
   void beginShutdown() override final;
+  bool isStopping() const { return _isStopping; }
 
  public:
   OperationMode operationMode() const { return _operationMode; }
@@ -70,6 +71,7 @@ class ServerFeature final : public application_features::ApplicationFeature {
   uint32_t _vppMaxSize;
   int* _result;
   OperationMode _operationMode;
+  bool _isStopping = false;
 };
 }
 
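The shutdown path added to ServerFeature is a plain flag: beginShutdown() sets _isStopping, and AgencyComm::sendWithFailover() (see the earlier hunk) polls isStopping() to abort its retry loop. A condensed sketch, simplified from the patch — the std::atomic<bool> is this sketch's conservative substitute for the plain bool the patch uses, since the flag is read from other threads:

#include <atomic>

class ServerFeatureSketch {
 public:
  void beginShutdown() { _isStopping = true; }     // called once at shutdown
  bool isStopping() const { return _isStopping; }

 private:
  std::atomic<bool> _isStopping{false};  // the patch itself uses a plain bool
};

// hypothetical retry loop in the style of AgencyComm::sendWithFailover()
int retryUntilStopped(ServerFeatureSketch const& server) {
  while (!server.isStopping()) {
    // ... try the request, sleep, try again ...
    break;  // placeholder so the sketch terminates
  }
  return 0;
}

int main() {
  ServerFeatureSketch server;
  server.beginShutdown();
  return retryUntilStopped(server);
}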
@@ -166,7 +166,12 @@ Scheduler::Scheduler(size_t nrThreads, size_t maxQueueSize)
   initializeSignalHandlers();
 }
 
-Scheduler::~Scheduler() { deleteOldThreads(); }
+Scheduler::~Scheduler() {
+  if (_threadManager != nullptr) {
+    _threadManager->cancel();
+  }
+  deleteOldThreads();
+}
 
 // -----------------------------------------------------------------------------
 // --SECTION-- constructors and destructors
@@ -1823,6 +1823,25 @@ OperationResult Transaction::insertCoordinator(std::string const& collectionName
 }
 #endif
 
+//////////////////////////////////////////////////////////////////////////////
+/// @brief choose a timeout for synchronous replication, based on the
+/// number of documents we ship over
+//////////////////////////////////////////////////////////////////////////////
+
+static double chooseTimeout(size_t count) {
+  // We usually assume that a server can process at least 5000 documents
+  // per second (this is a low estimate), and use a low limit of 0.5s
+  // and a high timeout of 120s
+  double timeout = count / 5000.0;
+  if (timeout < 0.5) {
+    return 0.5;
+  } else if (timeout > 120) {
+    return 120.0;
+  } else {
+    return timeout;
+  }
+}
+
 //////////////////////////////////////////////////////////////////////////////
 /// @brief create one or multiple documents in a collection, local
 /// the single-document variant of this operation will either succeed or,
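A quick sanity check of the clamping in chooseTimeout(): the division by 5000.0 keeps fractional seconds (plain integer division would truncate every count below 5000 to zero). The function is restated here so the snippet compiles on its own.

#include <cassert>
#include <cstddef>

// restatement of chooseTimeout() from the hunk above
static double chooseTimeout(std::size_t count) {
  double timeout = count / 5000.0;  // assume >= 5000 documents/s throughput
  if (timeout < 0.5) {
    return 0.5;                     // floor: half a second
  } else if (timeout > 120) {
    return 120.0;                   // cap: two minutes
  }
  return timeout;
}

int main() {
  assert(chooseTimeout(100) == 0.5);        // small batches hit the floor
  assert(chooseTimeout(10000) == 2.0);      // 10000 / 5000.0 seconds
  assert(chooseTimeout(1000000) == 120.0);  // huge batches hit the cap
  return 0;
}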
@@ -1980,7 +1999,7 @@ OperationResult Transaction::insertLocal(std::string const& collectionName,
       }
       auto cc = arangodb::ClusterComm::instance();
       size_t nrDone = 0;
-      size_t nrGood = cc->performRequests(requests, TRX_FOLLOWER_TIMEOUT,
+      size_t nrGood = cc->performRequests(requests, chooseTimeout(count),
                                           nrDone, Logger::REPLICATION);
       if (nrGood < followers->size()) {
         // we drop all followers that were not successful:
@@ -2274,6 +2293,7 @@ OperationResult Transaction::modifyLocal(
       };
 
       VPackSlice ourResult = resultBuilder.slice();
+      size_t count = 0;
       if (multiCase) {
         VPackArrayBuilder guard(&payload);
         VPackArrayIterator itValue(newValue);
@@ -2282,6 +2302,7 @@ OperationResult Transaction::modifyLocal(
           TRI_ASSERT((*itResult).isObject());
           if (!(*itResult).hasKey("error")) {
             doOneDoc(itValue.value(), itResult.value());
+            count++;
           }
           itValue.next();
           itResult.next();
@@ -2289,7 +2310,9 @@ OperationResult Transaction::modifyLocal(
       } else {
         VPackArrayBuilder guard(&payload);
         doOneDoc(newValue, ourResult);
+        count++;
       }
+      if (count > 0) {
       auto body = std::make_shared<std::string>();
       *body = payload.slice().toJson();
 
@@ -2303,8 +2326,8 @@ OperationResult Transaction::modifyLocal(
                                  path, body);
       }
       size_t nrDone = 0;
-      size_t nrGood = cc->performRequests(requests, TRX_FOLLOWER_TIMEOUT, nrDone,
-                                          Logger::REPLICATION);
+      size_t nrGood = cc->performRequests(requests, chooseTimeout(count),
+                                          nrDone, Logger::REPLICATION);
       if (nrGood < followers->size()) {
         // we drop all followers that were not successful:
         for (size_t i = 0; i < followers->size(); ++i) {
@@ -2330,6 +2353,7 @@ OperationResult Transaction::modifyLocal(
           }
         }
       }
+      }
 
       if (doingSynchronousReplication && options.silent) {
         // We needed the results, but do not want to report:
@@ -2523,6 +2547,7 @@ OperationResult Transaction::removeLocal(std::string const& collectionName,
       };
 
       VPackSlice ourResult = resultBuilder.slice();
+      size_t count = 0;
       if (value.isArray()) {
         VPackArrayBuilder guard(&payload);
         VPackArrayIterator itValue(value);
@@ -2531,6 +2556,7 @@ OperationResult Transaction::removeLocal(std::string const& collectionName,
           TRI_ASSERT((*itResult).isObject());
           if (!(*itResult).hasKey("error")) {
             doOneDoc(itValue.value(), itResult.value());
+            count++;
           }
           itValue.next();
           itResult.next();
@@ -2538,7 +2564,9 @@ OperationResult Transaction::removeLocal(std::string const& collectionName,
       } else {
         VPackArrayBuilder guard(&payload);
         doOneDoc(value, ourResult);
+        count++;
       }
+      if (count > 0) {
       auto body = std::make_shared<std::string>();
       *body = payload.slice().toJson();
 
@@ -2550,8 +2578,8 @@ OperationResult Transaction::removeLocal(std::string const& collectionName,
                                  path, body);
       }
       size_t nrDone = 0;
-      size_t nrGood = cc->performRequests(requests, TRX_FOLLOWER_TIMEOUT, nrDone,
-                                          Logger::REPLICATION);
+      size_t nrGood = cc->performRequests(requests, chooseTimeout(count),
+                                          nrDone, Logger::REPLICATION);
       if (nrGood < followers->size()) {
         // we drop all followers that were not successful:
         for (size_t i = 0; i < followers->size(); ++i) {
@@ -2577,6 +2605,7 @@ OperationResult Transaction::removeLocal(std::string const& collectionName,
           }
         }
       }
+      }
 
       if (doingSynchronousReplication && options.silent) {
         // We needed the results, but do not want to report:
@@ -192,6 +192,7 @@ const optionsDefaults = {
   'valgrindArgs': {},
   'valgrindHosts': false,
   'verbose': false,
+  'walFlushTimeout': 30000,
   'writeXmlReport': true
 };
 
@@ -248,7 +249,7 @@ let LOGS_DIR;
 let UNITTESTS_DIR;
 let GDB_OUTPUT="";
 
-function makeResults (testname) {
+function makeResults (testname, instanceInfo) {
   const startTime = time();
 
   return function (status, message) {
@@ -259,7 +260,7 @@ function makeResults (testname) {
     let result;
 
     try {
-      result = JSON.parse(fs.read('testresult.json'));
+      result = JSON.parse(fs.read(instanceInfo.rootDir + '/testresult.json'));
 
       if ((typeof result[0] === 'object') &&
           result[0].hasOwnProperty('status')) {
@@ -312,6 +313,7 @@ function makeArgsArangod (options, appDir, role) {
   return {
     'configuration': 'etc/testing/' + config,
     'define': 'TOP_DIR=' + TOP_DIR,
+    'wal.flush-timeout': options.walFlushTimeout,
     'javascript.app-path': appDir,
     'http.trusted-origin': options.httpTrustedOrigin || 'all'
   };
@@ -691,9 +693,7 @@ function runThere (options, instanceInfo, file) {
       'return runTest(' + JSON.stringify(file) + ', true' + mochaGrep + ');';
   }
 
-  if (options.propagateInstanceInfo) {
   testCode = 'global.instanceInfo = ' + JSON.stringify(instanceInfo) + ';\n' + testCode;
-  }
 
   let httpOptions = makeAuthorizationHeaders(options);
   httpOptions.method = 'POST';
@@ -1078,12 +1078,12 @@ function runInArangosh (options, instanceInfo, file, addArgs) {
   if (addArgs !== undefined) {
     args = Object.assign(args, addArgs);
   }
-  fs.write('instanceinfo.json', JSON.stringify(instanceInfo));
+  require('internal').env.INSTANCEINFO = JSON.stringify(instanceInfo);
   let rc = executeAndWait(ARANGOSH_BIN, toArgv(args), options);
 
   let result;
   try {
-    result = JSON.parse(fs.read('testresult.json'));
+    result = JSON.parse(fs.read(instanceInfo.rootDir + '/testresult.json'));
   } catch (x) {
     return rc;
   }
@@ -1116,6 +1116,7 @@ function runArangoshCmd (options, instanceInfo, addArgs, cmds) {
     args = Object.assign(args, addArgs);
   }
 
+  require('internal').env.INSTANCEINFO = JSON.stringify(instanceInfo);
   const argv = toArgv(args).concat(cmds);
   return executeAndWait(ARANGOSH_BIN, argv, options);
 }
@@ -3357,10 +3358,10 @@ testFuncs.recovery = function (options) {
 // //////////////////////////////////////////////////////////////////////////////
 
 testFuncs.replication_ongoing = function (options) {
-  const mr = makeResults('replication');
-
   let master = startInstance('tcp', options, {}, 'master_ongoing');
 
+  const mr = makeResults('replication', master);
+
   if (master === false) {
     return mr(false, 'failed to start master!');
   }
@@ -3399,12 +3400,12 @@ testFuncs.replication_ongoing = function (options) {
 // //////////////////////////////////////////////////////////////////////////////
 
 testFuncs.replication_static = function (options) {
-  const mr = makeResults('replication');
-
   let master = startInstance('tcp', options, {
     'server.authentication': 'true'
   }, 'master_static');
 
+  const mr = makeResults('replication', master);
+
   if (master === false) {
     return mr(false, 'failed to start master!');
   }
@@ -3455,9 +3456,9 @@ testFuncs.replication_static = function (options) {
 // //////////////////////////////////////////////////////////////////////////////
 
 testFuncs.replication_sync = function (options) {
-  const mr = makeResults('replication');
   let master = startInstance('tcp', options, {}, 'master_sync');
 
+  const mr = makeResults('replication', master);
   if (master === false) {
     return mr(false, 'failed to start master!');
   }
@@ -56,25 +56,7 @@ function agencyTestSuite () {
 /// @brief the agency servers
 ////////////////////////////////////////////////////////////////////////////////
 
-  var count = 20;
-  while (true) {
-    if (require('fs').exists('instanceinfo.json')) {
-      var instanceInfoData = require('fs').read('instanceinfo.json');
-      var instanceInfo;
-      try {
-        instanceInfo = JSON.parse(instanceInfoData);
-        break;
-      } catch (err) {
-        console.error('Failed to parse JSON: instanceinfo.json');
-        console.error(instanceInfoData);
-      }
-    }
-    wait(1.0);
-    if (--count <= 0) {
-      throw 'peng';
-    }
-  }
-
+  var instanceInfo = JSON.parse(require('internal').env.INSTANCEINFO);
   var agencyServers = instanceInfo.arangods.map(arangod => {
     return arangod.url;
   });
@@ -10,6 +10,10 @@ var runTest = require('jsunity').runTest,
 // //////////////////////////////////////////////////////////////////////////////
 
 function runJSUnityTests (tests) {
+  let instanceinfo = JSON.parse(require('internal').env.INSTANCEINFO);
+  if (!instanceinfo) {
+    throw new Error('env.INSTANCEINFO was not set by caller!');
+  }
   var result = true;
   var allResults = [];
   var failed = [];
@@ -45,7 +49,7 @@ function runJSUnityTests (tests) {
 
     internal.wait(0); // force GC
   });
-  require('fs').write('testresult.json', JSON.stringify(allResults));
+  require('fs').write(instanceinfo.rootDir + '/testresult.json', JSON.stringify(allResults));
 
   if (failed.length > 1) {
     print('The following ' + failed.length + ' test files produced errors: ', failed.join(', '));
@@ -84,29 +84,46 @@ function startReadLockOnLeader (endpoint, database, collName, timeout) {
   var body = { 'id': id, 'collection': collName, 'ttl': timeout };
   r = request({ url: url + '/_api/replication/holdReadLockCollection',
                 body: JSON.stringify(body),
-                method: 'POST', headers: {'x-arango-async': 'store'} });
+                method: 'POST', headers: {'x-arango-async': true} });
   if (r.status !== 202) {
     console.error('startReadLockOnLeader: Could not start read lock for shard',
                   collName, r);
     return false;
   }
-  var rr = r; // keep a copy
 
   var count = 0;
   while (++count < 20) { // wait for some time until read lock established:
     // Now check that we hold the read lock:
     r = request({ url: url + '/_api/replication/holdReadLockCollection',
-                  body: JSON.stringify(body),
-                  method: 'PUT' });
+                  body: JSON.stringify(body), method: 'PUT' });
     if (r.status === 200) {
-      return id;
+      let ansBody = {};
+      try {
+        ansBody = JSON.parse(r.body);
+      } catch (err) {
      }
+      if (ansBody.lockHeld) {
+        return id;
+      } else {
+        console.debug('startReadLockOnLeader: Lock not yet acquired...');
+      }
+    } else {
       console.debug('startReadLockOnLeader: Do not see read lock yet...');
+    }
     wait(0.5);
   }
-  var asyncJobId = rr.headers['x-arango-async-id'];
-  r = request({ url: url + '/_api/job/' + asyncJobId, body: '', method: 'PUT'});
-  console.error('startReadLockOnLeader: giving up, async result:', r);
+  console.error('startReadLockOnLeader: giving up');
+  try {
+    r = request({ url: url + '/_api/replication/holdReadLockCollection',
+                  body: JSON.stringify({'id': id}), method: 'DELETE' });
+  } catch (err2) {
+    console.error('startReadLockOnLeader: exception in cancel:',
+                  JSON.stringify(err2));
+  }
+  if (r.status !== 200) {
+    console.error('startReadLockOnLeader: cancelation error for shard',
+                  collName, r);
+  }
   return false;
 }
 
@@ -527,8 +544,7 @@ function synchronizeOneShard (database, shard, planId, leader) {
                       'syncCollectionFinalize:', err3);
       }
       finally {
-        if (!cancelReadLockOnLeader(ep, database,
-                                    lockJobId)) {
+        if (!cancelReadLockOnLeader(ep, database, lockJobId)) {
           console.error('synchronizeOneShard: read lock has timed out',
                         'for shard', shard);
           ok = false;
@@ -539,7 +555,7 @@ function synchronizeOneShard (database, shard, planId, leader) {
                   shard);
   }
   if (ok) {
-    console.debug('synchronizeOneShard: synchronization worked for shard',
+    console.info('synchronizeOneShard: synchronization worked for shard',
                   shard);
   } else {
     throw 'Did not work for shard ' + shard + '.';
@@ -920,12 +936,14 @@ function updateCurrentForCollections(localErrors, current) {
     return payload;
   }
 
-  function makeDropCurrentEntryCollection(dbname, col, shard, trx) {
-    trx[0][curCollections + dbname + '/' + col + '/' + shard] =
+  function makeDropCurrentEntryCollection(dbname, col, shard) {
+    let trx = {};
+    trx[curCollections + dbname + '/' + col + '/' + shard] =
       {op: 'delete'};
+    return trx;
   }
 
-  let trx = [{}];
+  let trx = {};
 
   // Go through local databases and collections and add stuff to Current
   // as needed:
@@ -946,7 +964,7 @@ function updateCurrentForCollections(localErrors, current) {
 
         let currentCollectionInfo = fetchKey(current, 'Collections', database, shardInfo.planId, shard);
         if (!_.isEqual(localCollectionInfo, currentCollectionInfo)) {
-          trx[0][curCollections + database + '/' + shardInfo.planId + '/' + shardInfo.name] = {
+          trx[curCollections + database + '/' + shardInfo.planId + '/' + shardInfo.name] = {
             op: 'set',
             new: localCollectionInfo,
           };
@@ -955,7 +973,7 @@ function updateCurrentForCollections(localErrors, current) {
           let currentServers = fetchKey(current, 'Collections', database, shardInfo.planId, shard, 'servers');
           // we were previously leader and we are done resigning. update current and let supervision handle the rest
           if (Array.isArray(currentServers) && currentServers[0] === ourselves) {
-            trx[0][curCollections + database + '/' + shardInfo.planId + '/' + shardInfo.name + '/servers'] = {
+            trx[curCollections + database + '/' + shardInfo.planId + '/' + shardInfo.name + '/servers'] = {
               op: 'set',
               new: ['_' + ourselves].concat(db._collection(shardInfo.name).getFollowers()),
             };
@@ -993,8 +1011,7 @@ function updateCurrentForCollections(localErrors, current) {
           let cur = currentCollections[database][collection][shard];
           if (!localCollections.hasOwnProperty(shard) &&
               cur.servers[0] === ourselves) {
-            makeDropCurrentEntryCollection(database, collection, shard,
-                                           trx);
+            Object.assign(trx, makeDropCurrentEntryCollection(database, collection, shard));
           }
         }
       }
@@ -1104,8 +1121,9 @@ function migratePrimary(plan, current) {
   // diff current and local and prepare agency transactions or whatever
   // to update current. Will report the errors created locally to the agency
   let trx = updateCurrentForCollections(localErrors, current);
-  if (trx.length > 0 && Object.keys(trx[0]).length !== 0) {
-    trx[0][curVersion] = {op: 'increment'};
+  if (Object.keys(trx).length > 0) {
+    trx[curVersion] = {op: 'increment'};
+    trx = [trx];
     // TODO: reduce timeout when we can:
     try {
       let res = global.ArangoAgency.write([trx]);
|
||||||
// diff current and local and prepare agency transactions or whatever
|
// diff current and local and prepare agency transactions or whatever
|
||||||
// to update current. will report the errors created locally to the agency
|
// to update current. will report the errors created locally to the agency
|
||||||
let trx = updateCurrentForDatabases(localErrors, current.Databases);
|
let trx = updateCurrentForDatabases(localErrors, current.Databases);
|
||||||
if (Object.keys(trx).length !== 0) {
|
if (Object.keys(trx).length > 0) {
|
||||||
|
trx[curVersion] = {op: 'increment'};
|
||||||
trx = [trx];
|
trx = [trx];
|
||||||
trx[0][curVersion] = {op: 'increment'};
|
|
||||||
// TODO: reduce timeout when we can:
|
// TODO: reduce timeout when we can:
|
||||||
try {
|
try {
|
||||||
let res = global.ArangoAgency.write([trx]);
|
let res = global.ArangoAgency.write([trx]);
|
||||||
|
|
|
@@ -740,7 +740,7 @@ describe('Cluster sync', function() {
       expect(db._collection('s100001').isLeader()).to.equal(true);
     });
   });
-  describe('Update current', function() {
+  describe('Update current database', function() {
     beforeEach(function() {
       db._databases().forEach(database => {
         if (database !== '_system') {
@@ -854,4 +854,7 @@ describe('Cluster sync', function() {
       expect(result['/arango/Current/Databases/testi/repltest']).to.have.deep.property('new.error', false);
     });
   });
+  describe('Update current collection', function() {
+
+  });
 });