1
0
Fork 0

replication cleanup

This commit is contained in:
Jan Steemann 2016-02-05 13:08:12 +01:00
parent d63fa1ad81
commit abd382bf19
13 changed files with 281 additions and 301 deletions

View File

@ -262,7 +262,6 @@ add_executable(
VocBase/replication-applier.cpp
VocBase/replication-common.cpp
VocBase/replication-dump.cpp
VocBase/replication-master.cpp
VocBase/server.cpp
VocBase/shape-accessor.cpp
VocBase/shaped-json.cpp

View File

@ -219,7 +219,6 @@ arangod_libarangod_a_SOURCES = \
arangod/VocBase/replication-applier.cpp \
arangod/VocBase/replication-common.cpp \
arangod/VocBase/replication-dump.cpp \
arangod/VocBase/replication-master.cpp \
arangod/VocBase/server.cpp \
arangod/VocBase/shape-accessor.cpp \
arangod/VocBase/shaped-json.cpp \

View File

@ -184,7 +184,7 @@ retry:
{
WRITE_LOCKER_EVENTUAL(writeLocker, _applier->_statusLock, 1000);
LOG_TOPIC(TRACE, Logger::REPLICATION) << "stopped replication applier for database '" << _vocbase->_name << "' with lastProcessedContinuousTick: " << _applier->_state._lastProcessedContinuousTick << ", lastAppliedContinuousTick: " << _applier->_state._lastAppliedContinuousTick << ", safeResumeTick: " << _applier->_state._safeResumeTick;
LOG_TOPIC(DEBUG, Logger::REPLICATION) << "stopped replication applier for database '" << _vocbase->_name << "' with lastProcessedContinuousTick: " << _applier->_state._lastProcessedContinuousTick << ", lastAppliedContinuousTick: " << _applier->_state._lastAppliedContinuousTick << ", safeResumeTick: " << _applier->_state._safeResumeTick;
_applier->_state._lastProcessedContinuousTick = 0;
_applier->_state._lastAppliedContinuousTick = 0;
@ -285,7 +285,10 @@ void ContinuousSyncer::setProgress(char const* msg) {
_applier->setProgress(msg, true);
if (_verbose) {
LOG_TOPIC(INFO, Logger::REPLICATION) << "applier progress: " << msg;
LOG_TOPIC(INFO, Logger::REPLICATION) << msg;
}
else {
LOG_TOPIC(DEBUG, Logger::REPLICATION) << msg;
}
}
@ -294,7 +297,14 @@ void ContinuousSyncer::setProgress(char const* msg) {
////////////////////////////////////////////////////////////////////////////////
void ContinuousSyncer::setProgress(std::string const& msg) {
setProgress(msg.c_str());
_applier->setProgress(msg.c_str(), true);
if (_verbose) {
LOG_TOPIC(INFO, Logger::REPLICATION) << msg;
}
else {
LOG_TOPIC(DEBUG, Logger::REPLICATION) << msg;
}
}
////////////////////////////////////////////////////////////////////////////////
@ -616,7 +626,7 @@ int ContinuousSyncer::startTransaction(TRI_json_t const* json) {
TRI_ASSERT(tid > 0);
LOG_TOPIC(TRACE, Logger::REPLICATION) << "starting transaction " << tid;
LOG_TOPIC(TRACE, Logger::REPLICATION) << "starting replication transaction " << tid;
auto trx = std::make_unique<ReplicationTransaction>(_server, _vocbase, tid);
@ -658,7 +668,7 @@ int ContinuousSyncer::abortTransaction(TRI_json_t const* json) {
TRI_ASSERT(tid > 0);
LOG_TOPIC(TRACE, Logger::REPLICATION) << "abort replication transaction " << tid;
LOG_TOPIC(TRACE, Logger::REPLICATION) << "aborting replication transaction " << tid;
auto trx = (*it).second;
_ongoingTransactions.erase(tid);
@ -1022,8 +1032,6 @@ int ContinuousSyncer::runContinuousSync(std::string& errorMsg) {
return TRI_ERROR_INTERNAL;
}
LOG_TOPIC(TRACE, Logger::REPLICATION) << "starting with from tick " << fromTick << ", fetch tick " << fetchTick << ", open transactions: " << _ongoingTransactions.size();
std::string const progress =
"starting with from tick " + StringUtils::itoa(fromTick) +
", fetch tick " + StringUtils::itoa(fetchTick) + ", open transactions: " +
@ -1151,8 +1159,6 @@ int ContinuousSyncer::fetchMasterState(std::string& errorMsg,
setProgress(progress);
LOG_TOPIC(TRACE, Logger::REPLICATION) << "fetching initial master state with from tick " << fromTick << ", to tick " << toTick << ", url " << url.c_str();
// send request
std::unique_ptr<SimpleHttpResult> response(
_client->request(HttpRequest::HTTP_REQUEST_GET, url, nullptr, 0));
@ -1280,11 +1286,10 @@ int ContinuousSyncer::followMasterLog(std::string& errorMsg,
_localServerIdString + "&includeSystem=" +
(_includeSystem ? "true" : "false");
LOG_TOPIC(TRACE, Logger::REPLICATION) << "running continuous replication request with from tick " << fetchTick << ", first regular tick " << firstRegularTick << ", url " << url.c_str();
// send request
std::string const progress =
"fetching master log from tick " + StringUtils::itoa(fetchTick) +
", first regular tick " + StringUtils::itoa(firstRegularTick) +
", open transactions: " + std::to_string(_ongoingTransactions.size());
setProgress(progress);

View File

@ -1720,6 +1720,8 @@ int InitialSyncer::handleCollection(TRI_json_t const* parameters,
int res = TRI_ERROR_INTERNAL;
{
READ_LOCKER(readLocker, _vocbase->_inventoryLock);
SingleCollectionWriteTransaction<UINT64_MAX> trx(
new StandaloneTransactionContext(), _vocbase, col->_cid);
@ -1759,52 +1761,41 @@ int InitialSyncer::handleCollection(TRI_json_t const* parameters,
" index(es) for " + collectionMsg;
setProgress(progress);
READ_LOCKER(readLocker, _vocbase->_inventoryLock);
try {
arangodb::CollectionGuard guard(_vocbase, col->_cid, false);
TRI_vocbase_col_t* col = guard.collection();
TRI_document_collection_t* document = trx.documentCollection();
TRI_ASSERT(document != nullptr);
if (col == nullptr) {
res = TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND;
} else {
TRI_document_collection_t* document = col->_collection;
TRI_ASSERT(document != nullptr);
for (auto const& idxDef : VPackArrayIterator(indexes)) {
arangodb::Index* idx = nullptr;
for (auto const& idxDef : VPackArrayIterator(indexes)) {
arangodb::Index* idx = nullptr;
if (idxDef.isObject()) {
VPackSlice const type = idxDef.get("type");
if (type.isString()) {
std::string const progress = "creating index of type " +
type.copyString() + " for " +
collectionMsg;
setProgress(progress);
}
}
res = TRI_FromVelocyPackIndexDocumentCollection(&trx, document,
idxDef, &idx);
if (res != TRI_ERROR_NO_ERROR) {
errorMsg = "could not create index: " +
std::string(TRI_errno_string(res));
break;
} else {
TRI_ASSERT(idx != nullptr);
res = TRI_SaveIndex(document, idx, true);
if (res != TRI_ERROR_NO_ERROR) {
errorMsg = "could not save index: " +
std::string(TRI_errno_string(res));
break;
}
if (idxDef.isObject()) {
VPackSlice const type = idxDef.get("type");
if (type.isString()) {
std::string const progress = "creating index of type " +
type.copyString() + " for " +
collectionMsg;
setProgress(progress);
}
}
TRI_WRITE_UNLOCK_DOCUMENTS_INDEXES_PRIMARY_COLLECTION(document);
res = TRI_FromVelocyPackIndexDocumentCollection(&trx, document,
idxDef, &idx);
if (res != TRI_ERROR_NO_ERROR) {
errorMsg = "could not create index: " +
std::string(TRI_errno_string(res));
break;
} else {
TRI_ASSERT(idx != nullptr);
res = TRI_SaveIndex(document, idx, true);
if (res != TRI_ERROR_NO_ERROR) {
errorMsg = "could not save index: " +
std::string(TRI_errno_string(res));
break;
}
}
}
} catch (arangodb::basics::Exception const& ex) {
res = ex.code();

View File

@ -111,7 +111,10 @@ class InitialSyncer : public Syncer {
_progress = msg;
if (_verbose) {
LOG_TOPIC(INFO, Logger::REPLICATION) << "synchronization progress: " << msg;
LOG_TOPIC(INFO, Logger::REPLICATION) << msg;
}
else {
LOG_TOPIC(DEBUG, Logger::REPLICATION) << msg;
}
if (_vocbase->_replicationApplier != nullptr) {

View File

@ -41,6 +41,11 @@
#include "VocBase/vocbase.h"
#include "VocBase/voc-types.h"
#include <velocypack/Builder.h>
#include <velocypack/Collection.h>
#include <velocypack/Slice.h>
#include <velocypack/velocypack-aliases.h>
using namespace arangodb;
using namespace arangodb::basics;
using namespace arangodb::rest;
@ -76,7 +81,7 @@ Syncer::Syncer(TRI_vocbase_t* vocbase,
TRI_InitConfigurationReplicationApplier(&_configuration);
TRI_CopyConfigurationReplicationApplier(configuration, &_configuration);
TRI_InitMasterInfoReplication(&_masterInfo, configuration->_endpoint);
_masterInfo._endpoint = configuration->_endpoint;
_endpoint = Endpoint::clientFactory(_configuration._endpoint);
@ -123,8 +128,6 @@ Syncer::~Syncer() {
delete _client;
delete _connection;
delete _endpoint;
TRI_DestroyMasterInfoReplication(&_masterInfo);
}
////////////////////////////////////////////////////////////////////////////////
@ -373,23 +376,19 @@ int Syncer::createCollection(TRI_json_t const* json, TRI_vocbase_col_t** dst) {
return TRI_ERROR_NO_ERROR;
}
TRI_json_t* keyOptions = nullptr;
if (JsonHelper::isObject(JsonHelper::getObjectElement(json, "keyOptions"))) {
keyOptions = TRI_CopyJson(TRI_UNKNOWN_MEM_ZONE,
JsonHelper::getObjectElement(json, "keyOptions"));
}
std::shared_ptr<VPackBuilder> opts = JsonHelper::toVelocyPack(keyOptions);
if (keyOptions != nullptr) {
TRI_FreeJson(TRI_UNKNOWN_MEM_ZONE, keyOptions);
}
std::shared_ptr<VPackBuilder> builder =
arangodb::basics::JsonHelper::toVelocyPack(json);
VocbaseCollectionInfo params(_vocbase, name.c_str(), builder->slice());
// merge in "isSystem" attribute
VPackBuilder s;
s.openObject();
s.add("isSystem", VPackValue(true));
s.close();
VPackBuilder merged = VPackCollection::merge(s.slice(), builder->slice(), true);
VocbaseCollectionInfo params(_vocbase, name.c_str(), merged.slice());
col = TRI_CreateCollectionVocBase(_vocbase, params, cid, true);
@ -570,7 +569,7 @@ int Syncer::getMasterState(std::string& errorMsg) {
if (response == nullptr || !response->isComplete()) {
errorMsg = "could not connect to master at " +
std::string(_masterInfo._endpoint) + ": " +
_masterInfo._endpoint + ": " +
_client->getErrorMessage();
return TRI_ERROR_REPLICATION_NO_RESPONSE;
@ -582,7 +581,7 @@ int Syncer::getMasterState(std::string& errorMsg) {
res = TRI_ERROR_REPLICATION_MASTER_ERROR;
errorMsg = "got invalid response from master at " +
std::string(_masterInfo._endpoint) + ": HTTP " +
_masterInfo._endpoint + ": HTTP " +
StringUtils::itoa(response->getHttpReturnCode()) + ": " +
response->getHttpReturnMessage();
} else {
@ -595,7 +594,7 @@ int Syncer::getMasterState(std::string& errorMsg) {
res = TRI_ERROR_REPLICATION_INVALID_RESPONSE;
errorMsg = "got invalid response from master at " +
std::string(_masterInfo._endpoint) + ": invalid JSON";
_masterInfo._endpoint + ": invalid JSON";
}
}
@ -608,7 +607,7 @@ int Syncer::getMasterState(std::string& errorMsg) {
int Syncer::handleStateResponse(TRI_json_t const* json, std::string& errorMsg) {
std::string const endpointString =
" from endpoint '" + std::string(_masterInfo._endpoint) + "'";
" from endpoint '" + _masterInfo._endpoint + "'";
// process "state" section
TRI_json_t const* state = JsonHelper::getObjectElement(json, "state");
@ -708,7 +707,7 @@ int Syncer::handleStateResponse(TRI_json_t const* json, std::string& errorMsg) {
_masterInfo._lastLogTick = lastLogTick;
_masterInfo._active = running;
TRI_LogMasterInfoReplication(&_masterInfo, "connected to");
LOG_TOPIC(INFO, Logger::REPLICATION) << "connected to master at " << _masterInfo._endpoint << ", id " << _masterInfo._serverId << ", version " << _masterInfo._majorVersion << "." << _masterInfo._minorVersion << ", last log tick " << _masterInfo._lastLogTick;
return TRI_ERROR_NO_ERROR;
}

View File

@ -26,7 +26,6 @@
#include "Basics/Common.h"
#include "VocBase/replication-applier.h"
#include "VocBase/replication-master.h"
#include "VocBase/server.h"
#include "VocBase/transaction.h"
#include "VocBase/update-policy.h"
@ -176,7 +175,15 @@ class Syncer {
/// @brief information about the master state
//////////////////////////////////////////////////////////////////////////////
TRI_replication_master_info_t _masterInfo;
struct {
std::string _endpoint;
TRI_server_id_t _serverId;
int _majorVersion;
int _minorVersion;
TRI_voc_tick_t _lastLogTick;
bool _active;
}
_masterInfo;
//////////////////////////////////////////////////////////////////////////////
/// @brief the update policy object (will be the same for all actions)

View File

@ -40,6 +40,8 @@
#include "VocBase/transaction.h"
#include "VocBase/vocbase.h"
using namespace arangodb;
////////////////////////////////////////////////////////////////////////////////
/// @brief read a tick value from a JSON struct
////////////////////////////////////////////////////////////////////////////////
@ -97,7 +99,7 @@ static int LoadConfiguration(TRI_vocbase_t* vocbase,
TRI_JsonFile(TRI_UNKNOWN_MEM_ZONE, filename, nullptr));
if (!TRI_IsObjectJson(json.get())) {
LOG(ERR) << "unable to read replication applier configuration from file '" << filename << "'";
LOG_TOPIC(ERR, Logger::REPLICATION) << "unable to read replication applier configuration from file '" << filename << "'";
TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
return TRI_ERROR_REPLICATION_INVALID_APPLIER_CONFIGURATION;
}
@ -796,7 +798,7 @@ int TRI_RemoveStateReplicationApplier(TRI_vocbase_t* vocbase) {
int res;
if (TRI_ExistsFile(filename)) {
LOG(TRACE) << "removing replication state file '" << filename << "'";
LOG_TOPIC(TRACE, Logger::REPLICATION) << "removing replication state file '" << filename << "'";
res = TRI_UnlinkFile(filename);
} else {
res = TRI_ERROR_NO_ERROR;
@ -825,7 +827,7 @@ int TRI_SaveStateReplicationApplier(
}
char* filename = GetStateFilename(vocbase);
LOG(TRACE) << "saving replication applier state to file '" << filename << "'";
LOG_TOPIC(TRACE, Logger::REPLICATION) << "saving replication applier state to file '" << filename << "'";
if (!TRI_SaveJson(filename, json.get(), doSync)) {
int res = TRI_errno();
@ -856,7 +858,7 @@ int TRI_LoadStateReplicationApplier(TRI_vocbase_t* vocbase,
return TRI_ERROR_OUT_OF_MEMORY;
}
LOG(TRACE) << "looking for replication state file '" << filename << "'";
LOG_TOPIC(TRACE, Logger::REPLICATION) << "looking for replication state file '" << filename << "'";
if (!TRI_ExistsFile(filename)) {
TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
@ -864,7 +866,7 @@ int TRI_LoadStateReplicationApplier(TRI_vocbase_t* vocbase,
return TRI_ERROR_FILE_NOT_FOUND;
}
LOG(TRACE) << "replication state file '" << filename << "' found";
LOG_TOPIC(DEBUG, Logger::REPLICATION) << "replication state file '" << filename << "' found";
std::unique_ptr<TRI_json_t> json(
TRI_JsonFile(TRI_UNKNOWN_MEM_ZONE, filename, nullptr));
@ -902,7 +904,7 @@ int TRI_LoadStateReplicationApplier(TRI_vocbase_t* vocbase,
ReadTick(json.get(), "safeResumeTick", &state->_safeResumeTick, true);
}
LOG(TRACE) << "replication state file read successfully";
LOG_TOPIC(DEBUG, Logger::REPLICATION) << "replication state file read successfully";
return res;
}
@ -1081,12 +1083,12 @@ TRI_replication_applier_t::~TRI_replication_applier_t() {
////////////////////////////////////////////////////////////////////////////////
int TRI_replication_applier_t::start(TRI_voc_tick_t initialTick, bool useTick) {
LOG(TRACE) << "requesting replication applier start. initialTick: " << initialTick << ", useTick: " << useTick;
if (_vocbase->_type == TRI_VOCBASE_TYPE_COORDINATOR) {
return TRI_ERROR_CLUSTER_UNSUPPORTED;
}
LOG_TOPIC(DEBUG, Logger::REPLICATION) << "requesting replication applier start. initialTick: " << initialTick << ", useTick: " << useTick;
// wait until previous applier thread is shut down
while (!wait(10 * 1000))
;
@ -1138,9 +1140,9 @@ int TRI_replication_applier_t::start(TRI_voc_tick_t initialTick, bool useTick) {
syncer.release();
if (useTick) {
LOG(INFO) << "started replication applier for database '" << _databaseName.c_str() << "', endpoint '" << _configuration._endpoint << "' from tick " << initialTick;
LOG_TOPIC(INFO, Logger::REPLICATION) << "started replication applier for database '" << _databaseName.c_str() << "', endpoint '" << _configuration._endpoint << "' from tick " << initialTick;
} else {
LOG(INFO) << "re-started replication applier for database '" << _databaseName.c_str() << "', endpoint '" << _configuration._endpoint << "'";
LOG_TOPIC(INFO, Logger::REPLICATION) << "re-started replication applier for database '" << _databaseName.c_str() << "', endpoint '" << _configuration._endpoint << "'";
}
return TRI_ERROR_NO_ERROR;
@ -1220,12 +1222,10 @@ void TRI_replication_applier_t::stopInitialSynchronization(bool value) {
////////////////////////////////////////////////////////////////////////////////
int TRI_replication_applier_t::stop(bool resetError) {
LOG(TRACE) << "requesting replication applier stop";
if (_vocbase->_type == TRI_VOCBASE_TYPE_COORDINATOR) {
return TRI_ERROR_CLUSTER_UNSUPPORTED;
}
{
WRITE_LOCKER(writeLocker, _statusLock);
@ -1235,6 +1235,7 @@ int TRI_replication_applier_t::stop(bool resetError) {
if (!_state._active) {
return TRI_ERROR_NO_ERROR;
}
_state._active = false;
setTermination(true);
@ -1259,7 +1260,7 @@ int TRI_replication_applier_t::stop(bool resetError) {
setTermination(false);
LOG(INFO) << "stopped replication applier for database '" << _databaseName.c_str() << "'";
LOG_TOPIC(INFO, Logger::REPLICATION) << "stopped replication applier for database '" << _databaseName.c_str() << "'";
return res;
}
@ -1290,12 +1291,10 @@ int TRI_replication_applier_t::forget() {
////////////////////////////////////////////////////////////////////////////////
int TRI_replication_applier_t::shutdown() {
LOG(TRACE) << "requesting replication applier shutdown";
if (_vocbase->_type == TRI_VOCBASE_TYPE_COORDINATOR) {
return TRI_ERROR_CLUSTER_UNSUPPORTED;
}
{
WRITE_LOCKER(writeLocker, _statusLock);
@ -1325,7 +1324,7 @@ int TRI_replication_applier_t::shutdown() {
setTermination(false);
LOG(INFO) << "stopped replication applier for database '" << _databaseName.c_str() << "'";
LOG_TOPIC(INFO, Logger::REPLICATION) << "shut down replication applier for database '" << _databaseName.c_str() << "'";
return res;
}
@ -1340,7 +1339,7 @@ bool TRI_replication_applier_t::wait(uint64_t sleepTime) {
}
if (sleepTime > 0) {
LOG(TRACE) << "replication applier going to sleep for " << sleepTime << " ns";
LOG_TOPIC(TRACE, Logger::REPLICATION) << "replication applier going to sleep for " << sleepTime << " ns";
static uint64_t const SleepChunk = 500 * 1000;
@ -1498,7 +1497,7 @@ int TRI_replication_applier_t::doSetError(int errorCode, char const* msg) {
// log error message
if (errorCode != TRI_ERROR_REPLICATION_APPLIER_STOPPED) {
LOG(ERR) << "replication applier error for database '" << _databaseName.c_str() << "': " << realMsg;
LOG_TOPIC(ERR, Logger::REPLICATION) << "replication applier error for database '" << _databaseName << "': " << realMsg;
}
_state._lastError._code = errorCode;

View File

@ -1,65 +0,0 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Jan Steemann
////////////////////////////////////////////////////////////////////////////////
#include "replication-master.h"
#include "Basics/Logger.h"
#include "Basics/tri-strings.h"
////////////////////////////////////////////////////////////////////////////////
/// @brief initialize a master info struct
////////////////////////////////////////////////////////////////////////////////
void TRI_InitMasterInfoReplication(TRI_replication_master_info_t* info,
char const* endpoint) {
TRI_ASSERT(endpoint != nullptr);
// duplicate the endpoint string into the CORE memory zone; the struct owns
// the copy, and TRI_DestroyMasterInfoReplication() must be called to free it
info->_endpoint = TRI_DuplicateString(TRI_CORE_MEM_ZONE, endpoint);
// all remaining state starts out as "unknown"/inactive; it is filled in once
// the master has been contacted and reported its state
info->_serverId = 0;
info->_majorVersion = 0;
info->_minorVersion = 0;
info->_lastLogTick = 0;
info->_active = false;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief destroy a master info struct
////////////////////////////////////////////////////////////////////////////////
void TRI_DestroyMasterInfoReplication(TRI_replication_master_info_t* info) {
// _endpoint is the only heap-allocated member (allocated by
// TRI_InitMasterInfoReplication); free it and null the pointer so that a
// repeated destroy call is harmless
if (info->_endpoint != nullptr) {
TRI_FreeString(TRI_CORE_MEM_ZONE, info->_endpoint);
info->_endpoint = nullptr;
}
}
////////////////////////////////////////////////////////////////////////////////
/// @brief log information about the master state
////////////////////////////////////////////////////////////////////////////////
void TRI_LogMasterInfoReplication(TRI_replication_master_info_t const* info,
char const* prefix) {
TRI_ASSERT(info->_endpoint != nullptr);
// logs e.g. "<prefix> master at <endpoint>, id ..., version ..., last log tick ..."
// NOTE(review): the leading "" literal looks redundant -- prefix streams fine
// on its own; confirm before removing
LOG(INFO) << "" << prefix << " master at " << info->_endpoint << ", id " << info->_serverId << ", version " << info->_majorVersion << "." << info->_minorVersion << ", last log tick " << info->_lastLogTick;
}

View File

@ -1,63 +0,0 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Jan Steemann
////////////////////////////////////////////////////////////////////////////////
#ifndef ARANGOD_VOC_BASE_REPLICATION_MASTER_H
#define ARANGOD_VOC_BASE_REPLICATION_MASTER_H 1
#include "Basics/Common.h"
#include "VocBase/replication-common.h"
////////////////////////////////////////////////////////////////////////////////
/// @brief state information about replication master
////////////////////////////////////////////////////////////////////////////////
typedef struct TRI_replication_master_info_s {
// endpoint string; heap-allocated in the CORE memory zone by
// TRI_InitMasterInfoReplication and owned by this struct
char* _endpoint;
// id reported by the master server (0 until known)
TRI_server_id_t _serverId;
// master's server version, split into major/minor parts (0 until known)
int _majorVersion;
int _minorVersion;
// last log tick reported by the master (0 until known)
TRI_voc_tick_t _lastLogTick;
// whether the master reported itself as active
bool _active;
} TRI_replication_master_info_t;
////////////////////////////////////////////////////////////////////////////////
/// @brief initialize a master info struct
////////////////////////////////////////////////////////////////////////////////
void TRI_InitMasterInfoReplication(TRI_replication_master_info_t*, char const*);
////////////////////////////////////////////////////////////////////////////////
/// @brief destroy a master info struct
////////////////////////////////////////////////////////////////////////////////
void TRI_DestroyMasterInfoReplication(TRI_replication_master_info_t*);
////////////////////////////////////////////////////////////////////////////////
/// @brief log information about the master state
////////////////////////////////////////////////////////////////////////////////
void TRI_LogMasterInfoReplication(TRI_replication_master_info_t const*,
char const*);
#endif

View File

@ -150,7 +150,6 @@ applier.properties = function(config) {
return requestResult;
};
////////////////////////////////////////////////////////////////////////////////
/// @brief performs a one-time synchronization with a remote endpoint
////////////////////////////////////////////////////////////////////////////////
@ -162,33 +161,31 @@ var sync = function(config) {
const headers = {
"X-Arango-Async": "store"
};
const requestResult = db._connection.PUT_RAW("/_api/replication/sync", body, headers);
arangosh.checkRequestResult(requestResult);
if (config.async) {
const requestResult = db._connection.PUT_RAW("/_api/replication/sync", body, headers);
arangosh.checkRequestResult(requestResult);
return requestResult.headers["x-arango-async-id"];
} else {
const requestResult = db._connection.PUT_RAW("/_api/replication/sync", body, headers);
arangosh.checkRequestResult(requestResult);
}
let count = 0;
let count = 0;
while (true) {
const jobResult = db._connection.PUT(
"/_api/job/" + requestResult.headers["x-arango-async-id"], "");
arangosh.checkRequestResult(jobResult);
while (true) {
const jobResult = db._connection.PUT(
"/_api/job/" + requestResult.headers["x-arango-async-id"], "");
arangosh.checkRequestResult(jobResult);
if (jobResult.code !== 204) {
return jobResult;
}
if (++count % 6 === 0) {
internal.print("still synchronizing, please wait...");
}
internal.sleep(10);
if (jobResult.code !== 204) {
return jobResult;
}
if (++count % 6 === 0) {
internal.print("still synchronizing, please wait...");
}
internal.sleep(5);
}
};
@ -204,25 +201,65 @@ var syncCollection = function(collection, config) {
config.restrictType = "include";
config.restrictCollections = [collection];
config.includeSystem = true;
var body = JSON.stringify(config);
var requestResult;
if (config.async) {
var headers = {
"X-Arango-Async": "store"
};
requestResult = db._connection.PUT_RAW("/_api/replication/sync", body, headers);
} else {
requestResult = db._connection.PUT("/_api/replication/sync", body);
return sync(config);
};
////////////////////////////////////////////////////////////////////////////////
/// @brief sets up the replication (all-in-one function for initial
/// synchronization and continuous replication)
////////////////////////////////////////////////////////////////////////////////
var setupReplication = function(config) {
config = config || { };
if (! config.hasOwnProperty('autoStart')) {
config.autoStart = true;
}
if (! config.hasOwnProperty('includeSystem')) {
config.includeSystem = true;
}
if (! config.hasOwnProperty('verbose')) {
config.verbose = false;
}
const db = internal.db;
const body = JSON.stringify(config);
const headers = {
"X-Arango-Async": "store"
};
const requestResult = db._connection.PUT_RAW("/_api/replication/make-slave", body, headers);
arangosh.checkRequestResult(requestResult);
if (config.async) {
return requestResult.headers["x-arango-async-id"];
}
let count = 0;
return requestResult;
while (true) {
const jobResult = db._connection.PUT(
"/_api/job/" + requestResult.headers["x-arango-async-id"], "");
arangosh.checkRequestResult(jobResult);
if (jobResult.code !== 204) {
return jobResult;
}
if (++count % 6 === 0) {
internal.print("still synchronizing, please wait...");
}
internal.sleep(5);
}
};
////////////////////////////////////////////////////////////////////////////////
/// @brief queries the sync result status
////////////////////////////////////////////////////////////////////////////////
var getSyncResult = function(id) {
var db = internal.db;
@ -250,11 +287,11 @@ var serverId = function() {
return requestResult.serverId;
};
exports.logger = logger;
exports.applier = applier;
exports.sync = sync;
exports.syncCollection = syncCollection;
exports.setupReplication = setupReplication;
exports.getSyncResult = getSyncResult;
exports.serverId = serverId;
});

View File

@ -149,7 +149,6 @@ applier.properties = function(config) {
return requestResult;
};
////////////////////////////////////////////////////////////////////////////////
/// @brief performs a one-time synchronization with a remote endpoint
////////////////////////////////////////////////////////////////////////////////
@ -161,33 +160,31 @@ var sync = function(config) {
const headers = {
"X-Arango-Async": "store"
};
const requestResult = db._connection.PUT_RAW("/_api/replication/sync", body, headers);
arangosh.checkRequestResult(requestResult);
if (config.async) {
const requestResult = db._connection.PUT_RAW("/_api/replication/sync", body, headers);
arangosh.checkRequestResult(requestResult);
return requestResult.headers["x-arango-async-id"];
} else {
const requestResult = db._connection.PUT_RAW("/_api/replication/sync", body, headers);
arangosh.checkRequestResult(requestResult);
}
let count = 0;
let count = 0;
while (true) {
const jobResult = db._connection.PUT(
"/_api/job/" + requestResult.headers["x-arango-async-id"], "");
arangosh.checkRequestResult(jobResult);
while (true) {
const jobResult = db._connection.PUT(
"/_api/job/" + requestResult.headers["x-arango-async-id"], "");
arangosh.checkRequestResult(jobResult);
if (jobResult.code !== 204) {
return jobResult;
}
if (++count % 6 === 0) {
internal.print("still synchronizing, please wait...");
}
internal.sleep(10);
if (jobResult.code !== 204) {
return jobResult;
}
if (++count % 6 === 0) {
internal.print("still synchronizing, please wait...");
}
internal.sleep(5);
}
};
@ -203,25 +200,65 @@ var syncCollection = function(collection, config) {
config.restrictType = "include";
config.restrictCollections = [collection];
config.includeSystem = true;
var body = JSON.stringify(config);
var requestResult;
if (config.async) {
var headers = {
"X-Arango-Async": "store"
};
requestResult = db._connection.PUT_RAW("/_api/replication/sync", body, headers);
} else {
requestResult = db._connection.PUT("/_api/replication/sync", body);
return sync(config);
};
////////////////////////////////////////////////////////////////////////////////
/// @brief sets up the replication (all-in-one function for initial
/// synchronization and continuous replication)
////////////////////////////////////////////////////////////////////////////////
var setupReplication = function(config) {
config = config || { };
if (! config.hasOwnProperty('autoStart')) {
config.autoStart = true;
}
if (! config.hasOwnProperty('includeSystem')) {
config.includeSystem = true;
}
if (! config.hasOwnProperty('verbose')) {
config.verbose = false;
}
const db = internal.db;
const body = JSON.stringify(config);
const headers = {
"X-Arango-Async": "store"
};
const requestResult = db._connection.PUT_RAW("/_api/replication/make-slave", body, headers);
arangosh.checkRequestResult(requestResult);
if (config.async) {
return requestResult.headers["x-arango-async-id"];
}
let count = 0;
return requestResult;
while (true) {
const jobResult = db._connection.PUT(
"/_api/job/" + requestResult.headers["x-arango-async-id"], "");
arangosh.checkRequestResult(jobResult);
if (jobResult.code !== 204) {
return jobResult;
}
if (++count % 6 === 0) {
internal.print("still synchronizing, please wait...");
}
internal.sleep(5);
}
};
////////////////////////////////////////////////////////////////////////////////
/// @brief queries the sync result status
////////////////////////////////////////////////////////////////////////////////
var getSyncResult = function(id) {
var db = internal.db;
@ -249,10 +286,10 @@ var serverId = function() {
return requestResult.serverId;
};
exports.logger = logger;
exports.applier = applier;
exports.sync = sync;
exports.syncCollection = syncCollection;
exports.setupReplication = setupReplication;
exports.getSyncResult = getSyncResult;
exports.serverId = serverId;

View File

@ -29,8 +29,6 @@
var internal = require("internal");
var logger = { };
var applier = { };
@ -106,7 +104,6 @@ applier.properties = function (config) {
return internal.configureReplicationApplier(config);
};
////////////////////////////////////////////////////////////////////////////////
/// @brief performs a one-time synchronization with a remote endpoint
////////////////////////////////////////////////////////////////////////////////
@ -131,6 +128,42 @@ function syncCollection (collection, config) {
return internal.synchronizeReplication(config);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief sets up the replication (all-in-one function for initial
/// synchronization and continuous replication)
////////////////////////////////////////////////////////////////////////////////
function setupReplication (config) {
config = config || { };
// fill in defaults for options the caller did not provide
if (! config.hasOwnProperty('autoStart')) {
config.autoStart = true;
}
if (! config.hasOwnProperty('includeSystem')) {
config.includeSystem = true;
}
if (! config.hasOwnProperty('verbose')) {
config.verbose = false;
}
try {
// stop previous instance
applier.stop();
}
catch (err) {
// intentionally ignored: there may be no applier running yet
}
// remove existing configuration
applier.forget();
// run initial sync
var result = internal.synchronizeReplication(config);
// store applier configuration
applier.properties(config);
// continue applying changes from the tick at which the initial sync ended
applier.start(result.lastLogTick);
return applier.state();
}
////////////////////////////////////////////////////////////////////////////////
/// @brief returns the server's id
////////////////////////////////////////////////////////////////////////////////
@ -139,11 +172,10 @@ function serverId () {
return internal.serverId();
}
exports.logger = logger;
exports.applier = applier;
exports.sync = sync;
exports.syncCollection = syncCollection;
exports.serverId = serverId;
exports.logger = logger;
exports.applier = applier;
exports.sync = sync;
exports.syncCollection = syncCollection;
exports.setupReplication = setupReplication;
exports.serverId = serverId;