mirror of https://gitee.com/bigwinds/arangodb
Merge branch 'devel' of github.com:arangodb/arangodb into devel
commit a307ee57c4
@@ -250,6 +250,8 @@ endif ()

 find_package(PythonInterp 2 EXACT REQUIRED)
+get_filename_component(PYTHON_EXECUTABLE "${PYTHON_EXECUTABLE}" REALPATH)
+set($ENV{PYTHON_EXECUTABLE} ${PYTHON_EXECUTABLE})

 if (NOT WINDOWS)
   find_program(CHMOD_EXECUTABLE chmod)
   find_program(CHOWN_EXECUTABLE chown)

@@ -836,16 +838,11 @@ if (USE_MAINTAINER_MODE)

   foreach (m IN LISTS ERROR_FILES)
     add_custom_command(
-      OUTPUT
-        ${CMAKE_SOURCE_DIR}/${m}
-      COMMAND
-        PYTHON_EXECUTABLE=${PYTHON_EXECUTABLE} ./utils/generateErrorfile.sh ./${ERRORS_DAT} ./${m}
-      DEPENDS
-        ${CMAKE_SOURCE_DIR}/${ERRORS_DAT}
-      WORKING_DIRECTORY
-        ${CMAKE_SOURCE_DIR}
-      COMMENT
-        "Building errors files ${m}"
+      OUTPUT ${CMAKE_SOURCE_DIR}/${m}
+      COMMAND ./utils/generateErrorfile.sh ./${ERRORS_DAT} ./${m}
+      DEPENDS ${CMAKE_SOURCE_DIR}/${ERRORS_DAT}
+      WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
+      COMMENT "Building errors files ${m}"
       VERBATIM
     )

@@ -870,16 +867,11 @@ if (USE_MAINTAINER_MODE)

   foreach (m IN LISTS MIMETYPES_FILES)
     add_custom_command(
-      OUTPUT
-        ${CMAKE_SOURCE_DIR}/${m}
-      COMMAND
-        PYTHON_EXECUTABLE=${PYTHON_EXECUTABLE} ./utils/generateMimetypes.sh ./${MIMETYPES_DAT} ./${m}
-      DEPENDS
-        ${CMAKE_SOURCE_DIR}/${MIMETYPES_DAT}
-      WORKING_DIRECTORY
-        ${CMAKE_SOURCE_DIR}
-      COMMENT
-        "Building mimetypes files ${m}"
+      OUTPUT ${CMAKE_SOURCE_DIR}/${m}
+      COMMAND ./utils/generateMimetypes.sh ./${MIMETYPES_DAT} ./${m}
+      DEPENDS ${CMAKE_SOURCE_DIR}/${MIMETYPES_DAT}
+      WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
+      COMMENT "Building mimetypes files ${m}"
       VERBATIM
     )
@@ -462,7 +462,9 @@ if [ -z "${MSVC}" ]; then
     if test -z "${STRIP}"; then
         STRIP=/usr/bin/strip
         if [ ! -f ${STRIP} ] ; then
+            set +e
             STRIP=`which strip`
+            set -e
         fi
         export STRIP
     fi

@@ -473,7 +475,9 @@ if [ -z "${MSVC}" ]; then
    if test -z "${OBJCOPY}"; then
        OBJCOPY=/usr/bin/objcopy
        if [ ! -f ${OBJCOPY} ] ; then
+           set +e
            OBJCOPY=`which objcopy`
+           set -e
        fi
        export OBJCOPY
    fi
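Note: the two hunks above wrap the `which` fallback in `set +e` / `set -e` so that a missing strip/objcopy binary does not abort a script running under `set -e`. A minimal C++ sketch of the same "try the conventional location, then search PATH" idea, using only POSIX calls (the helper name and return convention are illustrative, not part of this commit):

    #include <unistd.h>   // access()
    #include <cstdlib>    // getenv()
    #include <sstream>
    #include <string>

    // Return the first existing executable candidate: a fixed default path
    // first, then every directory listed in $PATH. Empty string if none.
    static std::string findTool(std::string const& defaultPath,
                                std::string const& name) {
      if (::access(defaultPath.c_str(), X_OK) == 0) {
        return defaultPath;  // e.g. /usr/bin/strip
      }
      if (char const* path = std::getenv("PATH")) {
        std::istringstream dirs(path);
        std::string dir;
        while (std::getline(dirs, dir, ':')) {
          std::string candidate = dir + "/" + name;
          if (::access(candidate.c_str(), X_OK) == 0) {
            return candidate;
          }
        }
      }
      return std::string();  // not found; caller decides how to degrade
    }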
@@ -1108,7 +1108,8 @@ int ClusterInfo::createCollectionCoordinator(std::string const& databaseName,
         }
       }
       if (tmpHaveError) {
-        *errMsg = "Error in creation of collection:" + tmpMsg;
+        *errMsg = "Error in creation of collection:" + tmpMsg + " "
+                  + __FILE__ + std::to_string(__LINE__);
         *dbServerResult = TRI_ERROR_CLUSTER_COULD_NOT_CREATE_COLLECTION;
         return true;
       }

@@ -1150,6 +1151,7 @@ int ClusterInfo::createCollectionCoordinator(std::string const& databaseName,
   AgencyCommResult res = ac.sendTransactionWithFailover(transaction);

   if (!res.successful()) {
+    errorMsg += std::string(" ") + __FILE__ + std::to_string(__LINE__);
     events::CreateCollection(
         name, TRI_ERROR_CLUSTER_COULD_NOT_CREATE_COLLECTION_IN_PLAN);
     return setErrormsg(TRI_ERROR_CLUSTER_COULD_NOT_CREATE_COLLECTION_IN_PLAN,

@@ -1724,6 +1726,7 @@ int ClusterInfo::ensureIndexCoordinator(
   AgencyCommResult result = ac.sendTransactionWithFailover(trx, 0.0);

   if (!result.successful()) {
+    errorMsg += std::string(" ") + __FILE__ + ":" + std::to_string(__LINE__);
     resultBuilder = *resBuilder;
     return setErrormsg(TRI_ERROR_CLUSTER_COULD_NOT_CREATE_COLLECTION_IN_PLAN,
                        errorMsg);

@@ -1944,6 +1947,7 @@ int ClusterInfo::dropIndexCoordinator(std::string const& databaseName,
   AgencyCommResult result = ac.sendTransactionWithFailover(trx, 0.0);

   if (!result.successful()) {
+    errorMsg += std::string(" ") + __FILE__ + ":" + std::to_string(__LINE__);
     events::DropIndex(collectionID, idString,
                       TRI_ERROR_CLUSTER_COULD_NOT_CREATE_COLLECTION_IN_PLAN);
     return setErrormsg(TRI_ERROR_CLUSTER_COULD_NOT_CREATE_COLLECTION_IN_PLAN,
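Note: these ClusterInfo hunks all tag agency error messages with the source location (`__FILE__`/`__LINE__`) so a failed transaction can be traced to its exact call site. A self-contained sketch of the idiom, factored into a macro so the concatenation is written once (the macro name is illustrative, not from the commit):

    #include <iostream>
    #include <string>

    // The preprocessor substitutes __FILE__/__LINE__ at the point where the
    // macro is expanded, so every use records its own call site (a plain
    // helper function would record the helper's location instead).
    #define APPEND_WHERE(msg) \
      ((msg) += std::string(" ") + __FILE__ + ":" + std::to_string(__LINE__))

    int main() {
      std::string errorMsg = "could not create collection in plan";
      APPEND_WHERE(errorMsg);  // e.g. "... example.cpp:12"
      std::cout << errorMsg << "\n";
      return 0;
    }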
@@ -263,11 +263,45 @@ static void JS_GetAgency(v8::FunctionCallbackInfo<v8::Value> const& args) {
   TRI_V8_TRY_CATCH_END
 }

+////////////////////////////////////////////////////////////////////////////////
+/// @brief acquires a read-lock in the agency
+////////////////////////////////////////////////////////////////////////////////
+
+static void JS_LockReadAgency(v8::FunctionCallbackInfo<v8::Value> const& args) {
+  TRI_V8_TRY_CATCH_BEGIN(isolate);
+  v8::HandleScope scope(isolate);
+
+  if (args.Length() < 1) {
+    TRI_V8_THROW_EXCEPTION_USAGE("lockRead(<part>, <ttl>, <timeout>)");
+  }
+
+  std::string const part = TRI_ObjectToString(args[0]);
+
+  double ttl = 0.0;
+  if (args.Length() > 1) {
+    ttl = TRI_ObjectToDouble(args[1]);
+  }
+
+  double timeout = 0.0;
+  if (args.Length() > 2) {
+    timeout = TRI_ObjectToDouble(args[2]);
+  }
+
+  AgencyComm comm;
+  if (!comm.lockRead(part, ttl, timeout)) {
+    TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
+                                   "unable to acquire lock");
+  }
+
+  TRI_V8_RETURN_TRUE();
+  TRI_V8_TRY_CATCH_END
+}
+
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief read transaction to the agency
 ////////////////////////////////////////////////////////////////////////////////

-static void JS_APIAgency(std::string const& method,
+static void JS_APIAgency(std::string const& envelope,
                          v8::FunctionCallbackInfo<v8::Value> const& args) {
   TRI_V8_TRY_CATCH_BEGIN(isolate)
   v8::HandleScope scope(isolate);

@@ -288,7 +322,7 @@ static void JS_APIAgency(std::string const& method,
       comm.sendWithFailover(
           arangodb::rest::RequestType::POST,
           AgencyCommManager::CONNECTION_OPTIONS._requestTimeout,
-          std::string("/_api/agency/") + method, builder.toJson());
+          std::string("/_api/agency/") + envelope, builder.toJson());

   if (!result.successful()) {
     THROW_AGENCY_EXCEPTION(result);

@@ -324,6 +358,101 @@ static void JS_TransactAgency(v8::FunctionCallbackInfo<v8::Value> const& args) {
 }

+////////////////////////////////////////////////////////////////////////////////
+/// @brief acquires a write-lock in the agency
+////////////////////////////////////////////////////////////////////////////////
+
+static void JS_LockWriteAgency(
+    v8::FunctionCallbackInfo<v8::Value> const& args) {
+  TRI_V8_TRY_CATCH_BEGIN(isolate);
+  v8::HandleScope scope(isolate);
+
+  if (args.Length() < 1) {
+    TRI_V8_THROW_EXCEPTION_USAGE("lockWrite(<part>, <ttl>, <timeout>)");
+  }
+
+  std::string const part = TRI_ObjectToString(args[0]);
+
+  double ttl = 0.0;
+  if (args.Length() > 1) {
+    ttl = TRI_ObjectToDouble(args[1]);
+  }
+
+  double timeout = 0.0;
+  if (args.Length() > 2) {
+    timeout = TRI_ObjectToDouble(args[2]);
+  }
+
+  AgencyComm comm;
+  if (!comm.lockWrite(part, ttl, timeout)) {
+    TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
+                                   "unable to acquire lock");
+  }
+
+  TRI_V8_RETURN_TRUE();
+  TRI_V8_TRY_CATCH_END
+}
+
+////////////////////////////////////////////////////////////////////////////////
+/// @brief releases a read-lock in the agency
+////////////////////////////////////////////////////////////////////////////////
+
+static void JS_UnlockReadAgency(
+    v8::FunctionCallbackInfo<v8::Value> const& args) {
+  TRI_V8_TRY_CATCH_BEGIN(isolate);
+  v8::HandleScope scope(isolate);
+
+  if (args.Length() > 2) {
+    TRI_V8_THROW_EXCEPTION_USAGE("unlockRead(<part>, <timeout>)");
+  }
+
+  std::string const part = TRI_ObjectToString(args[0]);
+
+  double timeout = 0.0;
+  if (args.Length() > 1) {
+    timeout = TRI_ObjectToDouble(args[1]);
+  }
+
+  AgencyComm comm;
+  if (!comm.unlockRead(part, timeout)) {
+    TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
+                                   "unable to release lock");
+  }
+
+  TRI_V8_RETURN_TRUE();
+  TRI_V8_TRY_CATCH_END
+}
+
+////////////////////////////////////////////////////////////////////////////////
+/// @brief releases a write-lock in the agency
+////////////////////////////////////////////////////////////////////////////////
+
+static void JS_UnlockWriteAgency(
+    v8::FunctionCallbackInfo<v8::Value> const& args) {
+  TRI_V8_TRY_CATCH_BEGIN(isolate);
+  v8::HandleScope scope(isolate);
+
+  if (args.Length() > 2) {
+    TRI_V8_THROW_EXCEPTION_USAGE("unlockWrite(<part>, <timeout>)");
+  }
+
+  std::string const part = TRI_ObjectToString(args[0]);
+
+  double timeout = 0.0;
+  if (args.Length() > 1) {
+    timeout = TRI_ObjectToDouble(args[1]);
+  }
+
+  AgencyComm comm;
+  if (!comm.unlockWrite(part, timeout)) {
+    TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
+                                   "unable to release lock");
+  }
+
+  TRI_V8_RETURN_TRUE();
+  TRI_V8_TRY_CATCH_END
+}
+
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief removes a value from the agency
 ////////////////////////////////////////////////////////////////////////////////

@@ -1189,25 +1318,6 @@ static void JS_CoordinatorConfigServerState(
   TRI_V8_TRY_CATCH_END
 }

-#ifdef DEBUG_SYNC_REPLICATION
-////////////////////////////////////////////////////////////////////////////////
-/// @brief set arangoserver state to initialized
-////////////////////////////////////////////////////////////////////////////////
-
-static void JS_SetInitializedServerState(
-    v8::FunctionCallbackInfo<v8::Value> const& args) {
-  TRI_V8_TRY_CATCH_BEGIN(isolate);
-  v8::HandleScope scope(isolate);
-
-  if (args.Length() != 0) {
-    TRI_V8_THROW_EXCEPTION_USAGE("setInitialized()");
-  }
-
-  ServerState::instance()->setInitialized();
-  TRI_V8_TRY_CATCH_END
-}
-#endif
-
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief return whether the cluster is initialized
 ////////////////////////////////////////////////////////////////////////////////

@@ -1995,6 +2105,10 @@ void TRI_InitV8Cluster(v8::Isolate* isolate, v8::Handle<v8::Context> context) {
                        JS_IsEnabledAgency);
   TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("increaseVersion"),
                        JS_IncreaseVersionAgency);
+  TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("lockRead"),
+                       JS_LockReadAgency);
+  TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("lockWrite"),
+                       JS_LockWriteAgency);
   TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("remove"),
                        JS_RemoveAgency);
   TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("set"), JS_SetAgency);

@@ -2004,6 +2118,10 @@ void TRI_InitV8Cluster(v8::Isolate* isolate, v8::Handle<v8::Context> context) {
                        JS_PrefixAgency);
   TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("uniqid"),
                        JS_UniqidAgency);
+  TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("unlockRead"),
+                       JS_UnlockReadAgency);
+  TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("unlockWrite"),
+                       JS_UnlockWriteAgency);
   TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("version"),
                        JS_VersionAgency);

@@ -2109,10 +2227,6 @@ void TRI_InitV8Cluster(v8::Isolate* isolate, v8::Handle<v8::Context> context) {
                        JS_DBserverConfigServerState);
   TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("coordinatorConfig"),
                        JS_CoordinatorConfigServerState);
-#ifdef DEBUG_SYNC_REPLICATION
-  TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("setInitialized"),
-                       JS_SetInitializedServerState);
-#endif
   TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("initialized"),
                        JS_InitializedServerState);
   TRI_AddMethodVocbase(isolate, rt, TRI_V8_ASCII_STRING("isCoordinator"),
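Note: the new `lockRead`/`lockWrite`/`unlockRead`/`unlockWrite` bindings expose AgencyComm's advisory locks to JavaScript, and the caller must pair every successful lock with an unlock even when the protected action throws (the JavaScript `writeLocked` helper further down in this commit does this with try/catch). A minimal RAII sketch of the same pairing on the C++ side — the guard class is illustrative, not part of the commit; only the `lockWrite(part, ttl, timeout)` / `unlockWrite(part, timeout)` calls are taken from the diff:

    #include <string>

    class WriteLockGuard {
     public:
      WriteLockGuard(std::string const& part, double ttl, double timeout)
          : _part(part), _timeout(timeout) {
        AgencyComm comm;
        _locked = comm.lockWrite(_part, ttl, _timeout);
      }

      ~WriteLockGuard() {
        if (_locked) {
          AgencyComm comm;
          comm.unlockWrite(_part, _timeout);  // released on every exit path
        }
      }

      bool locked() const { return _locked; }

     private:
      std::string _part;
      double _timeout;
      bool _locked = false;
    };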
@@ -469,6 +469,7 @@ int MMFilesCollection::reserveJournalSpace(TRI_voc_tick_t tick,
                                            TRI_voc_size_t size,
                                            char*& resultPosition,
                                            TRI_datafile_t*& resultDatafile) {
+
   // reset results
   resultPosition = nullptr;
   resultDatafile = nullptr;

@@ -484,6 +485,11 @@ int MMFilesCollection::reserveJournalSpace(TRI_voc_tick_t tick,
   }

   while (true) {
+    // no need to go on if the collection is already deleted
+    if (_logicalCollection->status() == TRI_VOC_COL_STATUS_DELETED) {
+      return TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND;
+    }
+
     TRI_datafile_t* datafile = nullptr;

     if (_journals.empty()) {

@@ -515,7 +521,6 @@ int MMFilesCollection::reserveJournalSpace(TRI_voc_tick_t tick,
     TRI_ASSERT(datafile != nullptr);

     // try to reserve space in the datafile
-
     TRI_df_marker_t* position = nullptr;
     int res = datafile->reserveElement(size, &position, targetSize);
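Note: the added check makes each iteration of the reserve loop re-validate that the collection still exists, so a concurrent drop cannot leave the thread retrying against a journal that will never be created. A generic sketch of the pattern under assumed names (nothing here is from the commit except the idea of re-checking inside the loop):

    #include <atomic>

    enum class Status { ACTIVE, DELETED };

    // Stand-in for one allocation attempt that may transiently fail.
    static bool tryReserve() { return true; }

    // Re-check the cancellation condition at the top of every retry, not
    // just once before the loop: the state can change while we retry.
    static int reserveSpace(std::atomic<Status> const& status) {
      while (true) {
        if (status.load() == Status::DELETED) {
          return -1;  // caller maps this to "collection not found"
        }
        if (tryReserve()) {
          return 0;
        }
        // transient failure: loop and try again
      }
    }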
@@ -78,7 +78,7 @@ static TRI_voc_crc_t Crc28(TRI_voc_crc_t crc, void const* data, size_t length) {
   return crc;
 }

-static bool IsMarker28 (void const* marker) {
+static bool IsMarker28(void const* marker) {
   struct Marker28 {
     TRI_voc_size_t _size;
     TRI_voc_crc_t _crc;

@@ -142,8 +142,6 @@ static bool CheckCrcMarker(TRI_df_marker_t const* marker, char const* end) {
   return marker->getCrc() == expected;
 }

-}
-
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief extract the numeric part from a filename
 /// the filename must look like this: /.*type-abc\.ending$/, where abc is

@@ -166,6 +164,8 @@ static uint64_t GetNumericFilenamePart(char const* filename) {
   return StringUtils::uint64(pos2 + 1, pos1 - pos2 - 1);
 }

+}
+
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief creates a new anonymous datafile
 ///

@@ -254,10 +254,9 @@ static TRI_datafile_t* CreatePhysicalDatafile(std::string const& filename,
     // remove empty file
     TRI_UnlinkFile(filename.c_str());

-    LOG(ERR) << "cannot memory map file '" << filename << "': '" << TRI_errno_string((int)res) << "'";
+    LOG(ERR) << "cannot memory map file '" << filename << "': '" << TRI_errno_string(res) << "'";
     LOG(ERR) << "The database directory might reside on a shared folder "
-                "(VirtualBox, VMWare) or an NFS "
-                "mounted volume which does not allow memory mapped files.";
+                "(VirtualBox, VMWare) or an NFS-mounted volume which does not allow memory mapped files.";
     return nullptr;
   }

@@ -296,9 +295,6 @@ TRI_datafile_t* TRI_datafile_t::create(std::string const& filename, TRI_voc_fid_
   if (filename.empty()) {
 #ifdef TRI_HAVE_ANONYMOUS_MMAP
     datafile.reset(CreateAnonymousDatafile(fid, maximalSize));
-#else
-    // system does not support anonymous mmap
-    return nullptr;
 #endif
   } else {
     datafile.reset(CreatePhysicalDatafile(filename, fid, maximalSize));

@@ -472,27 +468,27 @@ int TRI_datafile_t::reserveElement(TRI_voc_size_t size, TRI_df_marker_t** positi
 }

 void TRI_datafile_t::sequentialAccess() {
-  TRI_MMFileAdvise(_data, _maximalSize, TRI_MADVISE_SEQUENTIAL);
+  TRI_MMFileAdvise(_data, _initSize, TRI_MADVISE_SEQUENTIAL);
 }

 void TRI_datafile_t::randomAccess() {
-  TRI_MMFileAdvise(_data, _maximalSize, TRI_MADVISE_RANDOM);
+  TRI_MMFileAdvise(_data, _initSize, TRI_MADVISE_RANDOM);
 }

 void TRI_datafile_t::willNeed() {
-  TRI_MMFileAdvise(_data, _maximalSize, TRI_MADVISE_WILLNEED);
+  TRI_MMFileAdvise(_data, _initSize, TRI_MADVISE_WILLNEED);
 }

 void TRI_datafile_t::dontNeed() {
-  TRI_MMFileAdvise(_data, _maximalSize, TRI_MADVISE_DONTNEED);
+  TRI_MMFileAdvise(_data, _initSize, TRI_MADVISE_DONTNEED);
 }

 bool TRI_datafile_t::readOnly() {
-  return (TRI_ProtectMMFile(_data, _maximalSize, PROT_READ, _fd) == TRI_ERROR_NO_ERROR);
+  return (TRI_ProtectMMFile(_data, _initSize, PROT_READ, _fd) == TRI_ERROR_NO_ERROR);
 }

 bool TRI_datafile_t::readWrite() {
-  return (TRI_ProtectMMFile(_data, _maximalSize, PROT_READ | PROT_WRITE, _fd) == TRI_ERROR_NO_ERROR);
+  return (TRI_ProtectMMFile(_data, _initSize, PROT_READ | PROT_WRITE, _fd) == TRI_ERROR_NO_ERROR);
 }

 int TRI_datafile_t::lockInMemory() {

@@ -980,6 +976,7 @@ int TRI_datafile_t::close() {
     int res = TRI_UNMMFile(_data, _initSize, _fd, &_mmHandle);
+
     if (res != TRI_ERROR_NO_ERROR) {
       // leave file open here as it will still be memory-mapped
       LOG(ERR) << "munmap failed with: " << res;
       _state = TRI_DF_STATE_WRITE_ERROR;
       _lastError = res;

@@ -987,6 +984,7 @@ int TRI_datafile_t::close() {
     }

     if (isPhysical()) {
       TRI_ASSERT(_fd >= 0);
       int res = TRI_CLOSE(_fd);
+
       if (res != TRI_ERROR_NO_ERROR) {

@@ -1003,9 +1001,12 @@ int TRI_datafile_t::close() {
   }

+  if (_state == TRI_DF_STATE_CLOSED) {
+    TRI_ASSERT(_fd == -1);
+    LOG(TRACE) << "closing an already closed datafile '" << getName() << "'";
+    return TRI_ERROR_NO_ERROR;
+  }
+
   LOG(ERR) << "attempting to close datafile with an invalid state";

   return TRI_ERROR_ARANGO_ILLEGAL_STATE;
 }

@@ -1142,6 +1143,7 @@ int TRI_datafile_t::truncateAndSeal(TRI_voc_size_t position) {
   _currentSize = position;
-  TRI_ASSERT(_initSize == _maximalSize);
+  // do not change _initSize!
+  TRI_ASSERT(maximalSize <= _initSize);
   _maximalSize = static_cast<TRI_voc_size_t>(maximalSize);
   _fd = fd;
   _mmHandle = mmHandle;

@@ -1447,6 +1449,7 @@ bool TRI_datafile_t::fix(TRI_voc_size_t currentSize) {

   _currentSize = currentSize;
-  TRI_ASSERT(_initSize == _maximalSize);
+  TRI_ASSERT(currentSize <= _initSize);
   _maximalSize = static_cast<TRI_voc_size_t>(currentSize);
   _next = _data + _currentSize;
   _full = true;

@@ -1848,7 +1851,7 @@ TRI_datafile_t* TRI_datafile_t::openHelper(std::string const& filename, bool ign

   // check the maximal size
   if (size > header->_maximalSize) {
-    LOG(DEBUG) << "datafile '" << filename << "' has size '" << size << "', but maximal size is '" << header->_maximalSize << "'";
+    LOG(DEBUG) << "datafile '" << filename << "' has size " << size << ", but maximal size is " << header->_maximalSize;
   }

   // map datafile into memory
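Note: several of these datafile.cpp hunks switch madvise/mprotect-style calls from `_maximalSize` (which `truncateAndSeal` may shrink) to `_initSize` (the length the region was actually mapped with, now constant). Advice and protection changes have to cover the original mapping length, not the current logical size. A minimal POSIX sketch of the invariant, assuming mmap semantics (the struct and method names are illustrative):

    #include <sys/mman.h>
    #include <cstddef>

    struct MappedFile {
      void* data;               // start of the mapping
      std::size_t initSize;     // length passed to mmap() -- never changes
      std::size_t currentSize;  // logical size, may shrink later

      int adviseSequential() {
        // using currentSize here would leave the tail of the mapping with
        // stale advice; the constant initial length covers the whole region
        return ::madvise(data, initSize, MADV_SEQUENTIAL);
      }

      int makeReadOnly() {
        // likewise, protecting only currentSize bytes would leave the tail
        // of the mapping writable
        return ::mprotect(data, initSize, PROT_READ);
      }
    };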
@@ -285,11 +285,10 @@ struct TRI_datafile_t {
   void* _mmHandle;  // underlying memory map object handle (windows only)

  public:
-  TRI_voc_size_t _initSize;     // initial size of the datafile (constant)
-  TRI_voc_size_t _maximalSize;  // maximal size of the datafile (adjusted
-                                // (=reduced) at runtime)
-  TRI_voc_size_t _currentSize;  // current size of the datafile
-  TRI_voc_size_t _footerSize;   // size of the final footer
+  TRI_voc_size_t const _initSize;  // initial size of the datafile (constant)
+  TRI_voc_size_t _maximalSize;     // maximal size of the datafile (may be adjusted/reduced at runtime)
+  TRI_voc_size_t _currentSize;     // current size of the datafile
+  TRI_voc_size_t _footerSize;      // size of the final footer

   char* _data;  // start of the data array
   char* _next;  // end of the current data
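Note: making `_initSize` const states the invariant at the type level — the compiler now rejects any code path that would change it, which is exactly what the madvise/mprotect hunks above rely on. A small sketch of the consequence, with illustrative names (not the real header):

    #include <cstdint>

    struct Datafile {
      // A const member must be set in the constructor's initializer list
      // and cannot be assigned afterwards; the compiler enforces this.
      std::uint32_t const initSize;
      std::uint32_t maximalSize;  // may still shrink at runtime

      explicit Datafile(std::uint32_t size)
          : initSize(size), maximalSize(size) {}

      void truncate(std::uint32_t newSize) {
        maximalSize = newSize;  // fine
        // initSize = newSize;  // would not compile: read-only member
      }
    };

One side effect worth knowing: a const data member also makes the type non-copy-assignable, which is usually acceptable for a resource-owning structure like a datafile.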
@@ -873,14 +873,18 @@ int CollectorThread::transferMarkers(Logfile* logfile,
   arangodb::CollectionGuard collectionGuard(vocbase, collectionId, true);
   arangodb::LogicalCollection* collection = collectionGuard.collection();
   TRI_ASSERT(collection != nullptr);

+  // no need to go on if the collection is already deleted
+  if (collection->status() == TRI_VOC_COL_STATUS_DELETED) {
+    return TRI_ERROR_NO_ERROR;
+  }
+
   LOG_TOPIC(TRACE, Logger::COLLECTOR) << "collector transferring markers for '"
                                       << collection->name()
                                       << "', totalOperationsCount: " << totalOperationsCount;

-  std::unique_ptr<CollectorCache> cache(
-      new CollectorCache(collectionId, databaseId, logfile,
-                         totalOperationsCount, operations.size()));
+  auto cache = std::make_unique<CollectorCache>(collectionId, databaseId, logfile,
+                                                totalOperationsCount, operations.size());

   StorageEngine* engine = EngineSelectorFeature::ENGINE;
   int res = TRI_ERROR_INTERNAL;
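Note: the `std::make_unique` change is the idiomatic C++14 form — it avoids spelling the type twice and keeps allocation and ownership transfer in one expression. A self-contained sketch of the before/after (the `Cache` type is illustrative):

    #include <memory>
    #include <cstddef>

    struct Cache {
      Cache(std::size_t /*ops*/, std::size_t /*entries*/) {}
    };

    int main() {
      // pre-C++14 style: the type is spelled twice and `new` is explicit
      std::unique_ptr<Cache> a(new Cache(100, 8));

      // C++14 style: one expression, no naked new
      auto b = std::make_unique<Cache>(100, 8);
      return 0;
    }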
@@ -111,10 +111,11 @@ add_custom_target(remove_packages

 list(APPEND CLEAN_PACKAGES_LIST remove_packages)

-if (NOT ${ENV{SYMSRV}} STREQUAL "")
+set(SYMSRVDIR $ENV{SYMSRV})
+if (NOT ${SYMSRVDIR} STREQUAL "")
   message("Storing symbols:")
   add_custom_command(TARGET ${BIN_ARANGOD} POST_BUILD
     WORKING_DIRECTORY ${PROJECT_BINARY_DIR}
     COMMAND "find -name \*pdb |grep -v Release |grep -v Debug |grep -v 3rdParty |grep -v vc120.pdb > pdbfiles_list.txt"
-    COMMAND "symstore.exe add /f '@${PROJECT_BINARY_DIR}/pdbfiles_list.txt' /s '${ENV{SYMSRV}}' /t ArangoDB /compress")
+    COMMAND "symstore.exe add /f '@${PROJECT_BINARY_DIR}/pdbfiles_list.txt' /s '${SYMSRVDIR}' /t ArangoDB /compress")
 endif()
@@ -309,7 +309,7 @@ function makeArgsArangod (options, appDir, role) {

   return {
     'configuration': 'etc/testing/' + config,
-    '--define': 'TOP_DIR=' + TOP_DIR,
+    'define': 'TOP_DIR=' + TOP_DIR,
     'javascript.app-path': appDir,
     'http.trusted-origin': options.httpTrustedOrigin || 'all'
   };
@@ -38,11 +38,6 @@ var wait = require('internal').wait;
 var isEnterprise = require('internal').isEnterprise();
 var _ = require('lodash');

-const inccv = {'/arango/Current/Version':{'op':'increment'}};
-const incpv = {'/arango/Plan/Version':{'op':'increment'}};
-const agencyDBs = '/' + global.ArangoAgency.prefix() + '/Current/Databases/';
-const agencyCols = '/' + global.ArangoAgency.prefix() + '/Current/Collections/';
-
 var endpointToURL = function (endpoint) {
   if (endpoint.substr(0, 6) === 'ssl://') {
     return 'https://' + endpoint.substr(6);

@@ -306,12 +301,11 @@ function getLocalCollections () {
 // / @brief create databases if they exist in the plan but not locally
 // //////////////////////////////////////////////////////////////////////////////

-function createLocalDatabases (plannedDatabases, currentDatabases) {
+function createLocalDatabases (plannedDatabases, currentDatabases, writeLocked) {
   var ourselves = global.ArangoServerState.id();
   var createDatabaseAgency = function (payload) {
-    var envelope = {};
-    envelope[agencyDBs + payload.name + '/' + ourselves] = payload;
-    global.ArangoAgency.write([[envelope],[inccv]]);
+    global.ArangoAgency.set('Current/Databases/' + payload.name + '/' + ourselves,
+                            payload);
   };

   var db = require('internal').db;

@@ -345,14 +339,15 @@ function createLocalDatabases (plannedDatabases, currentDatabases) {
         payload.errorNum = err.errorNum;
         payload.errorMessage = err.errorMessage;
       }

-      createDatabaseAgency(payload);
+      writeLocked({ part: 'Current' },
+                  createDatabaseAgency,
+                  [ payload ]);
     } else if (typeof currentDatabases[name] !== 'object' || !currentDatabases[name].hasOwnProperty(ourselves)) {
       // mop: ok during cluster startup we have this buggy situation where a dbserver
       // has a database but has not yet announced it to the agency :S
-      createDatabaseAgency(payload);
+      writeLocked({ part: 'Current' },
+                  createDatabaseAgency,
+                  [ payload ]);
     }
   }
 }

@@ -362,15 +357,12 @@ function createLocalDatabases (plannedDatabases, currentDatabases) {
 // / @brief drop databases if they do exist locally but not in the plan
 // //////////////////////////////////////////////////////////////////////////////

-function dropLocalDatabases (plannedDatabases) {
+function dropLocalDatabases (plannedDatabases, writeLocked) {
   var ourselves = global.ArangoServerState.id();

   var dropDatabaseAgency = function (payload) {
     try {
-      var envelope = {};
-      envelope[agencyDBs + payload.name + '/' + ourselves] = {"op":"delete"};
-      global.ArangoAgency.write([[envelope],[inccv]]);
+      global.ArangoAgency.remove('Current/Databases/' + payload.name + '/' + ourselves);
     } catch (err) {
       // ignore errors
     }

@@ -407,8 +399,9 @@ function dropLocalDatabases (plannedDatabases) {
       }
       db._dropDatabase(name);

-      dropDatabaseAgency({name:name});
+      writeLocked({ part: 'Current' },
+                  dropDatabaseAgency,
+                  [ { name: name } ]);
     }
   }
 }

@@ -418,14 +411,12 @@ function dropLocalDatabases (plannedDatabases) {
 // / @brief clean up what's in Current/Databases for ourselves
 // //////////////////////////////////////////////////////////////////////////////

-function cleanupCurrentDatabases (currentDatabases) {
+function cleanupCurrentDatabases (currentDatabases, writeLocked) {
   var ourselves = global.ArangoServerState.id();

   var dropDatabaseAgency = function (payload) {
     try {
-      var envelope = {};
-      envelope[agencyDBs + payload.name + '/' + ourselves] = {"op":"delete"};
-      global.ArangoAgency.write([[envelope],[inccv]]);
+      global.ArangoAgency.remove('Current/Databases/' + payload.name + '/' + ourselves);
     } catch (err) {
       // ignore errors
     }

@@ -446,7 +437,9 @@ function cleanupCurrentDatabases (currentDatabases) {
       // we are entered for a database that we don't have locally
       console.debug("cleaning up entry for unknown database '%s'", name);

-      dropDatabaseAgency({name:name});
+      writeLocked({ part: 'Current' },
+                  dropDatabaseAgency,
+                  [ { name: name } ]);
     }
   }
 }

@@ -457,45 +450,38 @@ function cleanupCurrentDatabases (currentDatabases) {
 // / @brief handle database changes
 // //////////////////////////////////////////////////////////////////////////////

-function handleDatabaseChanges (plan, current) {
+function handleDatabaseChanges (plan, current, writeLocked) {
   var plannedDatabases = plan.Databases;
   var currentDatabases = current.Databases;

-  createLocalDatabases(plannedDatabases, currentDatabases);
-  dropLocalDatabases(plannedDatabases);
-  cleanupCurrentDatabases(currentDatabases);
+  createLocalDatabases(plannedDatabases, currentDatabases, writeLocked);
+  dropLocalDatabases(plannedDatabases, writeLocked);
+  cleanupCurrentDatabases(currentDatabases, writeLocked);
 }

 // //////////////////////////////////////////////////////////////////////////////
 // / @brief create collections if they exist in the plan but not locally
 // //////////////////////////////////////////////////////////////////////////////

-function createLocalCollections (
-  plannedCollections, planVersion, currentCollections, takeOverResponsibility) {
-
+function createLocalCollections (plannedCollections, planVersion,
+                                 currentCollections,
+                                 takeOverResponsibility, writeLocked) {
   var ourselves = global.ArangoServerState.id();

-  var createCollectionAgency = function (database, shard, collInfo, err) {
-
-    var payload = {
-      error: err.error,
-      errorNum: err.errorNum,
-      errorMessage: err.errorMessage,
+  var createCollectionAgency = function (database, shard, collInfo, error) {
+    var payload = { error: error.error,
+                    errorNum: error.errorNum,
+                    errorMessage: error.errorMessage,
                     satellite: collInfo.replicationFactor === 0,
                     indexes: collInfo.indexes,
                     servers: [ ourselves ],
                     planVersion: planVersion };

     console.debug('creating Current/Collections/' + database + '/' +
                   collInfo.planId + '/' + shard);

-    var envelope = {};
-    envelope[agencyCols + database + '/' + collInfo.planId + '/' + shard] = payload;
-
     global.ArangoAgency.set('Current/Collections/' + database + '/' +
-                            collInfo.planId + '/' + shard, payload);
-    global.ArangoAgency.write([[inccv]]);
+                            collInfo.planId + '/' + shard,
+                            payload);
+    console.debug('creating Current/Collections/' + database + '/' +
+                  collInfo.planId + '/' + shard + ' done.');
   };

@@ -505,7 +491,7 @@ function createLocalCollections (
   var db = require('internal').db;
   db._useDatabase('_system');

-  var migrate = function() {
+  var migrate = writeLocked => {
     var localDatabases = getLocalDatabases();
     var database;
     var i;

@@ -576,7 +562,9 @@ function createLocalCollections (
             }

             if (isLeader) {
-              createCollectionAgency(database, shard, collInfo, error);
+              writeLocked({ part: 'Current' },
+                          createCollectionAgency,
+                          [ database, shard, collInfo, error ]);
               didWrite = true;
             }
           } else {

@@ -601,7 +589,9 @@ function createLocalCollections (
               db._collection(shard).load();
             }
             if (isLeader) {
-              createCollectionAgency(database, shard, collInfo, error);
+              writeLocked({ part: 'Current' },
+                          createCollectionAgency,
+                          [ database, shard, collInfo, error ]);
               didWrite = true;
             }
           }

@@ -630,7 +620,9 @@ function createLocalCollections (
                                errorMessage: err3.errorMessage };
             }
             if (isLeader) {
-              createCollectionAgency(database, shard, collInfo, error);
+              writeLocked({ part: 'Current' },
+                          createCollectionAgency,
+                          [ database, shard, collInfo, error ]);
               didWrite = true;
             }
           }

@@ -639,7 +631,9 @@ function createLocalCollections (
           if (error.error) {
             if (takeOverResponsibility && !didWrite) {
               if (isLeader) {
-                takeOver(database, shard, collInfo, error);
+                writeLocked({ part: 'Current' },
+                            takeOver,
+                            [ database, shard, collInfo, error ]);
               }
             }
             continue; // No point to look for properties and

@@ -677,7 +671,9 @@ function createLocalCollections (
               changed = true;
             }
             if (changed && isLeader) {
-              createCollectionAgency(database, shard, collInfo, error);
+              writeLocked({ part: 'Current' },
+                          createCollectionAgency,
+                          [ database, shard, collInfo, error ]);
               didWrite = true;
             }
           }

@@ -715,14 +711,18 @@ function createLocalCollections (
               }
             }
             if (changed2 && isLeader) {
-              createCollectionAgency(database, shard, collInfo, error);
+              writeLocked({ part: 'Current' },
+                          createCollectionAgency,
+                          [ database, shard, collInfo, error ]);
               didWrite = true;
             }
           }

           if ((takeOverResponsibility && !didWrite && isLeader) ||
               (!didWrite && isLeader && !wasLeader)) {
-            takeOver(database, shard, collInfo, error);
+            writeLocked({ part: 'Current' },
+                        takeOver,
+                        [ database, shard, collInfo, error ]);
           }
         }
       }

@@ -742,8 +742,20 @@ function createLocalCollections (
     }
   };

-  migrate();
+  if (takeOverResponsibility) {
+    // mop: if this is a complete takeover we need a global lock because
+    // otherwise the coordinator might fetch results which are only partly
+    // migrated
+    var fakeLock = (lockInfo, cb, args) => {
+      if (!lockInfo || lockInfo.part !== 'Current') {
+        throw new Error('Invalid lockInfo ' + JSON.stringify(lockInfo));
+      }
+      return cb(...args);
+    };
+    writeLocked({ part: 'Current' }, migrate, [fakeLock]);
+  } else {
+    migrate(writeLocked);
+  }
 }

 function leaderResign (database, collId, shardName, ourselves) {

@@ -773,16 +785,15 @@ function leaderResign (database, collId, shardName, ourselves) {
 // / @brief drop collections if they exist locally but not in the plan
 // //////////////////////////////////////////////////////////////////////////////

-function dropLocalCollections (plannedCollections, currentCollections) {
+function dropLocalCollections (plannedCollections, currentCollections,
+                               writeLocked) {
   var ourselves = global.ArangoServerState.id();

   var dropCollectionAgency = function (database, shardID, id) {
     try {
       console.debug('dropping Current/Collections/' + database + '/' +
                     id + '/' + shardID);
-      var envelope = {};
-      envelope[agencyCols + database + '/' + id + '/' + shardID] = {"op":"delete"};
-      global.ArangoAgency.write([[envelope],[inccv]]);
+      global.ArangoAgency.remove('Current/Collections/' + database + '/' + id + '/' + shardID);
+      console.debug('dropping Current/Collections/' + database + '/' +
+                    id + '/' + shardID + ' done.');
     } catch (err) {

@@ -865,7 +876,9 @@ function dropLocalCollections (plannedCollections, currentCollections) {
           console.debug('cleaning out Current entry for shard %s in',
                         'agency for %s/%s', collection, database,
                         collections[collection].name);
-          dropCollectionAgency(database, collection, collections[collection].planId);
+          writeLocked({ part: 'Current' },
+                      dropCollectionAgency,
+                      [ database, collection, collections[collection].planId ]);
         }
       }
     }

@@ -884,16 +897,13 @@ function dropLocalCollections (plannedCollections, currentCollections) {
 // / @brief clean up what's in Current/Collections for ourselves
 // //////////////////////////////////////////////////////////////////////////////

-function cleanupCurrentCollections (plannedCollections, currentCollections) {
+function cleanupCurrentCollections (plannedCollections, currentCollections,
+                                    writeLocked) {
   var dropCollectionAgency = function (database, collection, shardID) {
     try {
       console.debug('cleaning Current/Collections/' + database + '/' +
                     collection + '/' + shardID);
-
-      var envelope = {};
-      envelope[agencyCols + database + '/' + collection + '/' + shardID] = {"op": "delete"};
-      global.ArangoAgency.write([[envelope],[inccv]]);
-
+      global.ArangoAgency.remove('Current/Collections/' + database + '/' + collection + '/' + shardID);
+      console.debug('cleaning Current/Collections/' + database + '/' +
+                    collection + '/' + shardID + ' done.');
     } catch (err) {

@@ -927,7 +937,9 @@ function cleanupCurrentCollections (plannedCollections, currentCollections) {
                         database,
                         collection);

-          dropCollectionAgency(database, collection, shard);
+          writeLocked({ part: 'Current' },
+                      dropCollectionAgency,
+                      [ database, collection, shard ]);
         }
       }
     }

@@ -1282,7 +1294,8 @@ function synchronizeLocalFollowerCollections (plannedCollections,
 // / @brief handle collection changes
 // //////////////////////////////////////////////////////////////////////////////

-function handleCollectionChanges (plan, current, takeOverResponsibility) {
+function handleCollectionChanges (plan, current, takeOverResponsibility,
+                                  writeLocked) {
   var plannedCollections = plan.Collections;
   var currentCollections = current.Collections;

@@ -1290,9 +1303,10 @@ function handleCollectionChanges (plan, current, takeOverResponsibility) {
   try {
     createLocalCollections(plannedCollections, plan.Version, currentCollections,
-                           takeOverResponsibility);
-    dropLocalCollections(plannedCollections, currentCollections);
-    cleanupCurrentCollections(plannedCollections, currentCollections);
+                           takeOverResponsibility, writeLocked);
+    dropLocalCollections(plannedCollections, currentCollections, writeLocked);
+    cleanupCurrentCollections(plannedCollections, currentCollections,
+                              writeLocked);
     if (!synchronizeLocalFollowerCollections(plannedCollections,
                                              currentCollections)) {
       // If not all needed jobs have been scheduled, then work is still

@@ -1393,7 +1407,7 @@ function primaryToSecondary () {
 // / @brief change handling trampoline function
 // //////////////////////////////////////////////////////////////////////////////

-function handleChanges (plan, current) {
+function handleChanges (plan, current, writeLocked) {
   var changed = false;
   var role = ArangoServerState.role();
   if (role === 'PRIMARY' || role === 'SECONDARY') {

@@ -1438,12 +1452,12 @@ function handleChanges (plan, current) {
     }
   }

-  handleDatabaseChanges(plan, current);
+  handleDatabaseChanges(plan, current, writeLocked);
   var success;
   if (role === 'PRIMARY' || role === 'COORDINATOR') {
     // Note: This is only ever called for DBservers (primary and secondary),
     // we keep the coordinator case here just in case...
-    success = handleCollectionChanges(plan, current, changed);
+    success = handleCollectionChanges(plan, current, changed, writeLocked);
   } else {
     success = setupReplication();
   }

@@ -1613,9 +1627,43 @@ var handlePlanChange = function (plan, current) {
     current: current.Version
   };

+  // ////////////////////////////////////////////////////////////////////////////
+  // / @brief execute an action under a write-lock
+  // ////////////////////////////////////////////////////////////////////////////
+
+  function writeLocked (lockInfo, cb, args) {
+    var timeout = lockInfo.timeout;
+    if (timeout === undefined) {
+      timeout = 60;
+    }
+
+    var ttl = lockInfo.ttl;
+    if (ttl === undefined) {
+      ttl = 120;
+    }
+    if (require('internal').coverage || require('internal').valgrind) {
+      ttl *= 10;
+      timeout *= 10;
+    }
+
+    global.ArangoAgency.lockWrite(lockInfo.part, ttl, timeout);
+
+    try {
+      cb.apply(null, args);
+      global.ArangoAgency.increaseVersion(lockInfo.part + '/Version');
+
+      let version = global.ArangoAgency.get(lockInfo.part + '/Version');
+      versions[lockInfo.part.toLowerCase()] = version.arango[lockInfo.part].Version;
+
+      global.ArangoAgency.unlockWrite(lockInfo.part, timeout);
+    } catch (err) {
+      global.ArangoAgency.unlockWrite(lockInfo.part, timeout);
+      throw err;
+    }
+  }
+
   try {
-    versions.success = handleChanges(plan, current);
+    versions.success = handleChanges(plan, current, writeLocked);

     console.debug('plan change handling successful');
   } catch (err) {
@@ -140,6 +140,8 @@ struct IndexBucket {
       TRI_ASSERT(_file == -1);
       throw;
     }
+
+    _nrCollisions = 0;
   }

   void deallocate() {

@@ -206,6 +208,7 @@ struct IndexBucket {
     _table = nullptr;
     _nrAlloc = 0;
     _nrUsed = 0;
+    _nrCollisions = 0;
   }

   int allocateTempfile(char*& filename, size_t filesize) {
@@ -876,14 +876,17 @@ int TRI_UnlinkFile(char const* filename) {
   int res = TRI_UNLINK(filename);

   if (res != 0) {
+    int e = errno;
     TRI_set_errno(TRI_ERROR_SYS_ERROR);
     LOG(TRACE) << "cannot unlink file '" << filename
                << "': " << TRI_LAST_ERROR_STR;
-    int e = TRI_errno();
     if (e == ENOENT) {
       return TRI_ERROR_FILE_NOT_FOUND;
     }
-    return e;
+    if (e == EPERM) {
+      return TRI_ERROR_FORBIDDEN;
+    }
+    return TRI_ERROR_SYS_ERROR;
   }

   return TRI_ERROR_NO_ERROR;
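Note: the essential fix in this hunk is ordering — `errno` is copied into `e` before `TRI_set_errno` and the log call run, since either may overwrite it, after which the ENOENT/EPERM classification would silently misfire. A self-contained sketch of the pitfall in plain POSIX (function name and error mapping are illustrative):

    #include <cerrno>
    #include <cstdio>
    #include <unistd.h>

    // Returns 0 on success, otherwise a stable copy of the unlink errno.
    static int removeFile(char const* path) {
      if (::unlink(path) != 0) {
        int e = errno;  // capture first: any later call may clobber errno
        std::fprintf(stderr, "cannot unlink '%s'\n", path);  // may set errno
        if (e == ENOENT) {
          return ENOENT;  // map to a "file not found" error in real code
        }
        if (e == EPERM) {
          return EPERM;   // map to a "forbidden" error in real code
        }
        return e;
      }
      return 0;
    }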