
fixed tests

This commit is contained in:
Jan Steemann 2014-06-18 16:34:11 +02:00
parent 0567e64f2f
commit 8b9cdbe075
30 changed files with 1165 additions and 334 deletions

View File

@ -112,8 +112,7 @@ SERVER_OPT := \
--javascript.app-path @top_srcdir@/js/apps \
--javascript.startup-directory @top_srcdir@/js \
--log.file "" \
--ruby.action-directory @top_srcdir@/mr/actions \
--ruby.modules-path @top_srcdir@/mr/server/modules:@top_srcdir@/mr/common/modules \
--wal.reserve-logfiles 1 \
--server.threads 4 \
$(SERVER_START)
@ -201,8 +200,8 @@ RECOVERY_TESTS = \
execute-recovery-test:
@rm -rf "$(VOCDIR)"
@mkdir -p "$(VOCDIR)/databases"
@builddir@/bin/arangod "$(VOCDIR)" --no-server $(SERVER_OPT) --server.threads 1 --javascript.script "@top_srcdir@/js/server/tests/recovery/$(RECOVERY_SCRIPT).js" --javascript.script-parameter setup || true # the server will crash with segfault intentionally in this test
$(VALGRIND) @builddir@/bin/arangod --no-server "$(VOCDIR)" $(SERVER_OPT) --server.threads 1 --javascript.script "@top_srcdir@/js/server/tests/recovery/$(RECOVERY_SCRIPT).js" --javascript.script-parameter recover || test "x$(FORCE)" == "x1"
@builddir@/bin/arangod "$(VOCDIR)" --no-server $(SERVER_OPT) --server.threads 1 --wal.reserve-logfiles 1 --javascript.script "@top_srcdir@/js/server/tests/recovery/$(RECOVERY_SCRIPT).js" --javascript.script-parameter setup || true # the server will crash with segfault intentionally in this test
$(VALGRIND) @builddir@/bin/arangod --no-server "$(VOCDIR)" $(SERVER_OPT) --server.threads 1 --wal.reserve-logfiles 1 --javascript.script "@top_srcdir@/js/server/tests/recovery/$(RECOVERY_SCRIPT).js" --javascript.script-parameter recover || test "x$(FORCE)" == "x1"
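For context: each recovery script is run twice, first with --javascript.script-parameter setup (where it writes data and deliberately crashes the server, hence the || true), then with recover (where the restarted server's state is verified). A minimal sketch of the dispatch such a script performs, modelled on the new disk-full recovery test at the end of this diff (the exact main/argv convention is an assumption):

// recovery-script skeleton (sketch)
function main (argv) {
  // "setup" phase: prepare data, then crash the server on purpose
  if (argv[1] === "setup") {
    runSetup();
    return 0;
  }
  // "recover" phase: the server has replayed the WAL; verify the data
  var jsunity = require("jsunity");
  jsunity.run(recoverySuite);
  return jsunity.done() ? 0 : 1;
}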
unittests-recovery:
@echo
@ -211,16 +210,19 @@ unittests-recovery:
@echo "================================================================================"
@echo
$(MAKE) execute-recovery-test PID=$(PID) RECOVERY_SCRIPT="multi-database-durability"
$(MAKE) execute-recovery-test PID=$(PID) RECOVERY_SCRIPT="disk-full-no-collection-journal"
$(MAKE) execute-recovery-test PID=$(PID) RECOVERY_SCRIPT="no-shutdown-info-with-flush"
$(MAKE) execute-recovery-test PID=$(PID) RECOVERY_SCRIPT="no-shutdown-info-no-flush"
$(MAKE) execute-recovery-test PID=$(PID) RECOVERY_SCRIPT="no-shutdown-info-multiple-logs"
$(MAKE) execute-recovery-test PID=$(PID) RECOVERY_SCRIPT="insert-update-remove"
$(MAKE) execute-recovery-test PID=$(PID) RECOVERY_SCRIPT="insert-update-remove-distance"
$(MAKE) execute-recovery-test PID=$(PID) RECOVERY_SCRIPT="multi-database-durability"
$(MAKE) execute-recovery-test PID=$(PID) RECOVERY_SCRIPT="big-transaction-durability"
$(MAKE) execute-recovery-test PID=$(PID) RECOVERY_SCRIPT="transaction-durability"
$(MAKE) execute-recovery-test PID=$(PID) RECOVERY_SCRIPT="transaction-durability-multiple"
$(MAKE) execute-recovery-test PID=$(PID) RECOVERY_SCRIPT="corrupt-wal-marker-multiple"
$(MAKE) execute-recovery-test PID=$(PID) RECOVERY_SCRIPT="corrupt-wal-marker-single"
$(MAKE) execute-recovery-test PID=$(PID) RECOVERY_SCRIPT="no-shutdown-info"
@rm -rf "$(VOCDIR)"
@rm -rf "$(VOCDIR)" core
@echo
else
@ -414,6 +416,7 @@ SHELL_COMMON = \
SHELL_SERVER_ONLY = \
@top_srcdir@/js/server/tests/shell-readonly.js \
@top_srcdir@/js/server/tests/shell-wal-noncluster.js \
@top_srcdir@/js/server/tests/shell-sharding-helpers.js \
@top_srcdir@/js/server/tests/shell-compaction-noncluster.js \
@top_srcdir@/js/server/tests/shell-tasks.js \
@ -564,7 +567,7 @@ unittests-shell-client-readonly:
$(MAKE) start-server PID=$(PID) SERVER_START="--server.endpoint unix://$(VOCDIR)/arango.sock --server.disable-authentication true" PROTO=unix
@echo
@echo "================================================================================"
@echo "<< SHELL CLIENT READONLY >>"
@echo "<< SHELL CLIENT READONLY >>"
@echo "================================================================================"
@echo

View File

@ -832,6 +832,8 @@ int ArangoServer::startupServer () {
// finally flush the write-ahead log so all data in the WAL goes into the collections
wal::LogfileManager::instance()->flush(true, true, true);
// WAL recovery done after here
// setup the V8 actions
if (startServer) {
_applicationV8->prepareActions();
@ -874,9 +876,6 @@ int ArangoServer::startupServer () {
_applicationServer->start();
// load authentication
TRI_LoadAuthInfoVocBase(vocbase);
// if the authentication info could not be loaded, but authentication is turned on,
// then we refuse to start
if (! vocbase->_authInfoLoaded && ! _disableAuthentication) {

View File

@ -3781,11 +3781,31 @@ static v8::Handle<v8::Value> JS_Transaction (v8::Arguments const& argv) {
return scope.Close(result);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief adjusts the WAL configuration at runtime
////////////////////////////////////////////////////////////////////////////////
static v8::Handle<v8::Value> JS_AdjustWal (v8::Arguments const& argv) {
v8::HandleScope scope;
if (argv.Length() != 1 || ! argv[0]->IsObject()) {
TRI_V8_EXCEPTION_USAGE(scope, "adjustWal(<object>)");
}
v8::Handle<v8::Object> object = v8::Handle<v8::Object>::Cast(argv[0]);
if (object->Has(TRI_V8_STRING("reserveLogfiles"))) {
uint32_t logfiles = static_cast<uint32_t>(TRI_ObjectToUInt64(object->Get(TRI_V8_STRING("reserveLogfiles")), true));
triagens::wal::LogfileManager::instance()->reserveLogfiles(logfiles);
}
return scope.Close(v8::True());
}
////////////////////////////////////////////////////////////////////////////////
/// @brief flush the currently open WAL logfile
////////////////////////////////////////////////////////////////////////////////
static v8::Handle<v8::Value> JS_Flush (v8::Arguments const& argv) {
static v8::Handle<v8::Value> JS_FlushWal (v8::Arguments const& argv) {
v8::HandleScope scope;
bool waitForSync = false;
@ -6262,8 +6282,8 @@ static v8::Handle<v8::Value> JS_FiguresVocbaseCol (v8::Arguments const& argv) {
indexes->Set(v8::String::New("count"), v8::Number::New((double) info->_numberIndexes));
indexes->Set(v8::String::New("size"), v8::Number::New((double) info->_sizeIndexes));
indexes->Set(v8::String::New("lastTick"), V8TickId(info->_tickMax));
indexes->Set(v8::String::New("uncollectedLogfileEntries"), v8::Number::New((double) info->_uncollectedLogfileEntries));
result->Set(v8::String::New("lastTick"), V8TickId(info->_tickMax));
result->Set(v8::String::New("uncollectedLogfileEntries"), v8::Number::New((double) info->_uncollectedLogfileEntries));
TRI_Free(TRI_UNKNOWN_MEM_ZONE, info);
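The effect of this hunk is that lastTick and uncollectedLogfileEntries move from the indexes sub-object to the top level of the figures result. A short arangosh-style sketch (the collection name "demo" is a placeholder):

// assumes some collection "demo" exists
var fig = db.demo.figures();
print(fig.lastTick);                  // now reported on the top level ...
print(fig.uncollectedLogfileEntries); // ... instead of inside fig.indexes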
@ -7061,7 +7081,7 @@ static v8::Handle<v8::Value> JS_RotateVocbaseCol (v8::Arguments const& argv) {
v8::Handle<v8::Object> err;
TRI_vocbase_col_t const* collection = UseCollection(argv.Holder(), &err);
if (collection == 0) {
if (collection == nullptr) {
return scope.Close(v8::ThrowException(err));
}
@ -10132,7 +10152,8 @@ void TRI_InitV8VocBridge (v8::Handle<v8::Context> context,
TRI_AddGlobalFunctionVocbase(context, "LIST_ENDPOINTS", JS_ListEndpoints, true);
TRI_AddGlobalFunctionVocbase(context, "RELOAD_AUTH", JS_ReloadAuth, true);
TRI_AddGlobalFunctionVocbase(context, "TRANSACTION", JS_Transaction, true);
TRI_AddGlobalFunctionVocbase(context, "WAL_FLUSH", JS_Flush, true);
TRI_AddGlobalFunctionVocbase(context, "WAL_FLUSH", JS_FlushWal, true);
TRI_AddGlobalFunctionVocbase(context, "WAL_ADJUST", JS_AdjustWal, true);
// .............................................................................
// create global variables

View File

@ -432,16 +432,11 @@ bool TRI_InsertInitialAuthInfo (TRI_vocbase_t* vocbase) {
////////////////////////////////////////////////////////////////////////////////
bool TRI_LoadAuthInfo (TRI_vocbase_t* vocbase) {
TRI_vocbase_col_t* collection;
void** beg;
void** end;
void** ptr;
LOG_DEBUG("starting to load authentication and authorisation information");
collection = TRI_LookupCollectionByNameVocBase(vocbase, TRI_COL_NAME_USERS);
TRI_vocbase_col_t* collection = TRI_LookupCollectionByNameVocBase(vocbase, TRI_COL_NAME_USERS);
if (collection == NULL) {
if (collection == nullptr) {
LOG_INFO("collection '_users' does not exist, no authentication available");
return false;
}
@ -459,27 +454,26 @@ bool TRI_LoadAuthInfo (TRI_vocbase_t* vocbase) {
TRI_WriteLockReadWriteLock(&vocbase->_authInfoLock);
ClearAuthInfo(vocbase);
beg = document->_primaryIndex._table;
end = beg + document->_primaryIndex._nrAlloc;
ptr = beg;
void** beg = document->_primaryIndex._table;
void** end = beg + document->_primaryIndex._nrAlloc;
void** ptr = beg;
for (; ptr < end; ++ptr) {
if (*ptr) {
TRI_vocbase_auth_t* auth;
TRI_doc_mptr_t const* d;
TRI_shaped_json_t shapedJson;
d = (TRI_doc_mptr_t const*) *ptr;
TRI_doc_mptr_t const* d = (TRI_doc_mptr_t const*) *ptr;
TRI_EXTRACT_SHAPED_JSON_MARKER(shapedJson, d->getDataPtr()); // PROTECTED by trx here
auth = ConvertAuthInfo(vocbase, document, &shapedJson);
if (auth != NULL) {
if (auth != nullptr) {
TRI_vocbase_auth_t* old = static_cast<TRI_vocbase_auth_t*>(TRI_InsertKeyAssociativePointer(&vocbase->_authInfo, auth->_username, auth, true));
if (old != NULL) {
if (old != nullptr) {
FreeAuthInfo(old);
}
}
@ -562,7 +556,10 @@ bool TRI_PopulateAuthInfo (TRI_vocbase_t* vocbase,
////////////////////////////////////////////////////////////////////////////////
bool TRI_ReloadAuthInfo (TRI_vocbase_t* vocbase) {
return TRI_LoadAuthInfo(vocbase);
bool result = TRI_LoadAuthInfo(vocbase);
vocbase->_authInfoLoaded = result;
return result;
}
////////////////////////////////////////////////////////////////////////////////

View File

@ -258,11 +258,9 @@ static TRI_datafile_t* CreateCompactor (TRI_document_collection_t* document,
if (journal == NULL) {
if (TRI_errno() == TRI_ERROR_OUT_OF_MEMORY_MMAP) {
document->_lastError = TRI_set_errno(TRI_ERROR_OUT_OF_MEMORY_MMAP);
document->_state = TRI_COL_STATE_READ;
}
else {
document->_lastError = TRI_set_errno(TRI_ERROR_ARANGO_NO_JOURNAL);
document->_state = TRI_COL_STATE_WRITE_ERROR;
}
return NULL;
@ -1807,6 +1805,8 @@ static bool OpenIterator (TRI_df_marker_t const* marker,
////////////////////////////////////////////////////////////////////////////////
static int FillInternalIndexes (TRI_document_collection_t* document) {
TRI_ASSERT(! triagens::wal::LogfileManager::instance()->isInRecovery());
int res = TRI_ERROR_NO_ERROR;
for (size_t i = 0; i < document->_allIndexes._length; ++i) {
@ -2389,18 +2389,25 @@ TRI_datafile_t* TRI_CreateJournalDocumentCollection (TRI_document_collection_t*
TRI_FreeString(TRI_CORE_MEM_ZONE, number);
TRI_FreeString(TRI_CORE_MEM_ZONE, jname);
TRI_DEBUG_INTENTIONAL_FAIL_IF("CreateJournalDocumentCollection") {
// simulate disk full
TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
document->_lastError = TRI_set_errno(TRI_ERROR_OUT_OF_MEMORY_MMAP);
errno = ENOSPC;
return nullptr;
}
journal = TRI_CreateDatafile(filename, fid, journalSize, true);
TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
}
if (journal == NULL) {
if (journal == nullptr) {
if (TRI_errno() == TRI_ERROR_OUT_OF_MEMORY_MMAP) {
document->_lastError = TRI_set_errno(TRI_ERROR_OUT_OF_MEMORY_MMAP);
document->_state = TRI_COL_STATE_READ;
}
else {
document->_lastError = TRI_set_errno(TRI_ERROR_ARANGO_NO_JOURNAL);
document->_state = TRI_COL_STATE_WRITE_ERROR;
}
return NULL;
@ -2815,11 +2822,16 @@ TRI_document_collection_t* TRI_OpenDocumentCollection (TRI_vocbase_t* vocbase,
TRI_InitVocShaper(document->getShaper()); // ONLY in OPENCOLLECTION, PROTECTED by fake trx here
// fill internal indexes (that is, the edges index at the moment)
FillInternalIndexes(document);
// secondary indexes must not be loaded during recovery
// this is because creating indexes might write attribute markers into the WAL,
// but the WAL is read-only at the point of recovery
if (! triagens::wal::LogfileManager::instance()->isInRecovery()) {
// fill internal indexes (that is, the edges index at the moment)
FillInternalIndexes(document);
// fill user-defined secondary indexes
TRI_IterateIndexCollection(collection, OpenIndexIterator, collection);
// fill user-defined secondary indexes
TRI_IterateIndexCollection(collection, OpenIndexIterator, collection);
}
return document;
}
@ -3474,18 +3486,12 @@ static int ComparePidName (void const* left, void const* right) {
/// note: the write-lock for the collection must be held to call this
////////////////////////////////////////////////////////////////////////////////
void TRI_UpdateStatisticsDocumentCollection (TRI_document_collection_t* document,
TRI_voc_rid_t rid,
bool force,
int64_t logfileEntries) {
void TRI_UpdateRevisionDocumentCollection (TRI_document_collection_t* document,
TRI_voc_rid_t rid,
bool force) {
if (rid > 0) {
SetRevision(document, rid, force);
}
if (! document->_info._isVolatile) {
// only count logfileEntries if the collection is durable
document->_uncollectedLogfileEntries += logfileEntries;
}
}
////////////////////////////////////////////////////////////////////////////////

View File

@ -676,10 +676,9 @@ void TRI_FreeDocumentCollection (TRI_document_collection_t*);
/// note: the write-lock for the collection must be held to call this
////////////////////////////////////////////////////////////////////////////////
void TRI_UpdateStatisticsDocumentCollection (TRI_document_collection_t*,
TRI_voc_rid_t,
bool,
int64_t);
void TRI_UpdateRevisionDocumentCollection (TRI_document_collection_t*,
TRI_voc_rid_t,
bool);
////////////////////////////////////////////////////////////////////////////////
/// @brief whether or not a collection is fully collected

View File

@ -512,8 +512,8 @@ static int OpenDatabases (TRI_server_t* server,
TRI_vector_string_t files;
size_t i, n;
int res;
if (server->_iterateMarkersOnOpen) {
if (server->_iterateMarkersOnOpen && ! server->_hasCreatedSystemDatabase) {
LOG_WARNING("no shutdown info found. scanning datafiles for last tick...");
}
@ -1318,6 +1318,8 @@ static int InitDatabases (TRI_server_t* server,
res = TRI_ERROR_OUT_OF_MEMORY;
}
}
server->_hasCreatedSystemDatabase = true;
}
if (res == TRI_ERROR_NO_ERROR && performUpgrade) {
@ -1344,6 +1346,134 @@ static int InitDatabases (TRI_server_t* server,
return res;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief unloads all open collections after recovery
/// this is necessary to not mess up collection statistics
////////////////////////////////////////////////////////////////////////////////
static int SignalUnloadAll (TRI_server_t* server) {
LOG_TRACE("sending unload signal to all collections of all databases");
DatabaseReadLocker locker(&server->_databasesLock);
size_t const n = server->_databases._nrAlloc;
// iterate over all databases
for (size_t i = 0; i < n; ++i) {
TRI_vocbase_t* vocbase = static_cast<TRI_vocbase_t*>(server->_databases._table[i]);
if (vocbase != nullptr) {
TRI_ASSERT(vocbase->_type == TRI_VOCBASE_TYPE_NORMAL);
// iterate over all collections
TRI_READ_LOCK_COLLECTIONS_VOCBASE(vocbase);
// starts unloading of collections
for (size_t j = 0; j < vocbase->_collections._length; ++j) {
TRI_vocbase_col_t* collection = static_cast<TRI_vocbase_col_t*>(vocbase->_collections._buffer[j]);
TRI_UnloadCollectionVocBase(vocbase, collection, true);
}
TRI_READ_UNLOCK_COLLECTIONS_VOCBASE(vocbase);
}
}
return TRI_ERROR_NO_ERROR;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief waits until all collections of all databases have unloaded
////////////////////////////////////////////////////////////////////////////////
static int WaitForUnloadAll (TRI_server_t* server) {
LOG_TRACE("unloading all collections of all databases");
while (true) {
bool mustWait = false;
// iterate over all databases
{
DatabaseReadLocker locker(&server->_databasesLock);
size_t const n = server->_databases._nrAlloc;
for (size_t i = 0; i < n; ++i) {
TRI_vocbase_t* vocbase = static_cast<TRI_vocbase_t*>(server->_databases._table[i]);
if (vocbase != nullptr) {
TRI_ASSERT(vocbase->_type == TRI_VOCBASE_TYPE_NORMAL);
// iterate over all collections
TRI_READ_LOCK_COLLECTIONS_VOCBASE(vocbase);
// check whether each collection has finished unloading
for (size_t j = 0; j < vocbase->_collections._length; ++j) {
TRI_vocbase_col_t* collection = static_cast<TRI_vocbase_col_t*>(vocbase->_collections._buffer[j]);
TRI_READ_LOCK_STATUS_VOCBASE_COL(collection);
bool isUnloaded = (collection->_status == TRI_VOC_COL_STATUS_UNLOADED ||
collection->_status == TRI_VOC_COL_STATUS_DELETED ||
collection->_status == TRI_VOC_COL_STATUS_CORRUPTED);
TRI_READ_UNLOCK_STATUS_VOCBASE_COL(collection);
if (! isUnloaded) {
// abort early
mustWait = true;
break;
}
}
TRI_READ_UNLOCK_COLLECTIONS_VOCBASE(vocbase);
if (mustWait) {
break;
}
}
}
}
if (mustWait) {
// let something else happen and retry
usleep(10 * 1000);
continue;
}
// all collections have unloaded
break;
}
return TRI_ERROR_NO_ERROR;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief initialises all databases after recovery:
/// reloads the authentication info and starts the compactor threads
////////////////////////////////////////////////////////////////////////////////
static int InitAll (TRI_server_t* server) {
DatabaseReadLocker locker(&server->_databasesLock);
size_t const n = server->_databases._nrAlloc;
// iterate over all databases
for (size_t i = 0; i < n; ++i) {
TRI_vocbase_t* vocbase = static_cast<TRI_vocbase_t*>(server->_databases._table[i]);
if (vocbase != nullptr) {
TRI_ASSERT(vocbase->_type == TRI_VOCBASE_TYPE_NORMAL);
// initialise the authentication data for the database
TRI_ReloadAuthInfo(vocbase);
// start the compactor for the database
TRI_StartCompactorVocBase(vocbase);
}
}
return TRI_ERROR_NO_ERROR;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief database manager thread main loop
/// the purpose of this thread is to physically remove directories of databases
@ -1489,6 +1619,7 @@ int TRI_InitServer (TRI_server_t* server,
TRI_ASSERT(basePath != nullptr);
server->_iterateMarkersOnOpen = iterateMarkersOnOpen;
server->_hasCreatedSystemDatabase = false;
// c++ object, may be null in console mode
server->_applicationEndpointServer = applicationEndpointServer;
@ -1873,33 +2004,10 @@ int TRI_StartServer (TRI_server_t* server,
////////////////////////////////////////////////////////////////////////////////
int TRI_InitDatabasesServer (TRI_server_t* server) {
DatabaseWriteLocker locker(&server->_databasesLock);
SignalUnloadAll(server);
WaitForUnloadAll(server);
InitAll(server);
size_t n = server->_databases._nrAlloc;
// iterate over all databases
for (size_t i = 0; i < n; ++i) {
TRI_vocbase_t* vocbase = static_cast<TRI_vocbase_t*>(server->_databases._table[i]);
if (vocbase != nullptr) {
TRI_ASSERT(vocbase->_type == TRI_VOCBASE_TYPE_NORMAL);
// iterate over all collections
TRI_WRITE_LOCK_COLLECTIONS_VOCBASE(vocbase);
// starts unloading of collections
for (size_t j = 0; j < vocbase->_collections._length; ++j) {
TRI_vocbase_col_t* collection = static_cast<TRI_vocbase_col_t*>(vocbase->_collections._buffer[j]);
TRI_UnloadCollectionVocBase(vocbase, collection, true);
}
TRI_WRITE_UNLOCK_COLLECTIONS_VOCBASE(vocbase);
// start the compactor for the database
TRI_StartCompactorVocBase(vocbase);
}
}
return TRI_ERROR_NO_ERROR;
}
@ -2085,6 +2193,7 @@ int TRI_CreateDatabaseServer (TRI_server_t* server,
CreateApplicationDirectory(vocbase->_name, server->_appPath);
CreateApplicationDirectory(vocbase->_name, server->_devAppPath);
TRI_ReloadAuthInfo(vocbase);
TRI_StartCompactorVocBase(vocbase);
// increase reference counter

View File

@ -70,6 +70,7 @@ typedef struct TRI_server_s {
bool _disableReplicationLoggers;
bool _disableReplicationAppliers;
bool _iterateMarkersOnOpen;
bool _hasCreatedSystemDatabase;
bool _initialised;
}

View File

@ -179,6 +179,10 @@ static void FreeOperations (TRI_transaction_t* trx) {
if (mustRollback) {
document->_info._revision = trxCollection->_originalRevision;
}
else if (! document->_info._isVolatile) {
// only count logfileEntries if the collection is durable
document->_uncollectedLogfileEntries += trxCollection->_operations->size();
}
delete trxCollection->_operations;
trxCollection->_operations = nullptr;
@ -908,6 +912,8 @@ int TRI_AddOperationTransaction (triagens::wal::DocumentOperation& operation,
// operation is directly executed
operation.handle();
++document->_uncollectedLogfileEntries;
if (operation.type == TRI_VOC_DOCUMENT_OPERATION_UPDATE ||
operation.type == TRI_VOC_DOCUMENT_OPERATION_REMOVE) {
// update datafile statistics for the old header
@ -937,7 +943,7 @@ int TRI_AddOperationTransaction (triagens::wal::DocumentOperation& operation,
copy->handle();
}
TRI_UpdateStatisticsDocumentCollection(document, operation.rid, false, 1);
TRI_UpdateRevisionDocumentCollection(document, operation.rid, false);
return TRI_ERROR_NO_ERROR;
}

View File

@ -731,7 +731,7 @@ int TRI_InsertAttributeVocShaper (TRI_shaper_t* s,
char const* name = shaper->_collection->_info._name;
#ifdef TRI_ENABLE_MAINTAINER_MODE
LOG_WARNING("found duplicate attribute name '%s' in collection '%s'", p, name);
LOG_ERROR("found duplicate attribute name '%s' in collection '%s'", p, name);
TRI_ASSERT(false);
#else
LOG_TRACE("found duplicate attribute name '%s' in collection '%s'", p, name);
@ -744,7 +744,7 @@ int TRI_InsertAttributeVocShaper (TRI_shaper_t* s,
char const* name = shaper->_collection->_info._name;
#ifdef TRI_ENABLE_MAINTAINER_MODE
LOG_WARNING("found duplicate attribute id '%llu' in collection '%s'", (unsigned long long) m->_aid, name);
LOG_ERROR("found duplicate attribute id '%llu' in collection '%s'", (unsigned long long) m->_aid, name);
TRI_ASSERT(false);
#else
LOG_TRACE("found duplicate attribute id '%llu' in collection '%s'", (unsigned long long) m->_aid, name);

View File

@ -1460,8 +1460,6 @@ TRI_vocbase_t* TRI_OpenVocBase (TRI_server_t* server,
return NULL;
}
TRI_ReloadAuthInfo(vocbase);
ScanTrxCollection(vocbase);
// .............................................................................
@ -1639,14 +1637,6 @@ int TRI_StopCompactorVocBase (TRI_vocbase_t* vocbase) {
return TRI_ERROR_NO_ERROR;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief load authentication information
////////////////////////////////////////////////////////////////////////////////
void TRI_LoadAuthInfoVocBase (TRI_vocbase_t* vocbase) {
vocbase->_authInfoLoaded = TRI_LoadAuthInfo(vocbase);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief returns all known (document) collections
////////////////////////////////////////////////////////////////////////////////

View File

@ -446,12 +446,6 @@ void TRI_StartCompactorVocBase (TRI_vocbase_t*);
int TRI_StopCompactorVocBase (TRI_vocbase_t*);
////////////////////////////////////////////////////////////////////////////////
/// @brief load authentication information
////////////////////////////////////////////////////////////////////////////////
void TRI_LoadAuthInfoVocBase (TRI_vocbase_t*);
////////////////////////////////////////////////////////////////////////////////
/// @brief returns all known collections
////////////////////////////////////////////////////////////////////////////////

View File

@ -348,10 +348,10 @@ void CollectorThread::run () {
}
catch (triagens::arango::Exception const& ex) {
int res = ex.code();
LOG_ERROR("got unexpected error in collectorThread: %s", TRI_errno_string(res));
LOG_ERROR("got unexpected error in collectorThread::run: %s", TRI_errno_string(res));
}
catch (...) {
LOG_ERROR("got unspecific error in collectorThread");
LOG_ERROR("got unspecific error in collectorThread::run");
}
if (stop == 0 && ! worked) {
@ -438,27 +438,35 @@ bool CollectorThread::processQueuedOperations () {
if (res == TRI_ERROR_LOCK_TIMEOUT) {
// could not acquire write-lock for collection in time
// do not delete the operations
++it2;
continue;
}
// delete the object
delete (*it2);
if (res == TRI_ERROR_NO_ERROR) {
LOG_TRACE("queued operations applied successfully");
}
else if (res == TRI_ERROR_ARANGO_DATABASE_NOT_FOUND ||
res == TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND) {
// these are expected errors
LOG_TRACE("removing queued operations for already deleted collection");
res = TRI_ERROR_NO_ERROR;
}
else {
LOG_WARNING("got unexpected error code while applying queued operations: %s", TRI_errno_string(res));
}
// delete the element from the vector while iterating over the vector
it2 = operations.erase(it2);
if (res == TRI_ERROR_NO_ERROR) {
// delete the object
delete (*it2);
_logfileManager->decreaseCollectQueueSize(logfile);
// delete the element from the vector while iterating over the vector
it2 = operations.erase(it2);
_logfileManager->decreaseCollectQueueSize(logfile);
}
else {
++it2;
}
}
// next collection
@ -505,7 +513,10 @@ int CollectorThread::processCollectionOperations (CollectorCache* cache) {
triagens::arango::TransactionBase trx(true);
TRI_document_collection_t* document = collection->_collection;
// first try to read-lock the compactor-lock, afterwards try to write-lock the collection
// if any locking attempt fails, release and try again next time
if (! TRI_TryReadLockReadWriteLock(&document->_compactionLock)) {
return TRI_ERROR_LOCK_TIMEOUT;
}
@ -524,92 +535,95 @@ int CollectorThread::processCollectionOperations (CollectorCache* cache) {
// now we have the write lock on the collection
LOG_TRACE("wal collector processing operations for collection '%s'", document->_info._name);
for (auto it = cache->operations->begin(); it != cache->operations->end(); ++it) {
auto operation = (*it);
if (! _inRecovery) {
for (auto it = cache->operations->begin(); it != cache->operations->end(); ++it) {
auto operation = (*it);
TRI_df_marker_t const* walMarker = reinterpret_cast<TRI_df_marker_t const*>(operation.walPosition);
TRI_df_marker_t const* marker = reinterpret_cast<TRI_df_marker_t const*>(operation.datafilePosition);
TRI_voc_size_t const datafileMarkerSize = operation.datafileMarkerSize;
TRI_voc_fid_t const fid = operation.datafileId;
TRI_df_marker_t const* walMarker = reinterpret_cast<TRI_df_marker_t const*>(operation.walPosition);
TRI_df_marker_t const* marker = reinterpret_cast<TRI_df_marker_t const*>(operation.datafilePosition);
TRI_voc_size_t const datafileMarkerSize = operation.datafileMarkerSize;
TRI_voc_fid_t const fid = operation.datafileId;
TRI_ASSERT(walMarker != nullptr);
TRI_ASSERT(marker != nullptr);
if (walMarker->_type == TRI_WAL_MARKER_DOCUMENT) {
wal::document_marker_t const* m = reinterpret_cast<wal::document_marker_t const*>(walMarker);
char const* key = reinterpret_cast<char const*>(m) + m->_offsetKey;
TRI_doc_mptr_t* found = static_cast<TRI_doc_mptr_t*>(TRI_LookupByKeyPrimaryIndex(&document->_primaryIndex, key));
if (found == nullptr || found->_rid != m->_revisionId) {
// somebody inserted a new revision of the document
auto& dfi = createDfi(cache, fid);
dfi._numberDead++;
dfi._sizeDead += (int64_t) TRI_DF_ALIGN_BLOCK(datafileMarkerSize);
dfi._numberAlive--;
dfi._sizeAlive -= (int64_t) TRI_DF_ALIGN_BLOCK(datafileMarkerSize);
}
else {
// update cap constraint info
document->_headersPtr->adjustTotalSize(TRI_DF_ALIGN_BLOCK(walMarker->_size),
TRI_DF_ALIGN_BLOCK(datafileMarkerSize));
// we can safely update the master pointer's dataptr value
found->setDataPtr(static_cast<void*>(const_cast<char*>(operation.datafilePosition)));
found->_fid = fid;
}
}
else if (walMarker->_type == TRI_WAL_MARKER_EDGE) {
wal::edge_marker_t const* m = reinterpret_cast<wal::edge_marker_t const*>(walMarker);
char const* key = reinterpret_cast<char const*>(m) + m->_offsetKey;
TRI_doc_mptr_t* found = static_cast<TRI_doc_mptr_t*>(TRI_LookupByKeyPrimaryIndex(&document->_primaryIndex, key));
if (found == nullptr || found->_rid != m->_revisionId) {
// somebody inserted a new revision of the document
auto& dfi = createDfi(cache, fid);
dfi._numberDead++;
dfi._sizeDead += (int64_t) TRI_DF_ALIGN_BLOCK(datafileMarkerSize);
dfi._numberAlive--;
dfi._sizeAlive -= (int64_t) TRI_DF_ALIGN_BLOCK(datafileMarkerSize);
}
else {
// update cap constraint info
document->_headersPtr->adjustTotalSize(TRI_DF_ALIGN_BLOCK(walMarker->_size),
TRI_DF_ALIGN_BLOCK(datafileMarkerSize));
// we can safely update the master pointer's dataptr value
found->setDataPtr(static_cast<void*>(const_cast<char*>(operation.datafilePosition)));
found->_fid = fid;
}
}
else if (walMarker->_type == TRI_WAL_MARKER_REMOVE) {
wal::remove_marker_t const* m = reinterpret_cast<wal::remove_marker_t const*>(walMarker);
char const* key = reinterpret_cast<char const*>(m) + sizeof(wal::remove_marker_t);
TRI_doc_mptr_t* found = static_cast<TRI_doc_mptr_t*>(TRI_LookupByKeyPrimaryIndex(&document->_primaryIndex, key));
TRI_ASSERT(walMarker != nullptr);
TRI_ASSERT(marker != nullptr);
if (found != nullptr && found->_rid > m->_revisionId) {
// somebody re-created the document with a newer revision
auto& dfi = createDfi(cache, fid);
dfi._numberDead++;
dfi._sizeDead += (int64_t) TRI_DF_ALIGN_BLOCK(datafileMarkerSize);
dfi._numberAlive--;
dfi._sizeAlive -= (int64_t) TRI_DF_ALIGN_BLOCK(datafileMarkerSize);
if (walMarker->_type == TRI_WAL_MARKER_DOCUMENT) {
wal::document_marker_t const* m = reinterpret_cast<wal::document_marker_t const*>(walMarker);
char const* key = reinterpret_cast<char const*>(m) + m->_offsetKey;
TRI_doc_mptr_t* found = static_cast<TRI_doc_mptr_t*>(TRI_LookupByKeyPrimaryIndex(&document->_primaryIndex, key));
if (found == nullptr || found->_rid != m->_revisionId) {
// somebody inserted a new revision of the document
auto& dfi = createDfi(cache, fid);
dfi._numberDead++;
dfi._sizeDead += (int64_t) TRI_DF_ALIGN_BLOCK(datafileMarkerSize);
dfi._numberAlive--;
dfi._sizeAlive -= (int64_t) TRI_DF_ALIGN_BLOCK(datafileMarkerSize);
}
else {
// update cap constraint info
document->_headersPtr->adjustTotalSize(TRI_DF_ALIGN_BLOCK(walMarker->_size),
TRI_DF_ALIGN_BLOCK(datafileMarkerSize));
// we can safely update the master pointer's dataptr value
found->setDataPtr(static_cast<void*>(const_cast<char*>(operation.datafilePosition)));
found->_fid = fid;
}
}
else if (walMarker->_type == TRI_WAL_MARKER_EDGE) {
wal::edge_marker_t const* m = reinterpret_cast<wal::edge_marker_t const*>(walMarker);
char const* key = reinterpret_cast<char const*>(m) + m->_offsetKey;
TRI_doc_mptr_t* found = static_cast<TRI_doc_mptr_t*>(TRI_LookupByKeyPrimaryIndex(&document->_primaryIndex, key));
if (found == nullptr || found->_rid != m->_revisionId) {
// somebody inserted a new revision of the document
auto& dfi = createDfi(cache, fid);
dfi._numberDead++;
dfi._sizeDead += (int64_t) TRI_DF_ALIGN_BLOCK(datafileMarkerSize);
dfi._numberAlive--;
dfi._sizeAlive -= (int64_t) TRI_DF_ALIGN_BLOCK(datafileMarkerSize);
}
else {
// update cap constraint info
document->_headersPtr->adjustTotalSize(TRI_DF_ALIGN_BLOCK(walMarker->_size),
TRI_DF_ALIGN_BLOCK(datafileMarkerSize));
// we can safely update the master pointer's dataptr value
found->setDataPtr(static_cast<void*>(const_cast<char*>(operation.datafilePosition)));
found->_fid = fid;
}
}
else if (walMarker->_type == TRI_WAL_MARKER_REMOVE) {
wal::remove_marker_t const* m = reinterpret_cast<wal::remove_marker_t const*>(walMarker);
char const* key = reinterpret_cast<char const*>(m) + sizeof(wal::remove_marker_t);
TRI_doc_mptr_t* found = static_cast<TRI_doc_mptr_t*>(TRI_LookupByKeyPrimaryIndex(&document->_primaryIndex, key));
if (found != nullptr && found->_rid > m->_revisionId) {
// somebody re-created the document with a newer revision
auto& dfi = createDfi(cache, fid);
dfi._numberDead++;
dfi._sizeDead += (int64_t) TRI_DF_ALIGN_BLOCK(datafileMarkerSize);
dfi._numberAlive--;
dfi._sizeAlive -= (int64_t) TRI_DF_ALIGN_BLOCK(datafileMarkerSize);
}
}
else if (walMarker->_type == TRI_WAL_MARKER_ATTRIBUTE) {
// move the pointer to the attribute from WAL to the datafile
TRI_MoveMarkerVocShaper(document->getShaper(), const_cast<TRI_df_marker_t*>(marker)); // ONLY IN COLLECTOR, PROTECTED by COLLECTION LOCK and fake trx here
}
else if (walMarker->_type == TRI_WAL_MARKER_SHAPE) {
// move the pointer to the shape from WAL to the datafile
TRI_MoveMarkerVocShaper(document->getShaper(), const_cast<TRI_df_marker_t*>(marker)); // ONLY IN COLLECTOR, PROTECTED by COLLECTION LOCK and fake trx here
}
else {
// a marker we won't care about
}
}
else if (walMarker->_type == TRI_WAL_MARKER_ATTRIBUTE) {
// move the pointer to the attribute from WAL to the datafile
TRI_MoveMarkerVocShaper(document->getShaper(), const_cast<TRI_df_marker_t*>(marker)); // ONLY IN COLLECTOR, PROTECTED by COLLECTION LOCK and fake trx here
}
else if (walMarker->_type == TRI_WAL_MARKER_SHAPE) {
// move the pointer to the shape from WAL to the datafile
TRI_MoveMarkerVocShaper(document->getShaper(), const_cast<TRI_df_marker_t*>(marker)); // ONLY IN COLLECTOR, PROTECTED by COLLECTION LOCK and fake trx here
}
else {
// a marker we won't care about
}
}
} // ! _inRecovery
// finally update all datafile statistics
LOG_TRACE("updating datafile statistics for collection '%s'", document->_info._name);
@ -666,6 +680,8 @@ bool CollectorThread::removeLogfiles () {
////////////////////////////////////////////////////////////////////////////////
int CollectorThread::collect (Logfile* logfile) {
TRI_ASSERT(logfile != nullptr);
LOG_TRACE("collecting logfile %llu", (unsigned long long) logfile->id());
TRI_datafile_t* df = logfile->df();
@ -744,11 +760,16 @@ int CollectorThread::collect (Logfile* logfile) {
catch (triagens::arango::Exception const& ex) {
res = ex.code();
}
catch (...) {
res = TRI_ERROR_INTERNAL;
}
if (res != TRI_ERROR_NO_ERROR &&
res != TRI_ERROR_ARANGO_DATABASE_NOT_FOUND &&
res != TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND) {
LOG_WARNING("got unexpected error in collect: %s", TRI_errno_string(res));
LOG_WARNING("got unexpected error in CollectorThread::collect: %s", TRI_errno_string(res));
// abort early
return res;
}
}
}
@ -783,7 +804,7 @@ int CollectorThread::transferMarkers (Logfile* logfile,
triagens::arango::CollectionGuard collectionGuard(vocbase, collectionId, ! _inRecovery);
TRI_vocbase_col_t* collection = collectionGuard.collection();
TRI_ASSERT(collection != nullptr);
TRI_document_collection_t* document = collection->_collection;
TRI_ASSERT(document != nullptr);
@ -797,7 +818,6 @@ int CollectorThread::transferMarkers (Logfile* logfile,
totalOperationsCount,
operations.size());
int res = TRI_ERROR_INTERNAL;
try {
@ -808,15 +828,21 @@ int CollectorThread::transferMarkers (Logfile* logfile,
res = syncDatafileCollection(document);
// note: cache is passed by reference and can be modified by queueOperations
// (i.e. set to nullptr!)
queueOperations(logfile, cache);
}
}
catch (triagens::arango::Exception const& ex) {
res = ex.code();
}
catch (...) {
res = TRI_ERROR_INTERNAL;
}
if (cache != nullptr) {
// prevent memleak
cache->freeBarriers();
delete cache;
}
@ -1115,6 +1141,7 @@ int CollectorThread::syncDatafileCollection (TRI_document_collection_t* document
// only place that's ever written to. if a journal is full, it will have been
// sealed and synced already
size_t const n = collection->_journals._length;
for (size_t i = 0; i < n; ++i) {
TRI_datafile_t* datafile = static_cast<TRI_datafile_t*>(collection->_journals._buffer[i]);
@ -1204,9 +1231,11 @@ char* CollectorThread::nextFreeMarkerPosition (TRI_document_collection_t* docume
TRI_datafile_t* datafile = TRI_CreateJournalDocumentCollection(document, tick, targetSize);
if (datafile == nullptr) {
LOG_ERROR("unable to create journal file");
// could not create a datafile
break;
int res = TRI_errno();
// could not create a datafile, this is a serious error
TRI_UNLOCK_JOURNAL_ENTRIES_DOC_COLLECTION(document);
THROW_ARANGO_EXCEPTION(res);
}
}
@ -1226,10 +1255,17 @@ leave:
// create a local datafile info struct
createDfi(cache, datafile->_fid);
cache->addBarrier(TRI_CreateBarrierElement(&document->_barrierList));
if (! _inRecovery) {
// we only need the barriers when we are outside the recovery
// the compactor will not run during recovery
cache->addBarrier(TRI_CreateBarrierElement(&document->_barrierList));
}
}
}
else {
THROW_ARANGO_EXCEPTION(TRI_ERROR_ARANGO_NO_JOURNAL);
}
return dst;
}

View File

@ -227,7 +227,8 @@ namespace triagens {
////////////////////////////////////////////////////////////////////////////////
inline bool canBeCollected () const {
return (_status == StatusType::SEALED);
return (_status == StatusType::SEALED ||
_status == StatusType::COLLECTION_REQUESTED);
}
////////////////////////////////////////////////////////////////////////////////
@ -279,7 +280,15 @@ namespace triagens {
}
////////////////////////////////////////////////////////////////////////////////
/// @brief change the logfile status
/// @brief change the logfile status, without assertions
////////////////////////////////////////////////////////////////////////////////
void forceStatus (StatusType status) {
_status = status;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief change the logfile status, with assertions
////////////////////////////////////////////////////////////////////////////////
void setStatus (StatusType status) {

View File

@ -680,16 +680,7 @@ SlotInfo LogfileManager::allocate (void const* src,
uint32_t size) {
if (! _allowWrites) {
// no writes allowed
// check if this is a shape or attribute marker, which is allowed even in
// read-only mode
TRI_df_marker_t const* marker = static_cast<TRI_df_marker_t const*>(src);
if (marker->_type != TRI_WAL_MARKER_ATTRIBUTE &&
marker->_type != TRI_WAL_MARKER_SHAPE) {
return SlotInfo(TRI_ERROR_ARANGO_READ_ONLY);
}
// fallthrough
return SlotInfo(TRI_ERROR_ARANGO_READ_ONLY);
}
if (size > MaxEntrySize()) {
@ -748,6 +739,35 @@ SlotInfoCopy LogfileManager::allocateAndWrite (void* src,
}
}
////////////////////////////////////////////////////////////////////////////////
/// @brief set all open logfiles to status sealed
////////////////////////////////////////////////////////////////////////////////
void LogfileManager::setAllSealed () {
READ_LOCKER(_logfilesLock);
for (auto it = _logfiles.begin(); it != _logfiles.end(); ++it) {
Logfile* logfile = (*it).second;
if (logfile != nullptr) {
Logfile::StatusType status = logfile->status();
if (status == Logfile::StatusType::OPEN ||
status == Logfile::StatusType::SEAL_REQUESTED) {
// set all logfiles to sealed status so they can be collected
// we don't care about the previous status here
logfile->forceStatus(Logfile::StatusType::SEALED);
if (logfile->id() > _lastSealedId) {
// adjust last sealed id
_lastSealedId = logfile->id();
}
}
}
}
}
////////////////////////////////////////////////////////////////////////////////
/// @brief finalise and seal the currently open logfile
/// this is useful to ensure that any open writes up to this point have made
@ -757,31 +777,55 @@ SlotInfoCopy LogfileManager::allocateAndWrite (void* src,
int LogfileManager::flush (bool waitForSync,
bool waitForCollector,
bool writeShutdownFile) {
Logfile::IdType currentLogfileId;
TRI_ASSERT(! _inRecovery);
Logfile::IdType lastOpenLogfileId;
Logfile::IdType lastSealedLogfileId;
{
READ_LOCKER(_logfilesLock);
currentLogfileId = _lastOpenedId;
lastOpenLogfileId = _lastOpenedId;
lastSealedLogfileId = _lastSealedId;
}
if (lastOpenLogfileId == 0) {
return TRI_ERROR_NO_ERROR;
}
LOG_TRACE("about to flush active WAL logfile. currentLogfileId: %llu, waitForSync: %d, waitForCollector: %d",
(unsigned long long) currentLogfileId,
(unsigned long long) lastOpenLogfileId,
(int) waitForSync,
(int) waitForCollector);
int res = _slots->flush(waitForSync);
if (res == TRI_ERROR_NO_ERROR || res == TRI_ERROR_ARANGO_DATAFILE_EMPTY) {
if (waitForCollector) {
this->waitForCollector(currentLogfileId);
}
if (res != TRI_ERROR_NO_ERROR &&
res != TRI_ERROR_ARANGO_DATAFILE_EMPTY) {
LOG_ERROR("unexpected error in WAL flush request: %s", TRI_errno_string(res));
return res;
}
if (writeShutdownFile) {
// update the file with the last tick, last sealed etc.
return writeShutdownInfo(false);
if (waitForCollector) {
if (res == TRI_ERROR_NO_ERROR) {
// we need to wait for the collector...
this->waitForCollector(lastOpenLogfileId);
}
else if (res == TRI_ERROR_ARANGO_DATAFILE_EMPTY) {
// current logfile is empty and cannot be collected
// we need to wait for the collector to collect the previously sealed datafile
if (lastSealedLogfileId > 0) {
this->waitForCollector(lastSealedLogfileId);
}
}
}
return res;
if (writeShutdownFile) {
// update the file with the last tick, last sealed etc.
return writeShutdownInfo(false);
}
return TRI_ERROR_NO_ERROR;
}
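From JavaScript this code path is reached via internal.flushWal(), as used by the test helper and the new recovery test further below. A small sketch (forwarding the two booleans to waitForSync/waitForCollector is an assumption based on the JS_FlushWal binding earlier in this diff):

// flush the WAL and block until the collector has processed the data,
// e.g. right before a test intentionally kills the server
var internal = require("internal");
internal.flushWal(true, true); // waitForSync, waitForCollector (assumed order)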
////////////////////////////////////////////////////////////////////////////////
@ -954,6 +998,11 @@ Logfile* LogfileManager::getWriteableLogfile (uint32_t size,
static const uint64_t SleepTime = 10 * 1000;
static const uint64_t MaxIterations = 1000;
size_t iterations = 0;
TRI_DEBUG_INTENTIONAL_FAIL_IF("LogfileManagerGetWriteableLogfile") {
// intentionally don't return a logfile
return nullptr;
}
while (++iterations < 1000) {
{
@ -982,7 +1031,6 @@ Logfile* LogfileManager::getWriteableLogfile (uint32_t size,
// and physically remove the file
// note: this will also delete the logfile object!
removeLogfile(logfile, false);
}
else {
++it;
@ -1103,6 +1151,12 @@ void LogfileManager::setCollectionRequested (Logfile* logfile) {
{
WRITE_LOCKER(_logfilesLock);
if (logfile->status() == Logfile::StatusType::COLLECTION_REQUESTED) {
// the collector already asked for this file, but couldn't process it
// due to some exception
return;
}
logfile->setStatus(Logfile::StatusType::COLLECTION_REQUESTED);
}
@ -1229,6 +1283,10 @@ bool LogfileManager::runRecovery () {
_droppedDatabases = state.droppedDatabases;
_droppedCollections = state.droppedCollections;
}
// "seal" any open logfiles so the collector can copy over everything
this->setAllSealed();
int res = startCollectorThread();
@ -1239,10 +1297,8 @@ bool LogfileManager::runRecovery () {
TRI_ASSERT(_collectorThread != nullptr);
LOG_TRACE("issuing recovery flush request");
// flush any open logfiles so the collector can copy over everything
this->flush(true, true, false);
LOG_TRACE("waiting for collector to catch up");
waitForCollector(_lastOpenedId);
{
// reset the list of failed transactions
@ -1252,6 +1308,16 @@ bool LogfileManager::runRecovery () {
_droppedCollections.clear();
}
// finished recovery
_inRecovery = false;
// from now on, we allow writes to the logfile
allowWrites(true);
// tell the collector that the recovery is over now
_collectorThread->recoveryDone();
_allocatorThread->recoveryDone();
// unload all collections to reset statistics, start compactor threads etc.
res = TRI_InitDatabasesServer(_server);
@ -1260,17 +1326,6 @@ bool LogfileManager::runRecovery () {
return false;
}
// TODO: how long must we sleep here?
sleep(3);
// tell the collector that the recovery is over now
_inRecovery = false;
_collectorThread->recoveryDone();
_allocatorThread->recoveryDone();
// from now on, we allow writes to the logfile
allowWrites(true);
if (logfilesToCollect > 0) {
LOG_INFO("WAL recovery finished successfully");
}
@ -1607,13 +1662,17 @@ int LogfileManager::openLogfiles () {
id > _lastSealedId) {
_lastSealedId = id;
}
if ((logfile->status() == Logfile::StatusType::SEALED || logfile->status() == Logfile::StatusType::OPEN) &&
id > _lastOpenedId) {
_lastOpenedId = id;
}
(*it).second = logfile;
++it;
}
_lastOpenedId = _lastSealedId;
return TRI_ERROR_NO_ERROR;
}

View File

@ -182,6 +182,14 @@ struct RecoverState {
return _reserveLogfiles;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief set the number of reserve logfiles
////////////////////////////////////////////////////////////////////////////////
inline void reserveLogfiles (uint32_t value) {
_reserveLogfiles = value;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief get the number of historic logfiles to keep
////////////////////////////////////////////////////////////////////////////////
@ -287,6 +295,12 @@ struct RecoverState {
uint32_t,
bool);
////////////////////////////////////////////////////////////////////////////////
/// @brief set all open logfiles to status sealed
////////////////////////////////////////////////////////////////////////////////
void setAllSealed ();
////////////////////////////////////////////////////////////////////////////////
/// @brief finalise and seal the currently open logfile
/// this is useful to ensure that any open writes up to this point have made
@ -409,6 +423,14 @@ struct RecoverState {
_allowWrites = value;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief whether or not we are in the recovery mode
////////////////////////////////////////////////////////////////////////////////
inline bool isInRecovery () const {
return _inRecovery;
}
// -----------------------------------------------------------------------------
// --SECTION-- private methods
// -----------------------------------------------------------------------------

View File

@ -118,11 +118,12 @@ Slot::TickType Slots::lastCommittedTick () {
SlotInfo Slots::nextUnused (uint32_t size) {
// we need to use the aligned size for writing
uint32_t alignedSize = TRI_DF_ALIGN_BLOCK(size);
int iterations = 0;
bool hasWaited = false;
TRI_ASSERT(size > 0);
while (true) {
while (++iterations < 1000) {
{
MUTEX_LOCKER(_lock);
@ -152,6 +153,8 @@ SlotInfo Slots::nextUnused (uint32_t size) {
// advance to next slot
slot = &_slots[_handoutIndex];
_logfileManager->setLogfileSealRequested(_logfile);
_logfile = nullptr;
}
// fetch the next free logfile (this may create a new one)
@ -159,6 +162,11 @@ SlotInfo Slots::nextUnused (uint32_t size) {
if (_logfile == nullptr) {
usleep(10 * 1000);
TRI_DEBUG_INTENTIONAL_FAIL_IF("LogfileManagerGetWriteableLogfile") {
return SlotInfo(TRI_ERROR_ARANGO_NO_JOURNAL);
}
// try again in next iteration
}
else if (status == Logfile::StatusType::EMPTY) {
@ -210,6 +218,8 @@ SlotInfo Slots::nextUnused (uint32_t size) {
guard.wait(10 * 1000);
}
}
return SlotInfo(TRI_ERROR_ARANGO_NO_JOURNAL);
}
////////////////////////////////////////////////////////////////////////////////
@ -267,7 +277,7 @@ SyncRegion Slots::getSyncRegion () {
break;
}
// LOG_INFO("group commit");
// this is a group commit!!
// update the region
region.size += (uint32_t) (static_cast<char*>(slot->mem()) - (region.mem + region.size) + slot->size());
@ -345,13 +355,14 @@ void Slots::returnSyncRegion (SyncRegion const& region) {
int Slots::closeLogfile (Slot::TickType& lastCommittedTick,
bool& worked) {
int iterations = 0;
bool hasWaited = false;
worked = false;
while (true) {
while (++iterations < 1000) {
{
MUTEX_LOCKER(_lock);
lastCommittedTick = _lastCommittedTick;
Slot* slot = &_slots[_handoutIndex];
@ -375,16 +386,20 @@ int Slots::closeLogfile (Slot::TickType& lastCommittedTick,
int res = writeFooter(slot);
if (res != TRI_ERROR_NO_ERROR) {
LOG_ERROR("could not write logfile footer: %s", TRI_errno_string(res));
return res;
}
_logfileManager->setLogfileSealRequested(_logfile);
// advance to next slot
slot = &_slots[_handoutIndex];
// invalidate the logfile so for the next write we'll use a
// new one
_logfile = nullptr;
worked = true;
return TRI_ERROR_NO_ERROR;
// fall-through intentional
}
TRI_ASSERT(_logfile == nullptr);
@ -394,6 +409,10 @@ int Slots::closeLogfile (Slot::TickType& lastCommittedTick,
Logfile::StatusType status = newLogfile(1);
if (_logfile == nullptr) {
TRI_DEBUG_INTENTIONAL_FAIL_IF("LogfileManagerGetWriteableLogfile") {
return TRI_ERROR_ARANGO_NO_JOURNAL;
}
usleep(10 * 1000);
// try again in next iteration
}
@ -402,11 +421,15 @@ int Slots::closeLogfile (Slot::TickType& lastCommittedTick,
int res = writeHeader(slot);
if (res != TRI_ERROR_NO_ERROR) {
LOG_ERROR("could not write logfile header: %s", TRI_errno_string(res));
return res;
}
// advance to next slot
slot = &_slots[_handoutIndex];
_logfileManager->setLogfileOpen(_logfile);
worked = false;
worked = true;
return TRI_ERROR_NO_ERROR;
}
else {
@ -434,6 +457,8 @@ int Slots::closeLogfile (Slot::TickType& lastCommittedTick,
guard.wait(10 * 1000);
}
}
return TRI_ERROR_ARANGO_NO_JOURNAL;
}
////////////////////////////////////////////////////////////////////////////////
@ -441,7 +466,7 @@ int Slots::closeLogfile (Slot::TickType& lastCommittedTick,
////////////////////////////////////////////////////////////////////////////////
int Slots::writeHeader (Slot* slot) {
TRI_df_header_marker_t header = _logfile->getHeaderMarker();
TRI_df_header_marker_t&& header = _logfile->getHeaderMarker();
size_t const size = header.base._size;
TRI_df_marker_t* mem = reinterpret_cast<TRI_df_marker_t*>(_logfile->reserve(size));
@ -461,7 +486,7 @@ int Slots::writeHeader (Slot* slot) {
int Slots::writeFooter (Slot* slot) {
TRI_ASSERT(_logfile != nullptr);
TRI_df_footer_marker_t footer = _logfile->getFooterMarker();
TRI_df_footer_marker_t&& footer = _logfile->getFooterMarker();
size_t const size = footer.base._size;
TRI_df_marker_t* mem = reinterpret_cast<TRI_df_marker_t*>(_logfile->reserve(size));

View File

@ -1,7 +1,5 @@
/*jslint indent: 2, maxlen: 120, vars: true, white: true, plusplus: true, nonpropdel: true, nomen: true, sloppy: true */
/*global require, assertEqual, assertNotEqual,
print, print_plain, COMPARE_STRING, NORMALIZE_STRING,
help, start_pager, stop_pager, start_pretty_print, stop_pretty_print, start_color_print, stop_color_print */
/*global require, assertEqual */
////////////////////////////////////////////////////////////////////////////////
/// @brief tests for client-specific functionality
@ -41,38 +39,49 @@ function changeOperationModePositiveCaseTestSuite () {
return {
////////////////////////////////////////////////////////////////////////////////
/// @brief set up
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// @brief set up
////////////////////////////////////////////////////////////////////////////////
setUp : function () {
},
////////////////////////////////////////////////////////////////////////////////
/// @brief tear down
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// @brief tear down
////////////////////////////////////////////////////////////////////////////////
tearDown : function () {
},
tearDown : function () {
// reset server-mode after executing this test
db._executeTransaction({
collections: { },
action: function () {
var db = require('internal').db;
db._changeMode('Normal');
}
});
},
////////////////////////////////////////////////////////////////////////////////
/// @brief tests if the change of the operation mode of the arango server
/// can be done.
/// Note: this test needs an arango server with endpoint unix:...
/// See target unittests-shell-client-readonly
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// @brief tests if the change of the operation mode of the arango server
/// can be done.
/// Note: this test needs an arango server with endpoint unix:...
/// See target unittests-shell-client-readonly
////////////////////////////////////////////////////////////////////////////////
testChangeMode : function () {
var result =
db._executeTransaction({collections: {},
action: function () {
var db = require('internal').db;
var result = db._changeMode('ReadOnly');
return result;
}
});
assertTrue(result);
}
testChangeMode : function () {
// we need to use a transaction (the transaction is shipped to the
// server and executed there) to execute the changeMode function call on
// the server...
var result = db._executeTransaction({
collections: { },
action: function () {
var db = require('internal').db;
var result = db._changeMode('ReadOnly');
return result;
}
});
assertTrue(result);
}
};
}

View File

@ -1,7 +1,5 @@
/*jslint indent: 2, maxlen: 120, vars: true, white: true, plusplus: true, nonpropdel: true, nomen: true, sloppy: true */
/*global require, assertEqual, assertNotEqual,
print, print_plain, COMPARE_STRING, NORMALIZE_STRING,
help, start_pager, stop_pager, start_pretty_print, stop_pretty_print, start_color_print, stop_color_print */
/*global require, assertEqual */
////////////////////////////////////////////////////////////////////////////////
/// @brief tests for client-specific functionality
@ -42,42 +40,44 @@ function changeOperationModeNegativeCaseTestSuite () {
return {
////////////////////////////////////////////////////////////////////////////////
/// @brief set up
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// @brief set up
////////////////////////////////////////////////////////////////////////////////
setUp : function () {
},
////////////////////////////////////////////////////////////////////////////////
/// @brief tear down
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// @brief tear down
////////////////////////////////////////////////////////////////////////////////
tearDown : function () {
},
tearDown : function () {
},
////////////////////////////////////////////////////////////////////////////////
/// @brief tests if the change of the operation mode of the arango server
/// can be done.
/// Note: this test needs an arango server with endpoint unix:...
/// See target unittests-shell-client-readonly
////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////
/// @brief tests if the change of the operation mode of the arango server
/// can be done.
/// Note: this test needs an arango server with endpoint unix:...
/// See target unittests-shell-client-readonly
////////////////////////////////////////////////////////////////////////////////
testTryChangeMode : function () {
var modified = true;
try {
db._executeTransaction({collections: {},
action: function () {
var db = require('internal').db;
var result = db._changeMode('ReadOnly');
return result;
}
});} catch(e) {
assertEqual(arangodb.errors.ERROR_FORBIDDEN.code, e.errorNum);
modified = false;
}
assertFalse(modified);
}
testTryChangeMode : function () {
try {
db._executeTransaction({
collections: {},
action: function () {
var db = require('internal').db;
var result = db._changeMode('ReadOnly');
return result;
}
});
fail();
}
catch (e) {
assertEqual(arangodb.errors.ERROR_FORBIDDEN.code, e.errorNum);
}
}
};
}

View File

@ -63,10 +63,16 @@ function createStatisticsCollection (name) {
var collection = db._collection(name);
if (collection === null) {
var r = db._create(name, { isSystem: true, waitForSync: false });
var r = null;
try {
r = db._create(name, { isSystem: true, waitForSync: false });
}
catch (err) {
}
if (! r) {
return;
return false;
}
collection = db._collection(name);
@ -75,6 +81,8 @@ function createStatisticsCollection (name) {
if (collection !== null) {
collection.ensureSkiplist("time");
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
@ -91,7 +99,7 @@ function createStatisticsCollections () {
'use strict';
if (initialized) {
return;
return true;
}
initialized = true;
@ -100,8 +108,12 @@ function createStatisticsCollections () {
var i;
for (i = 0; i < names.length; ++i) {
createStatisticsCollection(names[i]);
if (! createStatisticsCollection(names[i])) {
return false;
}
}
return true;
}
////////////////////////////////////////////////////////////////////////////////
@ -478,7 +490,9 @@ exports.STATISTICS_HISTORY_INTERVAL = 15 * 60;
exports.historian = function () {
"use strict";
createStatisticsCollections();
if (! createStatisticsCollections()) {
return;
}
var statsRaw = db._statisticsRaw;
var statsCol = db._statistics;
@ -537,7 +551,9 @@ exports.historian = function () {
exports.historianAverage = function () {
"use strict";
createStatisticsCollections();
if (! createStatisticsCollections()) {
return;
}
var stats15m = db._statistics15;
@ -589,7 +605,9 @@ exports.historianAverage = function () {
exports.garbageCollector = function () {
'use strict';
createStatisticsCollections();
if (! createStatisticsCollections()) {
return;
}
var time = internal.time();

View File

@ -55,10 +55,21 @@ exports.Helper = {
collection.unload();
internal.flushWal();
var iterations = 0;
while (collection.status() !== arangodb.ArangoCollection.STATUS_UNLOADED) {
collection.unload();
internal.wait(1);
internal.wait(0.25);
++iterations;
if (iterations === 20) {
require("console").log("waiting for collection " + collection.name() + " to unload");
}
else if (iterations === 400) {
throw "waited too long for unload of collection " + collection.name();
}
}
},
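Typical use of the reworked helper (a sketch; the collection name is a placeholder):

var db = require("org/arangodb").db;
var testHelper = require("org/arangodb/test-helper").Helper;
var c = db._create("example");
c.unload();
testHelper.waitUnload(c); // polls every 0.25s, logs after ~5s, throws after ~100s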

View File

@ -17,7 +17,12 @@ runJSUnityTests = function (tests) {
var result = true;
_.each(tests, function (file) {
print("\nRunning JSUnity test from file '" + file + "'");
if (result) {
print("\nRunning JSUnity test from file '" + file + "'");
}
else {
print("\nSkipping JSUnity test from file '" + file + "' due to previous errors");
}
try {
result = result && runTest(file);

View File

@@ -27,6 +27,7 @@
var jsunity = require("jsunity");
var internal = require("internal");
var testHelper = require("org/arangodb/test-helper").Helper;
// -----------------------------------------------------------------------------
// --SECTION-- collection methods
@@ -138,9 +139,9 @@ function CollectionVolatileSuite () {
c.save({"test": true});
assertEqual(1, c.count());
c.unload();
testHelper.waitUnload(c);
internal.wait(4);
assertEqual(true, c.properties().isVolatile);
assertEqual(0, c.count());
},
@@ -159,8 +160,7 @@
assertEqual(10000, c.count());
c.unload();
c = null;
testHelper.waitUnload(c);
internal.wait(5);
c = internal.db[cn];

View File

@@ -1,7 +1,7 @@
/*jslint indent: 2, nomen: true, maxlen: 120, sloppy: true, vars: true, white: true, plusplus: true, nonpropdel: true */
/*global require, db, ArangoCollection, ArangoDatabase, ArangoCursor, module,
ShapedJson, RELOAD_AUTH, SYS_DEFINE_ACTION, SYS_EXECUTE_GLOBAL_CONTEXT_FUNCTION,
AHUACATL_RUN, AHUACATL_PARSE, AHUACATL_EXPLAIN, WAL_FLUSH */
AHUACATL_RUN, AHUACATL_PARSE, AHUACATL_EXPLAIN, WAL_FLUSH, WAL_ADJUST */
////////////////////////////////////////////////////////////////////////////////
/// @brief module "internal"
@@ -111,6 +111,15 @@
delete WAL_FLUSH;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief adjusts the write-ahead log configuration
////////////////////////////////////////////////////////////////////////////////
if (typeof WAL_ADJUST !== "undefined") {
internal.adjustWal = WAL_ADJUST;
delete WAL_ADJUST;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief defines an action
////////////////////////////////////////////////////////////////////////////////
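The WAL_ADJUST hook wired up above is what the new transaction tests in this commit call through internal.adjustWal. A hedged sketch of the pattern (only the reserveLogfiles option appears in this diff; the semantics of flushWal's two boolean flags, presumably waitForSync and waitForCollector, are an assumption):

var internal = require("internal");

// keep only one spare logfile so fresh logfiles must be allocated on demand
internal.adjustWal({ reserveLogfiles: 1 });

// flush the currently open logfile; flag semantics assumed, see above
internal.flushWal(true, true);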

View File

@@ -0,0 +1,89 @@
var db = require("org/arangodb").db;
var internal = require("internal");
var jsunity = require("jsunity");
function runSetup () {
internal.debugClearFailAt();
db._drop("UnitTestsRecovery");
var c = db._create("UnitTestsRecovery");
internal.debugSetFailAt("CreateJournalDocumentCollection");
db._executeTransaction({
collections: {
write: "UnitTestsRecovery"
},
action: function () {
var db = require("org/arangodb").db;
var i, c = db._collection("UnitTestsRecovery");
for (i = 0; i < 100000; ++i) {
c.save({ _key: "test" + i, value1: "test" + i, value2: i }, true); // wait for sync
}
for (i = 0; i < 100000; i += 2) {
c.remove("test" + i, true);
}
}
});
internal.flushWal();
internal.wait(5);
internal.debugSegfault("crashing server");
}
////////////////////////////////////////////////////////////////////////////////
/// @brief test suite
////////////////////////////////////////////////////////////////////////////////
function recoverySuite () {
jsunity.jsUnity.attachAssertions();
return {
setUp: function () {
},
tearDown: function () {
},
////////////////////////////////////////////////////////////////////////////////
/// @brief test whether we can restore the transaction data
////////////////////////////////////////////////////////////////////////////////
testDiskFullNoJournal : function () {
var i, c = db._collection("UnitTestsRecovery");
assertEqual(50000, c.count());
for (i = 0; i < 100000; ++i) {
if (i % 2 === 0) {
assertFalse(c.exists("test" + i));
}
else {
assertEqual("test" + i, c.document("test" + i)._key);
assertEqual("test" + i, c.document("test" + i).value1);
assertEqual(i, c.document("test" + i).value2);
}
}
}
};
}
////////////////////////////////////////////////////////////////////////////////
/// @brief executes the test suite
////////////////////////////////////////////////////////////////////////////////
function main (argv) {
if (argv[1] === "setup") {
runSetup();
return 0;
}
else {
jsunity.run(recoverySuite);
return jsunity.done() ? 0 : 1;
}
}

View File

@@ -0,0 +1,78 @@
var db = require("org/arangodb").db;
var internal = require("internal");
var jsunity = require("jsunity");
function runSetup () {
internal.debugClearFailAt();
db._drop("UnitTestsRecovery");
var c = db._create("UnitTestsRecovery"), i;
internal.debugSetFailAt("LogfileManagerWriteShutdown");
db._executeTransaction({
collections: {
write: [ "UnitTestsRecovery" ]
},
action: function () {
var db = require("org/arangodb").db, i;
var c = db._collection("UnitTestsRecovery");
for (i = 0; i < 200000; ++i) {
c.save({ _key: "test" + i, value1: i, value2: "foobarbaz" + i });
}
}
});
internal.debugSegfault("crashing server");
}
////////////////////////////////////////////////////////////////////////////////
/// @brief test suite
////////////////////////////////////////////////////////////////////////////////
function recoverySuite () {
jsunity.jsUnity.attachAssertions();
return {
setUp: function () {
},
tearDown: function () {
},
////////////////////////////////////////////////////////////////////////////////
/// @brief test whether we can restore data that spans multiple logfiles
////////////////////////////////////////////////////////////////////////////////
testNoShutdownInfoMultipleLogs : function () {
var c = db._collection("UnitTestsRecovery");
assertEqual(200000, c.count());
var i;
for (i = 0; i < 200000; ++i) {
assertEqual(i, c.document("test" + i).value1);
assertEqual("foobarbaz" + i, c.document("test" + i).value2);
}
}
};
}
////////////////////////////////////////////////////////////////////////////////
/// @brief executes the test suite
////////////////////////////////////////////////////////////////////////////////
function main (argv) {
if (argv[1] === "setup") {
runSetup();
return 0;
}
else {
jsunity.run(recoverySuite);
return jsunity.done() ? 0 : 1;
}
}

View File

@@ -0,0 +1,88 @@
var db = require("org/arangodb").db;
var internal = require("internal");
var jsunity = require("jsunity");
function runSetup () {
internal.debugClearFailAt();
db._drop("UnitTestsRecovery");
var c = db._create("UnitTestsRecovery"), i;
internal.debugSetFailAt("LogfileManagerWriteShutdown");
for (i = 0; i < 100; ++i) {
c.save({ _key: "old" + i, a: i });
}
db._executeTransaction({
collections: {
write: [ "UnitTestsRecovery" ]
},
action: function () {
var db = require("org/arangodb").db;
var c = db._collection("UnitTestsRecovery");
var i;
for (i = 0; i < 10000; ++i) {
c.save({ _key: "test" + i, value1: i, value2: "foobarbaz" + i }, true);
}
for (i = 0; i < 50; ++i) {
c.remove("old" + i);
}
}
});
internal.debugSegfault("crashing server");
}
////////////////////////////////////////////////////////////////////////////////
/// @brief test suite
////////////////////////////////////////////////////////////////////////////////
function recoverySuite () {
jsunity.jsUnity.attachAssertions();
return {
setUp: function () {
},
tearDown: function () {
},
////////////////////////////////////////////////////////////////////////////////
/// @brief test whether we can restore the data without a WAL flush
////////////////////////////////////////////////////////////////////////////////
testNoShutdownInfoNoFlush : function () {
var c = db._collection("UnitTestsRecovery");
assertEqual(10050, c.count());
var i;
for (i = 0; i < 10000; ++i) {
assertEqual(i, c.document("test" + i).value1);
assertEqual("foobarbaz" + i, c.document("test" + i).value2);
}
for (i = 50; i < 100; ++i) {
assertEqual(i, c.document("old" + i).a);
}
}
};
}
////////////////////////////////////////////////////////////////////////////////
/// @brief executes the test suite
////////////////////////////////////////////////////////////////////////////////
function main (argv) {
if (argv[1] === "setup") {
runSetup();
return 0;
}
else {
jsunity.run(recoverySuite);
return jsunity.done() ? 0 : 1;
}
}

View File

@@ -9,13 +9,11 @@ function runSetup () {
db._drop("UnitTestsRecovery");
var c = db._create("UnitTestsRecovery"), i;
for (i = 0; i < 1000; ++i) {
c.save({ _key: "test" + i, value1: i, value2: "foobarbaz" + i });
}
internal.debugSetFailAt("LogfileManagerWriteShutdown");
// flush the logfile but do not write shutdown info
internal.flushWal(true, true);

View File

@@ -4035,6 +4035,7 @@ function transactionServerFailuresSuite () {
write: [ cn ],
},
action: function () {
var i;
for (i = 100; i < 150; ++i) {
c.save({ _key: "test" + i, a: i });
}
@@ -4062,6 +4063,121 @@ function transactionServerFailuresSuite () {
assertEqual(undefined, c.document("test" + i).b);
}
});
},
////////////////////////////////////////////////////////////////////////////////
/// @brief test disk full while a transaction is being collected
////////////////////////////////////////////////////////////////////////////////
testDiskFullWhenCollectingTransaction : function () {
internal.debugClearFailAt();
db._drop(cn);
c = db._create(cn);
// has no effect yet, but will make creation of new journals fail later on
internal.debugSetFailAt("CreateJournalDocumentCollection");
// adjust the configuration and make sure we flush all reserve logfiles
internal.adjustWal({ reserveLogfiles: 1 });
var i;
for (i = 0; i < 100; ++i) {
c.save({ _key: "test" + i, a: i });
}
assertEqual(100, c.count());
for (i = 0; i < 4; ++i) {
// write something into the logs so we can flush 'em
c.save({ _key: "foo" });
c.remove("foo");
internal.flushWal(true, false);
}
// one more to populate a new logfile
c.save({ _key: "foo" });
c.remove("foo");
TRANSACTION({
collections: {
write: [ cn ],
},
action: function () {
var i;
for (i = 100; i < 150; ++i) {
c.save({ _key: "test" + i, a: i });
}
assertEqual(150, c.count());
// make sure we fill up the logfile
for (i = 0; i < 100000; ++i) {
c.save({ _key: "foo" + i, value: "the quick brown foxx jumped over the lazy dog" });
}
}
});
assertEqual(100150, c.count());
var fig = c.figures();
assertEqual(100160, fig.uncollectedLogfileEntries);
internal.debugClearFailAt();
internal.flushWal(true, true);
assertEqual(100150, c.count());
testHelper.waitUnload(c);
// data should be there after unload
assertEqual(100150, c.count());
},
////////////////////////////////////////////////////////////////////////////////
/// @brief test: disk full during transaction
////////////////////////////////////////////////////////////////////////////////
testDiskFullDuringTransaction : function () {
internal.debugClearFailAt();
db._drop(cn);
c = db._create(cn);
var i;
for (i = 0; i < 100; ++i) {
c.save({ _key: "test" + i, a: i });
}
assertEqual(100, c.count());
internal.flushWal(true, true);
try {
TRANSACTION({
collections: {
write: [ cn ],
},
action: function () {
var i;
for (i = 100; i < 150; ++i) {
c.save({ _key: "test" + i, a: i });
}
assertEqual(150, c.count());
// make it impossible to obtain a writeable logfile; the following saves must fail
internal.debugSetFailAt("LogfileManagerGetWriteableLogfile");
for (i = 0; i < 200000; ++i) {
c.save({ _key: "foo" + i, value: "the quick brown foxx jumped over the lazy dog" });
}
fail();
}
});
}
catch (err) {
assertEqual(internal.errors.ERROR_ARANGO_NO_JOURNAL.code, err.errorNum);
}
assertEqual(100, c.count());
internal.debugClearFailAt();
}
};
@@ -4075,6 +4191,11 @@ function transactionServerFailuresSuite () {
/// @brief executes the test suites
////////////////////////////////////////////////////////////////////////////////
// only run this test suite if server-side failures are enabled
if (internal.debugCanUseFailAt()) {
jsunity.run(transactionServerFailuresSuite);
}
jsunity.run(transactionInvocationSuite);
jsunity.run(transactionCollectionsSuite);
jsunity.run(transactionOperationsSuite);
@@ -4085,11 +4206,6 @@ jsunity.run(transactionCountSuite);
jsunity.run(transactionCrossCollectionSuite);
jsunity.run(transactionConstraintsSuite);
// only run this test suite if server-side failures are enabled
if (internal.debugCanUseFailAt()) {
jsunity.run(transactionServerFailuresSuite);
}
return jsunity.done();
// -----------------------------------------------------------------------------
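Because the failure-point suite now runs before the generic suites, the guard around it is what keeps regular builds working: the suite is registered only when the server was compiled with failure points enabled. A short sketch assembled from calls that appear in this commit:

var internal = require("internal");

if (internal.debugCanUseFailAt()) {
  // arm a failure point, exercise the code path, then disarm it again
  internal.debugSetFailAt("LogfileManagerGetWriteableLogfile");
  // ... run the code that should hit the failure point ...
  internal.debugClearFailAt();
}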

View File

@@ -0,0 +1,134 @@
////////////////////////////////////////////////////////////////////////////////
/// @brief test the write-ahead log interface
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2010-2012 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is triAGENS GmbH, Cologne, Germany
///
/// @author Dr. Frank Celler
/// @author Copyright 2012, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////
var jsunity = require("jsunity");
var arangodb = require("org/arangodb");
var testHelper = require("org/arangodb/test-helper").Helper;
var db = arangodb.db;
var internal = require("internal");
// -----------------------------------------------------------------------------
// --SECTION-- wal functions
// -----------------------------------------------------------------------------
////////////////////////////////////////////////////////////////////////////////
/// @brief test suite
////////////////////////////////////////////////////////////////////////////////
function WalSuite () {
var cn = "UnitTestsWal";
var c;
return {
setUp: function () {
db._drop(cn);
c = db._create(cn);
},
tearDown: function () {
db._drop(cn);
c = null;
},
////////////////////////////////////////////////////////////////////////////////
/// @brief test max tick
////////////////////////////////////////////////////////////////////////////////
testMaxTickEmptyCollection : function () {
var fig = c.figures();
// we shouldn't have a tick yet
assertEqual("0", fig.lastTick);
assertEqual(0, fig.uncollectedLogfileEntries);
},
////////////////////////////////////////////////////////////////////////////////
/// @brief test max tick in a non-empty collection
////////////////////////////////////////////////////////////////////////////////
testMaxTickNonEmptyCollection : function () {
internal.flushWal(true, true);
var i;
for (i = 0; i < 100; ++i) {
c.save({ test: i });
}
// we shouldn't have a tick yet
var fig = c.figures();
assertEqual("0", fig.lastTick);
assertTrue(fig.uncollectedLogfileEntries > 0);
internal.flushWal(true, true);
// now we should have a tick
fig = c.figures();
assertNotEqual("0", fig.lastTick);
assertEqual(0, fig.uncollectedLogfileEntries);
},
////////////////////////////////////////////////////////////////////////////////
/// @brief test max tick after unloading the collection
////////////////////////////////////////////////////////////////////////////////
testMaxTickAfterUnload : function () {
var i;
for (i = 0; i < 100; ++i) {
c.save({ test: i });
}
internal.flushWal(true, true);
testHelper.waitUnload(c);
// we should have a tick and no uncollected entries
var fig = c.figures();
assertNotEqual("0", fig.lastTick);
assertEqual(0, fig.uncollectedLogfileEntries);
}
};
}
// -----------------------------------------------------------------------------
// --SECTION-- main
// -----------------------------------------------------------------------------
////////////////////////////////////////////////////////////////////////////////
/// @brief executes the test suites
////////////////////////////////////////////////////////////////////////////////
jsunity.run(WalSuite);
return jsunity.done();
// Local Variables:
// mode: outline-minor
// outline-regexp: "^\\(/// @brief\\|/// @addtogroup\\|// --SECTION--\\|/// @page\\|/// @}\\)"
// End: