1
0
Fork 0

fixed tests

This commit is contained in:
Jan Steemann 2014-06-18 16:34:11 +02:00
parent 0567e64f2f
commit 8b9cdbe075
30 changed files with 1165 additions and 334 deletions

View File

@ -112,8 +112,7 @@ SERVER_OPT := \
--javascript.app-path @top_srcdir@/js/apps \ --javascript.app-path @top_srcdir@/js/apps \
--javascript.startup-directory @top_srcdir@/js \ --javascript.startup-directory @top_srcdir@/js \
--log.file "" \ --log.file "" \
--ruby.action-directory @top_srcdir@/mr/actions \ --wal.reserve-logfiles 1 \
--ruby.modules-path @top_srcdir@/mr/server/modules:@top_srcdir@/mr/common/modules \
--server.threads 4 \ --server.threads 4 \
$(SERVER_START) $(SERVER_START)
@ -201,8 +200,8 @@ RECOVERY_TESTS = \
execute-recovery-test: execute-recovery-test:
@rm -rf "$(VOCDIR)" @rm -rf "$(VOCDIR)"
@mkdir -p "$(VOCDIR)/databases" @mkdir -p "$(VOCDIR)/databases"
@builddir@/bin/arangod "$(VOCDIR)" --no-server $(SERVER_OPT) --server.threads 1 --javascript.script "@top_srcdir@/js/server/tests/recovery/$(RECOVERY_SCRIPT).js" --javascript.script-parameter setup || true # the server will crash with segfault intentionally in this test @builddir@/bin/arangod "$(VOCDIR)" --no-server $(SERVER_OPT) --server.threads 1 --wal.reserve-logfiles 1 --javascript.script "@top_srcdir@/js/server/tests/recovery/$(RECOVERY_SCRIPT).js" --javascript.script-parameter setup || true # the server will crash with segfault intentionally in this test
$(VALGRIND) @builddir@/bin/arangod --no-server "$(VOCDIR)" $(SERVER_OPT) --server.threads 1 --javascript.script "@top_srcdir@/js/server/tests/recovery/$(RECOVERY_SCRIPT).js" --javascript.script-parameter recover || test "x$(FORCE)" == "x1" $(VALGRIND) @builddir@/bin/arangod --no-server "$(VOCDIR)" $(SERVER_OPT) --server.threads 1 --wal.reserve-logfiles 1 --javascript.script "@top_srcdir@/js/server/tests/recovery/$(RECOVERY_SCRIPT).js" --javascript.script-parameter recover || test "x$(FORCE)" == "x1"
unittests-recovery: unittests-recovery:
@echo @echo
@ -211,16 +210,19 @@ unittests-recovery:
@echo "================================================================================" @echo "================================================================================"
@echo @echo
$(MAKE) execute-recovery-test PID=$(PID) RECOVERY_SCRIPT="multi-database-durability"
$(MAKE) execute-recovery-test PID=$(PID) RECOVERY_SCRIPT="disk-full-no-collection-journal"
$(MAKE) execute-recovery-test PID=$(PID) RECOVERY_SCRIPT="no-shutdown-info-with-flush"
$(MAKE) execute-recovery-test PID=$(PID) RECOVERY_SCRIPT="no-shutdown-info-no-flush"
$(MAKE) execute-recovery-test PID=$(PID) RECOVERY_SCRIPT="no-shutdown-info-multiple-logs"
$(MAKE) execute-recovery-test PID=$(PID) RECOVERY_SCRIPT="insert-update-remove" $(MAKE) execute-recovery-test PID=$(PID) RECOVERY_SCRIPT="insert-update-remove"
$(MAKE) execute-recovery-test PID=$(PID) RECOVERY_SCRIPT="insert-update-remove-distance" $(MAKE) execute-recovery-test PID=$(PID) RECOVERY_SCRIPT="insert-update-remove-distance"
$(MAKE) execute-recovery-test PID=$(PID) RECOVERY_SCRIPT="multi-database-durability"
$(MAKE) execute-recovery-test PID=$(PID) RECOVERY_SCRIPT="big-transaction-durability" $(MAKE) execute-recovery-test PID=$(PID) RECOVERY_SCRIPT="big-transaction-durability"
$(MAKE) execute-recovery-test PID=$(PID) RECOVERY_SCRIPT="transaction-durability" $(MAKE) execute-recovery-test PID=$(PID) RECOVERY_SCRIPT="transaction-durability"
$(MAKE) execute-recovery-test PID=$(PID) RECOVERY_SCRIPT="transaction-durability-multiple" $(MAKE) execute-recovery-test PID=$(PID) RECOVERY_SCRIPT="transaction-durability-multiple"
$(MAKE) execute-recovery-test PID=$(PID) RECOVERY_SCRIPT="corrupt-wal-marker-multiple" $(MAKE) execute-recovery-test PID=$(PID) RECOVERY_SCRIPT="corrupt-wal-marker-multiple"
$(MAKE) execute-recovery-test PID=$(PID) RECOVERY_SCRIPT="corrupt-wal-marker-single" $(MAKE) execute-recovery-test PID=$(PID) RECOVERY_SCRIPT="corrupt-wal-marker-single"
$(MAKE) execute-recovery-test PID=$(PID) RECOVERY_SCRIPT="no-shutdown-info" @rm -rf "$(VOCDIR)" core
@rm -rf "$(VOCDIR)"
@echo @echo
else else
@ -414,6 +416,7 @@ SHELL_COMMON = \
SHELL_SERVER_ONLY = \ SHELL_SERVER_ONLY = \
@top_srcdir@/js/server/tests/shell-readonly.js \ @top_srcdir@/js/server/tests/shell-readonly.js \
@top_srcdir@/js/server/tests/shell-wal-noncluster.js \
@top_srcdir@/js/server/tests/shell-sharding-helpers.js \ @top_srcdir@/js/server/tests/shell-sharding-helpers.js \
@top_srcdir@/js/server/tests/shell-compaction-noncluster.js \ @top_srcdir@/js/server/tests/shell-compaction-noncluster.js \
@top_srcdir@/js/server/tests/shell-tasks.js \ @top_srcdir@/js/server/tests/shell-tasks.js \

View File

@ -832,6 +832,8 @@ int ArangoServer::startupServer () {
// finally flush the write-ahead log so all data in the WAL goes into the collections // finally flush the write-ahead log so all data in the WAL goes into the collections
wal::LogfileManager::instance()->flush(true, true, true); wal::LogfileManager::instance()->flush(true, true, true);
// WAL recovery done after here
// setup the V8 actions // setup the V8 actions
if (startServer) { if (startServer) {
_applicationV8->prepareActions(); _applicationV8->prepareActions();
@ -874,9 +876,6 @@ int ArangoServer::startupServer () {
_applicationServer->start(); _applicationServer->start();
// load authentication
TRI_LoadAuthInfoVocBase(vocbase);
// if the authentication info could not be loaded, but authentication is turned on, // if the authentication info could not be loaded, but authentication is turned on,
// then we refuse to start // then we refuse to start
if (! vocbase->_authInfoLoaded && ! _disableAuthentication) { if (! vocbase->_authInfoLoaded && ! _disableAuthentication) {

View File

@ -3781,11 +3781,31 @@ static v8::Handle<v8::Value> JS_Transaction (v8::Arguments const& argv) {
return scope.Close(result); return scope.Close(result);
} }
////////////////////////////////////////////////////////////////////////////////
/// @brief adjusts the WAL configuration at runtime
////////////////////////////////////////////////////////////////////////////////
static v8::Handle<v8::Value> JS_AdjustWal (v8::Arguments const& argv) {
v8::HandleScope scope;
if (argv.Length() != 1 || ! argv[0]->IsObject()) {
TRI_V8_EXCEPTION_USAGE(scope, "adjustWal(<object>)");
}
v8::Handle<v8::Object> object = v8::Handle<v8::Object>::Cast(argv[0]);
if (object->Has(TRI_V8_STRING("reserveLogfiles"))) {
uint32_t logfiles = static_cast<uint32_t>(TRI_ObjectToUInt64(object->Get(TRI_V8_STRING("reserveLogfiles")), true));
triagens::wal::LogfileManager::instance()->reserveLogfiles(logfiles);
}
return scope.Close(v8::True());
}
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
/// @brief flush the currently open WAL logfile /// @brief flush the currently open WAL logfile
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
static v8::Handle<v8::Value> JS_Flush (v8::Arguments const& argv) { static v8::Handle<v8::Value> JS_FlushWal (v8::Arguments const& argv) {
v8::HandleScope scope; v8::HandleScope scope;
bool waitForSync = false; bool waitForSync = false;
@ -6262,8 +6282,8 @@ static v8::Handle<v8::Value> JS_FiguresVocbaseCol (v8::Arguments const& argv) {
indexes->Set(v8::String::New("count"), v8::Number::New((double) info->_numberIndexes)); indexes->Set(v8::String::New("count"), v8::Number::New((double) info->_numberIndexes));
indexes->Set(v8::String::New("size"), v8::Number::New((double) info->_sizeIndexes)); indexes->Set(v8::String::New("size"), v8::Number::New((double) info->_sizeIndexes));
indexes->Set(v8::String::New("lastTick"), V8TickId(info->_tickMax)); result->Set(v8::String::New("lastTick"), V8TickId(info->_tickMax));
indexes->Set(v8::String::New("uncollectedLogfileEntries"), v8::Number::New((double) info->_uncollectedLogfileEntries)); result->Set(v8::String::New("uncollectedLogfileEntries"), v8::Number::New((double) info->_uncollectedLogfileEntries));
TRI_Free(TRI_UNKNOWN_MEM_ZONE, info); TRI_Free(TRI_UNKNOWN_MEM_ZONE, info);
@ -7061,7 +7081,7 @@ static v8::Handle<v8::Value> JS_RotateVocbaseCol (v8::Arguments const& argv) {
v8::Handle<v8::Object> err; v8::Handle<v8::Object> err;
TRI_vocbase_col_t const* collection = UseCollection(argv.Holder(), &err); TRI_vocbase_col_t const* collection = UseCollection(argv.Holder(), &err);
if (collection == 0) { if (collection == nullptr) {
return scope.Close(v8::ThrowException(err)); return scope.Close(v8::ThrowException(err));
} }
@ -10132,7 +10152,8 @@ void TRI_InitV8VocBridge (v8::Handle<v8::Context> context,
TRI_AddGlobalFunctionVocbase(context, "LIST_ENDPOINTS", JS_ListEndpoints, true); TRI_AddGlobalFunctionVocbase(context, "LIST_ENDPOINTS", JS_ListEndpoints, true);
TRI_AddGlobalFunctionVocbase(context, "RELOAD_AUTH", JS_ReloadAuth, true); TRI_AddGlobalFunctionVocbase(context, "RELOAD_AUTH", JS_ReloadAuth, true);
TRI_AddGlobalFunctionVocbase(context, "TRANSACTION", JS_Transaction, true); TRI_AddGlobalFunctionVocbase(context, "TRANSACTION", JS_Transaction, true);
TRI_AddGlobalFunctionVocbase(context, "WAL_FLUSH", JS_Flush, true); TRI_AddGlobalFunctionVocbase(context, "WAL_FLUSH", JS_FlushWal, true);
TRI_AddGlobalFunctionVocbase(context, "WAL_ADJUST", JS_AdjustWal, true);
// ............................................................................. // .............................................................................
// create global variables // create global variables

View File

@ -432,16 +432,11 @@ bool TRI_InsertInitialAuthInfo (TRI_vocbase_t* vocbase) {
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
bool TRI_LoadAuthInfo (TRI_vocbase_t* vocbase) { bool TRI_LoadAuthInfo (TRI_vocbase_t* vocbase) {
TRI_vocbase_col_t* collection;
void** beg;
void** end;
void** ptr;
LOG_DEBUG("starting to load authentication and authorisation information"); LOG_DEBUG("starting to load authentication and authorisation information");
collection = TRI_LookupCollectionByNameVocBase(vocbase, TRI_COL_NAME_USERS); TRI_vocbase_col_t* collection = TRI_LookupCollectionByNameVocBase(vocbase, TRI_COL_NAME_USERS);
if (collection == NULL) { if (collection == nullptr) {
LOG_INFO("collection '_users' does not exist, no authentication available"); LOG_INFO("collection '_users' does not exist, no authentication available");
return false; return false;
} }
@ -460,26 +455,25 @@ bool TRI_LoadAuthInfo (TRI_vocbase_t* vocbase) {
TRI_WriteLockReadWriteLock(&vocbase->_authInfoLock); TRI_WriteLockReadWriteLock(&vocbase->_authInfoLock);
ClearAuthInfo(vocbase); ClearAuthInfo(vocbase);
beg = document->_primaryIndex._table; void** beg = document->_primaryIndex._table;
end = beg + document->_primaryIndex._nrAlloc; void** end = beg + document->_primaryIndex._nrAlloc;
ptr = beg; void** ptr = beg;
for (; ptr < end; ++ptr) { for (; ptr < end; ++ptr) {
if (*ptr) { if (*ptr) {
TRI_vocbase_auth_t* auth; TRI_vocbase_auth_t* auth;
TRI_doc_mptr_t const* d;
TRI_shaped_json_t shapedJson; TRI_shaped_json_t shapedJson;
d = (TRI_doc_mptr_t const*) *ptr; TRI_doc_mptr_t const* d = (TRI_doc_mptr_t const*) *ptr;
TRI_EXTRACT_SHAPED_JSON_MARKER(shapedJson, d->getDataPtr()); // PROTECTED by trx here TRI_EXTRACT_SHAPED_JSON_MARKER(shapedJson, d->getDataPtr()); // PROTECTED by trx here
auth = ConvertAuthInfo(vocbase, document, &shapedJson); auth = ConvertAuthInfo(vocbase, document, &shapedJson);
if (auth != NULL) { if (auth != nullptr) {
TRI_vocbase_auth_t* old = static_cast<TRI_vocbase_auth_t*>(TRI_InsertKeyAssociativePointer(&vocbase->_authInfo, auth->_username, auth, true)); TRI_vocbase_auth_t* old = static_cast<TRI_vocbase_auth_t*>(TRI_InsertKeyAssociativePointer(&vocbase->_authInfo, auth->_username, auth, true));
if (old != NULL) { if (old != nullptr) {
FreeAuthInfo(old); FreeAuthInfo(old);
} }
} }
@ -562,7 +556,10 @@ bool TRI_PopulateAuthInfo (TRI_vocbase_t* vocbase,
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
bool TRI_ReloadAuthInfo (TRI_vocbase_t* vocbase) { bool TRI_ReloadAuthInfo (TRI_vocbase_t* vocbase) {
return TRI_LoadAuthInfo(vocbase); bool result = TRI_LoadAuthInfo(vocbase);
vocbase->_authInfoLoaded = result;
return result;
} }
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////

View File

@ -258,11 +258,9 @@ static TRI_datafile_t* CreateCompactor (TRI_document_collection_t* document,
if (journal == NULL) { if (journal == NULL) {
if (TRI_errno() == TRI_ERROR_OUT_OF_MEMORY_MMAP) { if (TRI_errno() == TRI_ERROR_OUT_OF_MEMORY_MMAP) {
document->_lastError = TRI_set_errno(TRI_ERROR_OUT_OF_MEMORY_MMAP); document->_lastError = TRI_set_errno(TRI_ERROR_OUT_OF_MEMORY_MMAP);
document->_state = TRI_COL_STATE_READ;
} }
else { else {
document->_lastError = TRI_set_errno(TRI_ERROR_ARANGO_NO_JOURNAL); document->_lastError = TRI_set_errno(TRI_ERROR_ARANGO_NO_JOURNAL);
document->_state = TRI_COL_STATE_WRITE_ERROR;
} }
return NULL; return NULL;
@ -1807,6 +1805,8 @@ static bool OpenIterator (TRI_df_marker_t const* marker,
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
static int FillInternalIndexes (TRI_document_collection_t* document) { static int FillInternalIndexes (TRI_document_collection_t* document) {
TRI_ASSERT(! triagens::wal::LogfileManager::instance()->isInRecovery());
int res = TRI_ERROR_NO_ERROR; int res = TRI_ERROR_NO_ERROR;
for (size_t i = 0; i < document->_allIndexes._length; ++i) { for (size_t i = 0; i < document->_allIndexes._length; ++i) {
@ -2389,18 +2389,25 @@ TRI_datafile_t* TRI_CreateJournalDocumentCollection (TRI_document_collection_t*
TRI_FreeString(TRI_CORE_MEM_ZONE, number); TRI_FreeString(TRI_CORE_MEM_ZONE, number);
TRI_FreeString(TRI_CORE_MEM_ZONE, jname); TRI_FreeString(TRI_CORE_MEM_ZONE, jname);
TRI_DEBUG_INTENTIONAL_FAIL_IF("CreateJournalDocumentCollection") {
// simulate disk full
TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
document->_lastError = TRI_set_errno(TRI_ERROR_OUT_OF_MEMORY_MMAP);
errno = ENOSPC;
return nullptr;
}
journal = TRI_CreateDatafile(filename, fid, journalSize, true); journal = TRI_CreateDatafile(filename, fid, journalSize, true);
TRI_FreeString(TRI_CORE_MEM_ZONE, filename); TRI_FreeString(TRI_CORE_MEM_ZONE, filename);
} }
if (journal == NULL) { if (journal == nullptr) {
if (TRI_errno() == TRI_ERROR_OUT_OF_MEMORY_MMAP) { if (TRI_errno() == TRI_ERROR_OUT_OF_MEMORY_MMAP) {
document->_lastError = TRI_set_errno(TRI_ERROR_OUT_OF_MEMORY_MMAP); document->_lastError = TRI_set_errno(TRI_ERROR_OUT_OF_MEMORY_MMAP);
document->_state = TRI_COL_STATE_READ;
} }
else { else {
document->_lastError = TRI_set_errno(TRI_ERROR_ARANGO_NO_JOURNAL); document->_lastError = TRI_set_errno(TRI_ERROR_ARANGO_NO_JOURNAL);
document->_state = TRI_COL_STATE_WRITE_ERROR;
} }
return NULL; return NULL;
@ -2815,11 +2822,16 @@ TRI_document_collection_t* TRI_OpenDocumentCollection (TRI_vocbase_t* vocbase,
TRI_InitVocShaper(document->getShaper()); // ONLY in OPENCOLLECTION, PROTECTED by fake trx here TRI_InitVocShaper(document->getShaper()); // ONLY in OPENCOLLECTION, PROTECTED by fake trx here
// secondary indexes must not be loaded during recovery
// this is because creating indexes might write attribute markers into the WAL,
// but the WAL is read-only at the point of recovery
if (! triagens::wal::LogfileManager::instance()->isInRecovery()) {
// fill internal indexes (this is, the edges index at the moment) // fill internal indexes (this is, the edges index at the moment)
FillInternalIndexes(document); FillInternalIndexes(document);
// fill user-defined secondary indexes // fill user-defined secondary indexes
TRI_IterateIndexCollection(collection, OpenIndexIterator, collection); TRI_IterateIndexCollection(collection, OpenIndexIterator, collection);
}
return document; return document;
} }
@ -3474,18 +3486,12 @@ static int ComparePidName (void const* left, void const* right) {
/// note: the write-lock for the collection must be held to call this /// note: the write-lock for the collection must be held to call this
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
void TRI_UpdateStatisticsDocumentCollection (TRI_document_collection_t* document, void TRI_UpdateRevisionDocumentCollection (TRI_document_collection_t* document,
TRI_voc_rid_t rid, TRI_voc_rid_t rid,
bool force, bool force) {
int64_t logfileEntries) {
if (rid > 0) { if (rid > 0) {
SetRevision(document, rid, force); SetRevision(document, rid, force);
} }
if (! document->_info._isVolatile) {
// only count logfileEntries if the collection is durable
document->_uncollectedLogfileEntries += logfileEntries;
}
} }
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////

View File

@ -676,10 +676,9 @@ void TRI_FreeDocumentCollection (TRI_document_collection_t*);
/// note: the write-lock for the collection must be held to call this /// note: the write-lock for the collection must be held to call this
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
void TRI_UpdateStatisticsDocumentCollection (TRI_document_collection_t*, void TRI_UpdateRevisionDocumentCollection (TRI_document_collection_t*,
TRI_voc_rid_t, TRI_voc_rid_t,
bool, bool);
int64_t);
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
/// @brief whether or not a collection is fully collected /// @brief whether or not a collection is fully collected

View File

@ -513,7 +513,7 @@ static int OpenDatabases (TRI_server_t* server,
size_t i, n; size_t i, n;
int res; int res;
if (server->_iterateMarkersOnOpen) { if (server->_iterateMarkersOnOpen && ! server->_hasCreatedSystemDatabase) {
LOG_WARNING("no shutdown info found. scanning datafiles for last tick..."); LOG_WARNING("no shutdown info found. scanning datafiles for last tick...");
} }
@ -1318,6 +1318,8 @@ static int InitDatabases (TRI_server_t* server,
res = TRI_ERROR_OUT_OF_MEMORY; res = TRI_ERROR_OUT_OF_MEMORY;
} }
} }
server->_hasCreatedSystemDatabase = true;
} }
if (res == TRI_ERROR_NO_ERROR && performUpgrade) { if (res == TRI_ERROR_NO_ERROR && performUpgrade) {
@ -1344,6 +1346,134 @@ static int InitDatabases (TRI_server_t* server,
return res; return res;
} }
////////////////////////////////////////////////////////////////////////////////
/// @brief unloads all open collections after recovery
/// this is necessary to not mess up collection statistics
////////////////////////////////////////////////////////////////////////////////
static int SignalUnloadAll (TRI_server_t* server) {
LOG_TRACE("sending unload signal to all collections of all databases");
DatabaseReadLocker locker(&server->_databasesLock);
size_t const n = server->_databases._nrAlloc;
// iterate over all databases
for (size_t i = 0; i < n; ++i) {
TRI_vocbase_t* vocbase = static_cast<TRI_vocbase_t*>(server->_databases._table[i]);
if (vocbase != nullptr) {
TRI_ASSERT(vocbase->_type == TRI_VOCBASE_TYPE_NORMAL);
// iterate over all collections
TRI_READ_LOCK_COLLECTIONS_VOCBASE(vocbase);
// starts unloading of collections
for (size_t j = 0; j < vocbase->_collections._length; ++j) {
TRI_vocbase_col_t* collection = static_cast<TRI_vocbase_col_t*>(vocbase->_collections._buffer[j]);
TRI_UnloadCollectionVocBase(vocbase, collection, true);
}
TRI_READ_UNLOCK_COLLECTIONS_VOCBASE(vocbase);
}
}
return TRI_ERROR_NO_ERROR;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief waits until all collections of all databases have unloaded
////////////////////////////////////////////////////////////////////////////////
static int WaitForUnloadAll (TRI_server_t* server) {
LOG_TRACE("unloading all collections of all databases");
while (true) {
bool mustWait = false;
// iterate over all databases
{
DatabaseReadLocker locker(&server->_databasesLock);
size_t const n = server->_databases._nrAlloc;
for (size_t i = 0; i < n; ++i) {
TRI_vocbase_t* vocbase = static_cast<TRI_vocbase_t*>(server->_databases._table[i]);
if (vocbase != nullptr) {
TRI_ASSERT(vocbase->_type == TRI_VOCBASE_TYPE_NORMAL);
// iterate over all collections
TRI_READ_LOCK_COLLECTIONS_VOCBASE(vocbase);
// starts unloading of collections
for (size_t j = 0; j < vocbase->_collections._length; ++j) {
TRI_vocbase_col_t* collection = static_cast<TRI_vocbase_col_t*>(vocbase->_collections._buffer[j]);
TRI_READ_LOCK_STATUS_VOCBASE_COL(collection);
bool isUnloaded = (collection->_status == TRI_VOC_COL_STATUS_UNLOADED ||
collection->_status == TRI_VOC_COL_STATUS_DELETED ||
collection->_status == TRI_VOC_COL_STATUS_CORRUPTED);
TRI_READ_UNLOCK_STATUS_VOCBASE_COL(collection);
if (! isUnloaded) {
// abort early
mustWait = true;
break;
}
}
TRI_READ_UNLOCK_COLLECTIONS_VOCBASE(vocbase);
if (mustWait) {
break;
}
}
}
}
if (mustWait) {
// let something else happen and retry
usleep(10 * 1000);
continue;
}
// all collections have unloaded
break;
}
return TRI_ERROR_NO_ERROR;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief unloads all open collections after recovery
/// this is necessary to not mess up collection statistics
////////////////////////////////////////////////////////////////////////////////
static int InitAll (TRI_server_t* server) {
DatabaseReadLocker locker(&server->_databasesLock);
size_t const n = server->_databases._nrAlloc;
// iterate over all databases
for (size_t i = 0; i < n; ++i) {
TRI_vocbase_t* vocbase = static_cast<TRI_vocbase_t*>(server->_databases._table[i]);
if (vocbase != nullptr) {
TRI_ASSERT(vocbase->_type == TRI_VOCBASE_TYPE_NORMAL);
// initialise the authentication data for the database
TRI_ReloadAuthInfo(vocbase);
// start the compactor for the database
TRI_StartCompactorVocBase(vocbase);
}
}
return TRI_ERROR_NO_ERROR;
}
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
/// @brief database manager thread main loop /// @brief database manager thread main loop
/// the purpose of this thread is to physically remove directories of databases /// the purpose of this thread is to physically remove directories of databases
@ -1489,6 +1619,7 @@ int TRI_InitServer (TRI_server_t* server,
TRI_ASSERT(basePath != nullptr); TRI_ASSERT(basePath != nullptr);
server->_iterateMarkersOnOpen = iterateMarkersOnOpen; server->_iterateMarkersOnOpen = iterateMarkersOnOpen;
server->_hasCreatedSystemDatabase = false;
// c++ object, may be null in console mode // c++ object, may be null in console mode
server->_applicationEndpointServer = applicationEndpointServer; server->_applicationEndpointServer = applicationEndpointServer;
@ -1873,32 +2004,9 @@ int TRI_StartServer (TRI_server_t* server,
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
int TRI_InitDatabasesServer (TRI_server_t* server) { int TRI_InitDatabasesServer (TRI_server_t* server) {
DatabaseWriteLocker locker(&server->_databasesLock); SignalUnloadAll(server);
WaitForUnloadAll(server);
size_t n = server->_databases._nrAlloc; InitAll(server);
// iterate over all databases
for (size_t i = 0; i < n; ++i) {
TRI_vocbase_t* vocbase = static_cast<TRI_vocbase_t*>(server->_databases._table[i]);
if (vocbase != nullptr) {
TRI_ASSERT(vocbase->_type == TRI_VOCBASE_TYPE_NORMAL);
// iterate over all collections
TRI_WRITE_LOCK_COLLECTIONS_VOCBASE(vocbase);
// starts unloading of collections
for (size_t j = 0; j < vocbase->_collections._length; ++j) {
TRI_vocbase_col_t* collection = static_cast<TRI_vocbase_col_t*>(vocbase->_collections._buffer[j]);
TRI_UnloadCollectionVocBase(vocbase, collection, true);
}
TRI_WRITE_UNLOCK_COLLECTIONS_VOCBASE(vocbase);
// start the compactor for the database
TRI_StartCompactorVocBase(vocbase);
}
}
return TRI_ERROR_NO_ERROR; return TRI_ERROR_NO_ERROR;
} }
@ -2085,6 +2193,7 @@ int TRI_CreateDatabaseServer (TRI_server_t* server,
CreateApplicationDirectory(vocbase->_name, server->_appPath); CreateApplicationDirectory(vocbase->_name, server->_appPath);
CreateApplicationDirectory(vocbase->_name, server->_devAppPath); CreateApplicationDirectory(vocbase->_name, server->_devAppPath);
TRI_ReloadAuthInfo(vocbase);
TRI_StartCompactorVocBase(vocbase); TRI_StartCompactorVocBase(vocbase);
// increase reference counter // increase reference counter

View File

@ -70,6 +70,7 @@ typedef struct TRI_server_s {
bool _disableReplicationLoggers; bool _disableReplicationLoggers;
bool _disableReplicationAppliers; bool _disableReplicationAppliers;
bool _iterateMarkersOnOpen; bool _iterateMarkersOnOpen;
bool _hasCreatedSystemDatabase;
bool _initialised; bool _initialised;
} }

View File

@ -179,6 +179,10 @@ static void FreeOperations (TRI_transaction_t* trx) {
if (mustRollback) { if (mustRollback) {
document->_info._revision = trxCollection->_originalRevision; document->_info._revision = trxCollection->_originalRevision;
} }
else if (! document->_info._isVolatile) {
// only count logfileEntries if the collection is durable
document->_uncollectedLogfileEntries += trxCollection->_operations->size();
}
delete trxCollection->_operations; delete trxCollection->_operations;
trxCollection->_operations = nullptr; trxCollection->_operations = nullptr;
@ -908,6 +912,8 @@ int TRI_AddOperationTransaction (triagens::wal::DocumentOperation& operation,
// operation is directly executed // operation is directly executed
operation.handle(); operation.handle();
++document->_uncollectedLogfileEntries;
if (operation.type == TRI_VOC_DOCUMENT_OPERATION_UPDATE || if (operation.type == TRI_VOC_DOCUMENT_OPERATION_UPDATE ||
operation.type == TRI_VOC_DOCUMENT_OPERATION_REMOVE) { operation.type == TRI_VOC_DOCUMENT_OPERATION_REMOVE) {
// update datafile statistics for the old header // update datafile statistics for the old header
@ -937,7 +943,7 @@ int TRI_AddOperationTransaction (triagens::wal::DocumentOperation& operation,
copy->handle(); copy->handle();
} }
TRI_UpdateStatisticsDocumentCollection(document, operation.rid, false, 1); TRI_UpdateRevisionDocumentCollection(document, operation.rid, false);
return TRI_ERROR_NO_ERROR; return TRI_ERROR_NO_ERROR;
} }

View File

@ -731,7 +731,7 @@ int TRI_InsertAttributeVocShaper (TRI_shaper_t* s,
char const* name = shaper->_collection->_info._name; char const* name = shaper->_collection->_info._name;
#ifdef TRI_ENABLE_MAINTAINER_MODE #ifdef TRI_ENABLE_MAINTAINER_MODE
LOG_WARNING("found duplicate attribute name '%s' in collection '%s'", p, name); LOG_ERROR("found duplicate attribute name '%s' in collection '%s'", p, name);
TRI_ASSERT(false); TRI_ASSERT(false);
#else #else
LOG_TRACE("found duplicate attribute name '%s' in collection '%s'", p, name); LOG_TRACE("found duplicate attribute name '%s' in collection '%s'", p, name);
@ -744,7 +744,7 @@ int TRI_InsertAttributeVocShaper (TRI_shaper_t* s,
char const* name = shaper->_collection->_info._name; char const* name = shaper->_collection->_info._name;
#ifdef TRI_ENABLE_MAINTAINER_MODE #ifdef TRI_ENABLE_MAINTAINER_MODE
LOG_WARNING("found duplicate attribute id '%llu' in collection '%s'", (unsigned long long) m->_aid, name); LOG_ERROR("found duplicate attribute id '%llu' in collection '%s'", (unsigned long long) m->_aid, name);
TRI_ASSERT(false); TRI_ASSERT(false);
#else #else
LOG_TRACE("found duplicate attribute id '%llu' in collection '%s'", (unsigned long long) m->_aid, name); LOG_TRACE("found duplicate attribute id '%llu' in collection '%s'", (unsigned long long) m->_aid, name);

View File

@ -1460,8 +1460,6 @@ TRI_vocbase_t* TRI_OpenVocBase (TRI_server_t* server,
return NULL; return NULL;
} }
TRI_ReloadAuthInfo(vocbase);
ScanTrxCollection(vocbase); ScanTrxCollection(vocbase);
// ............................................................................. // .............................................................................
@ -1639,14 +1637,6 @@ int TRI_StopCompactorVocBase (TRI_vocbase_t* vocbase) {
return TRI_ERROR_NO_ERROR; return TRI_ERROR_NO_ERROR;
} }
////////////////////////////////////////////////////////////////////////////////
/// @brief load authentication information
////////////////////////////////////////////////////////////////////////////////
void TRI_LoadAuthInfoVocBase (TRI_vocbase_t* vocbase) {
vocbase->_authInfoLoaded = TRI_LoadAuthInfo(vocbase);
}
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
/// @brief returns all known (document) collections /// @brief returns all known (document) collections
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////

View File

@ -446,12 +446,6 @@ void TRI_StartCompactorVocBase (TRI_vocbase_t*);
int TRI_StopCompactorVocBase (TRI_vocbase_t*); int TRI_StopCompactorVocBase (TRI_vocbase_t*);
////////////////////////////////////////////////////////////////////////////////
/// @brief load authentication information
////////////////////////////////////////////////////////////////////////////////
void TRI_LoadAuthInfoVocBase (TRI_vocbase_t*);
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
/// @brief returns all known collections /// @brief returns all known collections
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////

View File

@ -348,10 +348,10 @@ void CollectorThread::run () {
} }
catch (triagens::arango::Exception const& ex) { catch (triagens::arango::Exception const& ex) {
int res = ex.code(); int res = ex.code();
LOG_ERROR("got unexpected error in collectorThread: %s", TRI_errno_string(res)); LOG_ERROR("got unexpected error in collectorThread::run: %s", TRI_errno_string(res));
} }
catch (...) { catch (...) {
LOG_ERROR("got unspecific error in collectorThread"); LOG_ERROR("got unspecific error in collectorThread::run");
} }
if (stop == 0 && ! worked) { if (stop == 0 && ! worked) {
@ -438,28 +438,36 @@ bool CollectorThread::processQueuedOperations () {
if (res == TRI_ERROR_LOCK_TIMEOUT) { if (res == TRI_ERROR_LOCK_TIMEOUT) {
// could not acquire write-lock for collection in time // could not acquire write-lock for collection in time
// do not delete the operations // do not delete the operations
++it2;
continue; continue;
} }
// delete the object
delete (*it2);
if (res == TRI_ERROR_NO_ERROR) { if (res == TRI_ERROR_NO_ERROR) {
LOG_TRACE("queued operations applied successfully"); LOG_TRACE("queued operations applied successfully");
} }
else if (res == TRI_ERROR_ARANGO_DATABASE_NOT_FOUND || else if (res == TRI_ERROR_ARANGO_DATABASE_NOT_FOUND ||
res == TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND) { res == TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND) {
// these are expected errors
LOG_TRACE("removing queued operations for already deleted collection"); LOG_TRACE("removing queued operations for already deleted collection");
res = TRI_ERROR_NO_ERROR;
} }
else { else {
LOG_WARNING("got unexpected error code while applying queued operations: %s", TRI_errno_string(res)); LOG_WARNING("got unexpected error code while applying queued operations: %s", TRI_errno_string(res));
} }
if (res == TRI_ERROR_NO_ERROR) {
// delete the object
delete (*it2);
// delete the element from the vector while iterating over the vector // delete the element from the vector while iterating over the vector
it2 = operations.erase(it2); it2 = operations.erase(it2);
_logfileManager->decreaseCollectQueueSize(logfile); _logfileManager->decreaseCollectQueueSize(logfile);
} }
else {
++it2;
}
}
// next collection // next collection
} }
@ -506,6 +514,9 @@ int CollectorThread::processCollectionOperations (CollectorCache* cache) {
TRI_document_collection_t* document = collection->_collection; TRI_document_collection_t* document = collection->_collection;
// first try to read-lock the compactor-lock, afterwards try to write-lock the collection
// if any locking attempt fails, release and try again next time
if (! TRI_TryReadLockReadWriteLock(&document->_compactionLock)) { if (! TRI_TryReadLockReadWriteLock(&document->_compactionLock)) {
return TRI_ERROR_LOCK_TIMEOUT; return TRI_ERROR_LOCK_TIMEOUT;
} }
@ -524,6 +535,7 @@ int CollectorThread::processCollectionOperations (CollectorCache* cache) {
// now we have the write lock on the collection // now we have the write lock on the collection
LOG_TRACE("wal collector processing operations for collection '%s'", document->_info._name); LOG_TRACE("wal collector processing operations for collection '%s'", document->_info._name);
if (! _inRecovery) {
for (auto it = cache->operations->begin(); it != cache->operations->end(); ++it) { for (auto it = cache->operations->begin(); it != cache->operations->end(); ++it) {
auto operation = (*it); auto operation = (*it);
@ -611,6 +623,8 @@ int CollectorThread::processCollectionOperations (CollectorCache* cache) {
} }
} }
} // ! _inRecovery
// finally update all datafile statistics // finally update all datafile statistics
LOG_TRACE("updating datafile statistics for collection '%s'", document->_info._name); LOG_TRACE("updating datafile statistics for collection '%s'", document->_info._name);
updateDatafileStatistics(document, cache); updateDatafileStatistics(document, cache);
@ -666,6 +680,8 @@ bool CollectorThread::removeLogfiles () {
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
int CollectorThread::collect (Logfile* logfile) { int CollectorThread::collect (Logfile* logfile) {
TRI_ASSERT(logfile != nullptr);
LOG_TRACE("collecting logfile %llu", (unsigned long long) logfile->id()); LOG_TRACE("collecting logfile %llu", (unsigned long long) logfile->id());
TRI_datafile_t* df = logfile->df(); TRI_datafile_t* df = logfile->df();
@ -744,11 +760,16 @@ int CollectorThread::collect (Logfile* logfile) {
catch (triagens::arango::Exception const& ex) { catch (triagens::arango::Exception const& ex) {
res = ex.code(); res = ex.code();
} }
catch (...) {
res = TRI_ERROR_INTERNAL;
}
if (res != TRI_ERROR_NO_ERROR && if (res != TRI_ERROR_NO_ERROR &&
res != TRI_ERROR_ARANGO_DATABASE_NOT_FOUND && res != TRI_ERROR_ARANGO_DATABASE_NOT_FOUND &&
res != TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND) { res != TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND) {
LOG_WARNING("got unexpected error in collect: %s", TRI_errno_string(res)); LOG_WARNING("got unexpected error in CollectorThread::collect: %s", TRI_errno_string(res));
// abort early
return res;
} }
} }
} }
@ -797,7 +818,6 @@ int CollectorThread::transferMarkers (Logfile* logfile,
totalOperationsCount, totalOperationsCount,
operations.size()); operations.size());
int res = TRI_ERROR_INTERNAL; int res = TRI_ERROR_INTERNAL;
try { try {
@ -808,15 +828,21 @@ int CollectorThread::transferMarkers (Logfile* logfile,
res = syncDatafileCollection(document); res = syncDatafileCollection(document);
// note: cache is passed by reference and can be modified by queueOperations // note: cache is passed by reference and can be modified by queueOperations
// (i.e. set to nullptr!)
queueOperations(logfile, cache); queueOperations(logfile, cache);
} }
} }
catch (triagens::arango::Exception const& ex) { catch (triagens::arango::Exception const& ex) {
res = ex.code(); res = ex.code();
} }
catch (...) {
res = TRI_ERROR_INTERNAL;
}
if (cache != nullptr) { if (cache != nullptr) {
// prevent memleak // prevent memleak
cache->freeBarriers();
delete cache; delete cache;
} }
@ -1115,6 +1141,7 @@ int CollectorThread::syncDatafileCollection (TRI_document_collection_t* document
// only place that's ever written to. if a journal is full, it will have been // only place that's ever written to. if a journal is full, it will have been
// sealed and synced already // sealed and synced already
size_t const n = collection->_journals._length; size_t const n = collection->_journals._length;
for (size_t i = 0; i < n; ++i) { for (size_t i = 0; i < n; ++i) {
TRI_datafile_t* datafile = static_cast<TRI_datafile_t*>(collection->_journals._buffer[i]); TRI_datafile_t* datafile = static_cast<TRI_datafile_t*>(collection->_journals._buffer[i]);
@ -1204,9 +1231,11 @@ char* CollectorThread::nextFreeMarkerPosition (TRI_document_collection_t* docume
TRI_datafile_t* datafile = TRI_CreateJournalDocumentCollection(document, tick, targetSize); TRI_datafile_t* datafile = TRI_CreateJournalDocumentCollection(document, tick, targetSize);
if (datafile == nullptr) { if (datafile == nullptr) {
LOG_ERROR("unable to create journal file"); int res = TRI_errno();
// could not create a datafile // could not create a datafile, this is a serious error
break; TRI_UNLOCK_JOURNAL_ENTRIES_DOC_COLLECTION(document);
THROW_ARANGO_EXCEPTION(res);
} }
} }
@ -1226,9 +1255,16 @@ leave:
// create a local datafile info struct // create a local datafile info struct
createDfi(cache, datafile->_fid); createDfi(cache, datafile->_fid);
if (! _inRecovery) {
// we only need the barriers when we are outside the recovery
// the compactor will not run during recovery
cache->addBarrier(TRI_CreateBarrierElement(&document->_barrierList)); cache->addBarrier(TRI_CreateBarrierElement(&document->_barrierList));
} }
} }
}
else {
THROW_ARANGO_EXCEPTION(TRI_ERROR_ARANGO_NO_JOURNAL);
}
return dst; return dst;
} }

View File

@ -227,7 +227,8 @@ namespace triagens {
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
inline bool canBeCollected () const { inline bool canBeCollected () const {
return (_status == StatusType::SEALED); return (_status == StatusType::SEALED ||
_status == StatusType::COLLECTION_REQUESTED);
} }
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
@ -279,7 +280,15 @@ namespace triagens {
} }
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
/// @brief change the logfile status /// @brief change the logfile status, without assertions
////////////////////////////////////////////////////////////////////////////////
void forceStatus (StatusType status) {
_status = status;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief change the logfile status, with assertions
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
void setStatus (StatusType status) { void setStatus (StatusType status) {

View File

@ -680,18 +680,9 @@ SlotInfo LogfileManager::allocate (void const* src,
uint32_t size) { uint32_t size) {
if (! _allowWrites) { if (! _allowWrites) {
// no writes allowed // no writes allowed
// check if this is a shape or attribute marker, which is allowed even in
// read-only mode
TRI_df_marker_t const* marker = static_cast<TRI_df_marker_t const*>(src);
if (marker->_type != TRI_WAL_MARKER_ATTRIBUTE &&
marker->_type != TRI_WAL_MARKER_SHAPE) {
return SlotInfo(TRI_ERROR_ARANGO_READ_ONLY); return SlotInfo(TRI_ERROR_ARANGO_READ_ONLY);
} }
// fallthrough
}
if (size > MaxEntrySize()) { if (size > MaxEntrySize()) {
// entry is too big // entry is too big
return SlotInfo(TRI_ERROR_ARANGO_DOCUMENT_TOO_LARGE); return SlotInfo(TRI_ERROR_ARANGO_DOCUMENT_TOO_LARGE);
@ -748,6 +739,35 @@ SlotInfoCopy LogfileManager::allocateAndWrite (void* src,
} }
} }
////////////////////////////////////////////////////////////////////////////////
/// @brief set all open logfiles to status sealed
////////////////////////////////////////////////////////////////////////////////
void LogfileManager::setAllSealed () {
READ_LOCKER(_logfilesLock);
for (auto it = _logfiles.begin(); it != _logfiles.end(); ++it) {
Logfile* logfile = (*it).second;
if (logfile != nullptr) {
Logfile::StatusType status = logfile->status();
if (status == Logfile::StatusType::OPEN ||
status == Logfile::StatusType::SEAL_REQUESTED) {
// set all logfiles to sealed status so they can be collected
// we don't care about the previous status here
logfile->forceStatus(Logfile::StatusType::SEALED);
if (logfile->id() > _lastSealedId) {
// adjust last sealed id
_lastSealedId = logfile->id();
}
}
}
}
}
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
/// @brief finalise and seal the currently open logfile /// @brief finalise and seal the currently open logfile
/// this is useful to ensure that any open writes up to this point have made /// this is useful to ensure that any open writes up to this point have made
@ -757,31 +777,55 @@ SlotInfoCopy LogfileManager::allocateAndWrite (void* src,
int LogfileManager::flush (bool waitForSync, int LogfileManager::flush (bool waitForSync,
bool waitForCollector, bool waitForCollector,
bool writeShutdownFile) { bool writeShutdownFile) {
Logfile::IdType currentLogfileId; TRI_ASSERT(! _inRecovery);
Logfile::IdType lastOpenLogfileId;
Logfile::IdType lastSealedLogfileId;
{ {
READ_LOCKER(_logfilesLock); READ_LOCKER(_logfilesLock);
currentLogfileId = _lastOpenedId; lastOpenLogfileId = _lastOpenedId;
lastSealedLogfileId = _lastSealedId;
}
if (lastOpenLogfileId == 0) {
return TRI_ERROR_NO_ERROR;
} }
LOG_TRACE("about to flush active WAL logfile. currentLogfileId: %llu, waitForSync: %d, waitForCollector: %d", LOG_TRACE("about to flush active WAL logfile. currentLogfileId: %llu, waitForSync: %d, waitForCollector: %d",
(unsigned long long) currentLogfileId, (unsigned long long) lastOpenLogfileId,
(int) waitForSync, (int) waitForSync,
(int) waitForCollector); (int) waitForCollector);
int res = _slots->flush(waitForSync); int res = _slots->flush(waitForSync);
if (res == TRI_ERROR_NO_ERROR || res == TRI_ERROR_ARANGO_DATAFILE_EMPTY) { if (res != TRI_ERROR_NO_ERROR &&
res != TRI_ERROR_ARANGO_DATAFILE_EMPTY) {
LOG_ERROR("unexpected error in WAL flush request: %s", TRI_errno_string(res));
return res;
}
if (waitForCollector) { if (waitForCollector) {
this->waitForCollector(currentLogfileId); if (res == TRI_ERROR_NO_ERROR) {
// we need to wait for the collector...
this->waitForCollector(lastOpenLogfileId);
}
else if (res == TRI_ERROR_ARANGO_DATAFILE_EMPTY) {
// current logfile is empty and cannot be collected
// we need to wait for the collector to collect the previously sealed datafile
if (lastSealedLogfileId > 0) {
this->waitForCollector(lastSealedLogfileId);
}
}
} }
if (writeShutdownFile) { if (writeShutdownFile) {
// update the file with the last tick, last sealed etc. // update the file with the last tick, last sealed etc.
return writeShutdownInfo(false); return writeShutdownInfo(false);
} }
}
return res; return TRI_ERROR_NO_ERROR;
} }
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
@ -955,6 +999,11 @@ Logfile* LogfileManager::getWriteableLogfile (uint32_t size,
static const uint64_t MaxIterations = 1000; static const uint64_t MaxIterations = 1000;
size_t iterations = 0; size_t iterations = 0;
TRI_DEBUG_INTENTIONAL_FAIL_IF("LogfileManagerGetWriteableLogfile") {
// intentionally don't return a logfile
return nullptr;
}
while (++iterations < 1000) { while (++iterations < 1000) {
{ {
WRITE_LOCKER(_logfilesLock); WRITE_LOCKER(_logfilesLock);
@ -982,7 +1031,6 @@ Logfile* LogfileManager::getWriteableLogfile (uint32_t size,
// and physically remove the file // and physically remove the file
// note: this will also delete the logfile object! // note: this will also delete the logfile object!
removeLogfile(logfile, false); removeLogfile(logfile, false);
} }
else { else {
++it; ++it;
@ -1103,6 +1151,12 @@ void LogfileManager::setCollectionRequested (Logfile* logfile) {
{ {
WRITE_LOCKER(_logfilesLock); WRITE_LOCKER(_logfilesLock);
if (logfile->status() == Logfile::StatusType::COLLECTION_REQUESTED) {
// the collector already asked for this file, but couldn't process it
// due to some exception
return;
}
logfile->setStatus(Logfile::StatusType::COLLECTION_REQUESTED); logfile->setStatus(Logfile::StatusType::COLLECTION_REQUESTED);
} }
@ -1230,6 +1284,10 @@ bool LogfileManager::runRecovery () {
_droppedCollections = state.droppedCollections; _droppedCollections = state.droppedCollections;
} }
// "seal" any open logfiles so the collector can copy over everything
this->setAllSealed();
int res = startCollectorThread(); int res = startCollectorThread();
if (res != TRI_ERROR_NO_ERROR) { if (res != TRI_ERROR_NO_ERROR) {
@ -1239,10 +1297,8 @@ bool LogfileManager::runRecovery () {
TRI_ASSERT(_collectorThread != nullptr); TRI_ASSERT(_collectorThread != nullptr);
LOG_TRACE("issuing recovery flush request"); LOG_TRACE("waiting for collector to catch up");
waitForCollector(_lastOpenedId);
// flush any open logfiles so the collector can copy over everything
this->flush(true, true, false);
{ {
// reset the list of failed transactions // reset the list of failed transactions
@ -1252,6 +1308,16 @@ bool LogfileManager::runRecovery () {
_droppedCollections.clear(); _droppedCollections.clear();
} }
// finished recovery
_inRecovery = false;
// from now on, we allow writes to the logfile
allowWrites(true);
// tell the collector that the recovery is over now
_collectorThread->recoveryDone();
_allocatorThread->recoveryDone();
// unload all collections to reset statistics, start compactor threads etc. // unload all collections to reset statistics, start compactor threads etc.
res = TRI_InitDatabasesServer(_server); res = TRI_InitDatabasesServer(_server);
@ -1260,17 +1326,6 @@ bool LogfileManager::runRecovery () {
return false; return false;
} }
// TODO: how long must we sleep here?
sleep(3);
// tell the collector that the recovery is over now
_inRecovery = false;
_collectorThread->recoveryDone();
_allocatorThread->recoveryDone();
// from now on, we allow writes to the logfile
allowWrites(true);
if (logfilesToCollect > 0) { if (logfilesToCollect > 0) {
LOG_INFO("WAL recovery finished successfully"); LOG_INFO("WAL recovery finished successfully");
} }
@ -1608,12 +1663,16 @@ int LogfileManager::openLogfiles () {
_lastSealedId = id; _lastSealedId = id;
} }
if ((logfile->status() == Logfile::StatusType::SEALED || logfile->status() == Logfile::StatusType::OPEN) &&
id > _lastOpenedId) {
_lastOpenedId = id;
}
(*it).second = logfile; (*it).second = logfile;
++it; ++it;
} }
_lastOpenedId = _lastSealedId;
return TRI_ERROR_NO_ERROR; return TRI_ERROR_NO_ERROR;
} }

View File

@ -182,6 +182,14 @@ struct RecoverState {
return _reserveLogfiles; return _reserveLogfiles;
} }
////////////////////////////////////////////////////////////////////////////////
/// @brief set the number of reserve logfiles
////////////////////////////////////////////////////////////////////////////////
inline void reserveLogfiles (uint32_t value) {
_reserveLogfiles = value;
}
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
/// @brief get the number of historic logfiles to keep /// @brief get the number of historic logfiles to keep
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
@ -287,6 +295,12 @@ struct RecoverState {
uint32_t, uint32_t,
bool); bool);
////////////////////////////////////////////////////////////////////////////////
/// @brief set all open logfiles to status sealed
////////////////////////////////////////////////////////////////////////////////
void setAllSealed ();
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
/// @brief finalise and seal the currently open logfile /// @brief finalise and seal the currently open logfile
/// this is useful to ensure that any open writes up to this point have made /// this is useful to ensure that any open writes up to this point have made
@ -409,6 +423,14 @@ struct RecoverState {
_allowWrites = value; _allowWrites = value;
} }
////////////////////////////////////////////////////////////////////////////////
/// @brief whether or not we are in the recovery mode
////////////////////////////////////////////////////////////////////////////////
inline bool isInRecovery () const {
return _inRecovery;
}
// ----------------------------------------------------------------------------- // -----------------------------------------------------------------------------
// --SECTION-- private methods // --SECTION-- private methods
// ----------------------------------------------------------------------------- // -----------------------------------------------------------------------------

View File

@ -118,11 +118,12 @@ Slot::TickType Slots::lastCommittedTick () {
SlotInfo Slots::nextUnused (uint32_t size) { SlotInfo Slots::nextUnused (uint32_t size) {
// we need to use the aligned size for writing // we need to use the aligned size for writing
uint32_t alignedSize = TRI_DF_ALIGN_BLOCK(size); uint32_t alignedSize = TRI_DF_ALIGN_BLOCK(size);
int iterations = 0;
bool hasWaited = false; bool hasWaited = false;
TRI_ASSERT(size > 0); TRI_ASSERT(size > 0);
while (true) { while (++iterations < 1000) {
{ {
MUTEX_LOCKER(_lock); MUTEX_LOCKER(_lock);
@ -152,6 +153,8 @@ SlotInfo Slots::nextUnused (uint32_t size) {
// advance to next slot // advance to next slot
slot = &_slots[_handoutIndex]; slot = &_slots[_handoutIndex];
_logfileManager->setLogfileSealRequested(_logfile); _logfileManager->setLogfileSealRequested(_logfile);
_logfile = nullptr;
} }
// fetch the next free logfile (this may create a new one) // fetch the next free logfile (this may create a new one)
@ -159,6 +162,11 @@ SlotInfo Slots::nextUnused (uint32_t size) {
if (_logfile == nullptr) { if (_logfile == nullptr) {
usleep(10 * 1000); usleep(10 * 1000);
TRI_DEBUG_INTENTIONAL_FAIL_IF("LogfileManagerGetWriteableLogfile") {
return SlotInfo(TRI_ERROR_ARANGO_NO_JOURNAL);
}
// try again in next iteration // try again in next iteration
} }
else if (status == Logfile::StatusType::EMPTY) { else if (status == Logfile::StatusType::EMPTY) {
@ -210,6 +218,8 @@ SlotInfo Slots::nextUnused (uint32_t size) {
guard.wait(10 * 1000); guard.wait(10 * 1000);
} }
} }
return SlotInfo(TRI_ERROR_ARANGO_NO_JOURNAL);
} }
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
@ -267,7 +277,7 @@ SyncRegion Slots::getSyncRegion () {
break; break;
} }
// LOG_INFO("group commit"); // this is a group commit!!
// update the region // update the region
region.size += (uint32_t) (static_cast<char*>(slot->mem()) - (region.mem + region.size) + slot->size()); region.size += (uint32_t) (static_cast<char*>(slot->mem()) - (region.mem + region.size) + slot->size());
@ -345,10 +355,11 @@ void Slots::returnSyncRegion (SyncRegion const& region) {
int Slots::closeLogfile (Slot::TickType& lastCommittedTick, int Slots::closeLogfile (Slot::TickType& lastCommittedTick,
bool& worked) { bool& worked) {
int iterations = 0;
bool hasWaited = false; bool hasWaited = false;
worked = false; worked = false;
while (true) { while (++iterations < 1000) {
{ {
MUTEX_LOCKER(_lock); MUTEX_LOCKER(_lock);
@ -375,16 +386,20 @@ int Slots::closeLogfile (Slot::TickType& lastCommittedTick,
int res = writeFooter(slot); int res = writeFooter(slot);
if (res != TRI_ERROR_NO_ERROR) { if (res != TRI_ERROR_NO_ERROR) {
LOG_ERROR("could not write logfile footer: %s", TRI_errno_string(res));
return res; return res;
} }
_logfileManager->setLogfileSealRequested(_logfile); _logfileManager->setLogfileSealRequested(_logfile);
// advance to next slot
slot = &_slots[_handoutIndex];
// invalidate the logfile so for the next write we'll use a // invalidate the logfile so for the next write we'll use a
// new one // new one
_logfile = nullptr; _logfile = nullptr;
worked = true;
return TRI_ERROR_NO_ERROR; // fall-through intentional
} }
TRI_ASSERT(_logfile == nullptr); TRI_ASSERT(_logfile == nullptr);
@ -394,6 +409,10 @@ int Slots::closeLogfile (Slot::TickType& lastCommittedTick,
Logfile::StatusType status = newLogfile(1); Logfile::StatusType status = newLogfile(1);
if (_logfile == nullptr) { if (_logfile == nullptr) {
TRI_DEBUG_INTENTIONAL_FAIL_IF("LogfileManagerGetWriteableLogfile") {
return TRI_ERROR_ARANGO_NO_JOURNAL;
}
usleep(10 * 1000); usleep(10 * 1000);
// try again in next iteration // try again in next iteration
} }
@ -402,11 +421,15 @@ int Slots::closeLogfile (Slot::TickType& lastCommittedTick,
int res = writeHeader(slot); int res = writeHeader(slot);
if (res != TRI_ERROR_NO_ERROR) { if (res != TRI_ERROR_NO_ERROR) {
LOG_ERROR("could not write logfile header: %s", TRI_errno_string(res));
return res; return res;
} }
// advance to next slot
slot = &_slots[_handoutIndex];
_logfileManager->setLogfileOpen(_logfile); _logfileManager->setLogfileOpen(_logfile);
worked = false; worked = true;
return TRI_ERROR_NO_ERROR; return TRI_ERROR_NO_ERROR;
} }
else { else {
@ -434,6 +457,8 @@ int Slots::closeLogfile (Slot::TickType& lastCommittedTick,
guard.wait(10 * 1000); guard.wait(10 * 1000);
} }
} }
return TRI_ERROR_ARANGO_NO_JOURNAL;
} }
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
@ -441,7 +466,7 @@ int Slots::closeLogfile (Slot::TickType& lastCommittedTick,
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
int Slots::writeHeader (Slot* slot) { int Slots::writeHeader (Slot* slot) {
TRI_df_header_marker_t header = _logfile->getHeaderMarker(); TRI_df_header_marker_t&& header = _logfile->getHeaderMarker();
size_t const size = header.base._size; size_t const size = header.base._size;
TRI_df_marker_t* mem = reinterpret_cast<TRI_df_marker_t*>(_logfile->reserve(size)); TRI_df_marker_t* mem = reinterpret_cast<TRI_df_marker_t*>(_logfile->reserve(size));
@ -461,7 +486,7 @@ int Slots::writeHeader (Slot* slot) {
int Slots::writeFooter (Slot* slot) { int Slots::writeFooter (Slot* slot) {
TRI_ASSERT(_logfile != nullptr); TRI_ASSERT(_logfile != nullptr);
TRI_df_footer_marker_t footer = _logfile->getFooterMarker(); TRI_df_footer_marker_t&& footer = _logfile->getFooterMarker();
size_t const size = footer.base._size; size_t const size = footer.base._size;
TRI_df_marker_t* mem = reinterpret_cast<TRI_df_marker_t*>(_logfile->reserve(size)); TRI_df_marker_t* mem = reinterpret_cast<TRI_df_marker_t*>(_logfile->reserve(size));

View File

@ -1,7 +1,5 @@
/*jslint indent: 2, maxlen: 120, vars: true, white: true, plusplus: true, nonpropdel: true, nomen: true, sloppy: true */ /*jslint indent: 2, maxlen: 120, vars: true, white: true, plusplus: true, nonpropdel: true, nomen: true, sloppy: true */
/*global require, assertEqual, assertNotEqual, /*global require, assertEqual */
print, print_plain, COMPARE_STRING, NORMALIZE_STRING,
help, start_pager, stop_pager, start_pretty_print, stop_pretty_print, start_color_print, stop_color_print */
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
/// @brief tests for client-specific functionality /// @brief tests for client-specific functionality
@ -53,6 +51,14 @@ function changeOperationModePositiveCaseTestSuite () {
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
tearDown : function () { tearDown : function () {
// reset server-mode after executing this test
db._executeTransaction({
collections: { },
action: function () {
var db = require('internal').db;
db._changeMode('Normal');
}
});
}, },
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
@ -63,8 +69,11 @@ function changeOperationModePositiveCaseTestSuite () {
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
testChangeMode : function () { testChangeMode : function () {
var result = // we need to use a transaction (the transaction is shipped to the
db._executeTransaction({collections: {}, // server and executed there) to execute the changeMode function call on
// the server...
var result = db._executeTransaction({
collections: { },
action: function () { action: function () {
var db = require('internal').db; var db = require('internal').db;
var result = db._changeMode('ReadOnly'); var result = db._changeMode('ReadOnly');

View File

@ -1,7 +1,5 @@
/*jslint indent: 2, maxlen: 120, vars: true, white: true, plusplus: true, nonpropdel: true, nomen: true, sloppy: true */ /*jslint indent: 2, maxlen: 120, vars: true, white: true, plusplus: true, nonpropdel: true, nomen: true, sloppy: true */
/*global require, assertEqual, assertNotEqual, /*global require, assertEqual */
print, print_plain, COMPARE_STRING, NORMALIZE_STRING,
help, start_pager, stop_pager, start_pretty_print, stop_pretty_print, start_color_print, stop_color_print */
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
/// @brief tests for client-specific functionality /// @brief tests for client-specific functionality
@ -64,19 +62,21 @@ function changeOperationModeNegativeCaseTestSuite () {
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
testTryChangeMode : function () { testTryChangeMode : function () {
var modified = true;
try { try {
db._executeTransaction({collections: {}, db._executeTransaction({
collections: {},
action: function () { action: function () {
var db = require('internal').db; var db = require('internal').db;
var result = db._changeMode('ReadOnly'); var result = db._changeMode('ReadOnly');
return result; return result;
} }
});} catch(e) { });
assertEqual(arangodb.errors.ERROR_FORBIDDEN.code, e.errorNum);
modified = false; fail();
}
catch (e) {
assertEqual(arangodb.errors.ERROR_FORBIDDEN.code, e.errorNum);
} }
assertFalse(modified);
} }
}; };

View File

@ -63,10 +63,16 @@ function createStatisticsCollection (name) {
var collection = db._collection(name); var collection = db._collection(name);
if (collection === null) { if (collection === null) {
var r = db._create(name, { isSystem: true, waitForSync: false }); var r = null;
try {
r = db._create(name, { isSystem: true, waitForSync: false });
}
catch (err) {
}
if (! r) { if (! r) {
return; return false;
} }
collection = db._collection(name); collection = db._collection(name);
@ -75,6 +81,8 @@ function createStatisticsCollection (name) {
if (collection !== null) { if (collection !== null) {
collection.ensureSkiplist("time"); collection.ensureSkiplist("time");
} }
return true;
} }
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
@ -91,7 +99,7 @@ function createStatisticsCollections () {
'use strict'; 'use strict';
if (initialized) { if (initialized) {
return; return true;
} }
initialized = true; initialized = true;
@ -100,10 +108,14 @@ function createStatisticsCollections () {
var i; var i;
for (i = 0; i < names.length; ++i) { for (i = 0; i < names.length; ++i) {
createStatisticsCollection(names[i]); if (! createStatisticsCollection(names[i])) {
return false;
} }
} }
return true;
}
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
/// @brief collectGarbage /// @brief collectGarbage
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
@ -478,7 +490,9 @@ exports.STATISTICS_HISTORY_INTERVAL = 15 * 60;
exports.historian = function () { exports.historian = function () {
"use strict"; "use strict";
createStatisticsCollections(); if (! createStatisticsCollections()) {
return;
}
var statsRaw = db._statisticsRaw; var statsRaw = db._statisticsRaw;
var statsCol = db._statistics; var statsCol = db._statistics;
@ -537,7 +551,9 @@ exports.historian = function () {
exports.historianAverage = function () { exports.historianAverage = function () {
"use strict"; "use strict";
createStatisticsCollections(); if (! createStatisticsCollections()) {
return;
}
var stats15m = db._statistics15; var stats15m = db._statistics15;
@ -589,7 +605,9 @@ exports.historianAverage = function () {
exports.garbageCollector = function () { exports.garbageCollector = function () {
'use strict'; 'use strict';
createStatisticsCollections(); if (! createStatisticsCollections()) {
return;
}
var time = internal.time(); var time = internal.time();

View File

@ -56,9 +56,20 @@ exports.Helper = {
collection.unload(); collection.unload();
internal.flushWal(); internal.flushWal();
var iterations = 0;
while (collection.status() !== arangodb.ArangoCollection.STATUS_UNLOADED) { while (collection.status() !== arangodb.ArangoCollection.STATUS_UNLOADED) {
collection.unload(); collection.unload();
internal.wait(1); internal.wait(0.25);
++iterations;
if (iterations === 20) {
require("console").log("waiting for collection " + collection.name() + " to unload");
}
else if (iterations === 400) {
throw "waited too long for unload of collection " + collection.name();
}
} }
}, },

View File

@ -17,7 +17,12 @@ runJSUnityTests = function (tests) {
var result = true; var result = true;
_.each(tests, function (file) { _.each(tests, function (file) {
if (result) {
print("\nRunning JSUnity test from file '" + file + "'"); print("\nRunning JSUnity test from file '" + file + "'");
}
else {
print("\nSkipping JSUnity test from file '" + file + "' due to previous errors");
}
try { try {
result = result && runTest(file); result = result && runTest(file);

View File

@ -27,6 +27,7 @@
var jsunity = require("jsunity"); var jsunity = require("jsunity");
var internal = require("internal"); var internal = require("internal");
var testHelper = require("org/arangodb/test-helper").Helper;
// ----------------------------------------------------------------------------- // -----------------------------------------------------------------------------
// --SECTION-- collection methods // --SECTION-- collection methods
@ -138,9 +139,9 @@ function CollectionVolatileSuite () {
c.save({"test": true}); c.save({"test": true});
assertEqual(1, c.count()); assertEqual(1, c.count());
c.unload();
internal.wait(4); testHelper.waitUnload(c);
assertEqual(true, c.properties().isVolatile); assertEqual(true, c.properties().isVolatile);
assertEqual(0, c.count()); assertEqual(0, c.count());
}, },
@ -159,8 +160,7 @@ function CollectionVolatileSuite () {
assertEqual(10000, c.count()); assertEqual(10000, c.count());
c.unload(); testHelper.waitUnload(c);
c = null;
internal.wait(5); internal.wait(5);
c = internal.db[cn]; c = internal.db[cn];

View File

@ -1,7 +1,7 @@
/*jslint indent: 2, nomen: true, maxlen: 120, sloppy: true, vars: true, white: true, plusplus: true, nonpropdel: true */ /*jslint indent: 2, nomen: true, maxlen: 120, sloppy: true, vars: true, white: true, plusplus: true, nonpropdel: true */
/*global require, db, ArangoCollection, ArangoDatabase, ArangoCursor, module, /*global require, db, ArangoCollection, ArangoDatabase, ArangoCursor, module,
ShapedJson, RELOAD_AUTH, SYS_DEFINE_ACTION, SYS_EXECUTE_GLOBAL_CONTEXT_FUNCTION, ShapedJson, RELOAD_AUTH, SYS_DEFINE_ACTION, SYS_EXECUTE_GLOBAL_CONTEXT_FUNCTION,
AHUACATL_RUN, AHUACATL_PARSE, AHUACATL_EXPLAIN, WAL_FLUSH */ AHUACATL_RUN, AHUACATL_PARSE, AHUACATL_EXPLAIN, WAL_FLUSH, WAL_ADJUST */
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
/// @brief module "internal" /// @brief module "internal"
@ -111,6 +111,15 @@
delete WAL_FLUSH; delete WAL_FLUSH;
} }
////////////////////////////////////////////////////////////////////////////////
/// @brief adjusts the write-ahead log configuration
////////////////////////////////////////////////////////////////////////////////
if (typeof WAL_ADJUST !== "undefined") {
internal.adjustWal = WAL_ADJUST;
delete WAL_ADJUST;
}
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
/// @brief defines an action /// @brief defines an action
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////

View File

@ -0,0 +1,89 @@
var db = require("org/arangodb").db;
var internal = require("internal");
var jsunity = require("jsunity");
function runSetup () {
  internal.debugClearFailAt();
  db._drop("UnitTestsRecovery");
  db._create("UnitTestsRecovery");

  // make journal creation fail from now on; the transaction below will
  // therefore fill up the WAL without the collection getting new journals
  internal.debugSetFailAt("CreateJournalDocumentCollection");

  db._executeTransaction({
    collections: {
      write: "UnitTestsRecovery"
    },
    action: function () {
      var collection = require("org/arangodb").db._collection("UnitTestsRecovery");
      var i;

      // insert 100k documents, each synced to disk
      for (i = 0; i < 100000; ++i) {
        collection.save({ _key: "test" + i, value1: "test" + i, value2: i }, true); // wait for sync
      }
      // remove every even-numbered document again
      for (i = 0; i < 100000; i += 2) {
        collection.remove("test" + i, true);
      }
    }
  });

  internal.flushWal();
  internal.wait(5);
  // intentionally crash the server so recovery runs on next start
  internal.debugSegfault("crashing server");
}
////////////////////////////////////////////////////////////////////////////////
/// @brief test suite
////////////////////////////////////////////////////////////////////////////////
function recoverySuite () {
  jsunity.jsUnity.attachAssertions();

  return {
    setUp: function () {
    },
    tearDown: function () {
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief test whether we can restore the trx data
////////////////////////////////////////////////////////////////////////////////

    testDiskFullNoJournal : function () {
      var i, doc, c = db._collection("UnitTestsRecovery");
      // setup saved 100k docs and removed the 50k even-numbered ones
      assertEqual(50000, c.count());

      for (i = 0; i < 100000; ++i) {
        if (i % 2 === 0) { // fixed: strict equality instead of ==
          // even-numbered documents were removed in setup
          assertFalse(c.exists("test" + i));
        }
        else {
          // odd-numbered documents must survive recovery;
          // fetch the document once instead of three times
          doc = c.document("test" + i);
          assertEqual("test" + i, doc._key);
          assertEqual("test" + i, doc.value1);
          assertEqual(i, doc.value2);
        }
      }
    }
  };
}
////////////////////////////////////////////////////////////////////////////////
/// @brief executes the test suite
////////////////////////////////////////////////////////////////////////////////
function main (argv) {
  // "setup" phase: prepare data and crash; any other phase: verify recovery
  if (argv[1] !== "setup") {
    jsunity.run(recoverySuite);
    return jsunity.done() ? 0 : 1;
  }
  runSetup();
  return 0;
}

View File

@ -0,0 +1,78 @@
var db = require("org/arangodb").db;
var internal = require("internal");
var jsunity = require("jsunity");
function runSetup () {
  internal.debugClearFailAt();
  db._drop("UnitTestsRecovery");
  db._create("UnitTestsRecovery");

  // prevent the logfile manager from writing shutdown info
  internal.debugSetFailAt("LogfileManagerWriteShutdown");

  db._executeTransaction({
    collections: {
      write: [ "UnitTestsRecovery" ]
    },
    action: function () {
      var collection = require("org/arangodb").db._collection("UnitTestsRecovery");
      var i;

      // write enough documents to span multiple logfiles
      for (i = 0; i < 200000; ++i) {
        collection.save({ _key: "test" + i, value1: i, value2: "foobarbaz" + i });
      }
    }
  });

  // intentionally crash the server so recovery runs on next start
  internal.debugSegfault("crashing server");
}
////////////////////////////////////////////////////////////////////////////////
/// @brief test suite
////////////////////////////////////////////////////////////////////////////////
function recoverySuite () {
  jsunity.jsUnity.attachAssertions();

  return {
    setUp: function () {
    },
    tearDown: function () {
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief test whether all documents from the transaction are restored
////////////////////////////////////////////////////////////////////////////////

    testNoShutdownInfoMultipleLogs : function () {
      var c = db._collection("UnitTestsRecovery");
      assertEqual(200000, c.count());

      var i, doc;
      for (i = 0; i < 200000; ++i) {
        // fetch each document once instead of twice
        doc = c.document("test" + i);
        assertEqual(i, doc.value1);
        assertEqual("foobarbaz" + i, doc.value2);
      }
    }
  };
}
////////////////////////////////////////////////////////////////////////////////
/// @brief executes the test suite
////////////////////////////////////////////////////////////////////////////////
function main (argv) {
  // dispatch on the script phase passed by the test driver
  var isSetup = (argv[1] === "setup");

  if (isSetup) {
    runSetup();
    return 0;
  }
  jsunity.run(recoverySuite);
  return jsunity.done() ? 0 : 1;
}

View File

@ -0,0 +1,88 @@
var db = require("org/arangodb").db;
var internal = require("internal");
var jsunity = require("jsunity");
function runSetup () {
  var i;

  internal.debugClearFailAt();
  db._drop("UnitTestsRecovery");
  var c = db._create("UnitTestsRecovery");

  // prevent the logfile manager from writing shutdown info
  internal.debugSetFailAt("LogfileManagerWriteShutdown");

  // create some documents outside of a transaction first
  for (i = 0; i < 100; ++i) {
    c.save({ _key: "old" + i, a: i });
  }

  db._executeTransaction({
    collections: {
      write: [ "UnitTestsRecovery" ]
    },
    action: function () {
      var collection = require("org/arangodb").db._collection("UnitTestsRecovery");
      var j;

      // insert new documents, each synced to disk
      for (j = 0; j < 10000; ++j) {
        collection.save({ _key: "test" + j, value1: j, value2: "foobarbaz" + j }, true);
      }
      // remove the first half of the pre-existing documents
      for (j = 0; j < 50; ++j) {
        collection.remove("old" + j);
      }
    }
  });

  // intentionally crash the server so recovery runs on next start
  internal.debugSegfault("crashing server");
}
////////////////////////////////////////////////////////////////////////////////
/// @brief test suite
////////////////////////////////////////////////////////////////////////////////
function recoverySuite () {
  jsunity.jsUnity.attachAssertions();

  return {
    setUp: function () {
    },
    tearDown: function () {
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief test whether transaction data is restored without a WAL flush
////////////////////////////////////////////////////////////////////////////////

    testNoShutdownInfoNoFlush : function () {
      var c = db._collection("UnitTestsRecovery");
      // 100 pre-existing docs - 50 removed + 10000 from the transaction
      assertEqual(10050, c.count());

      var i, doc;
      for (i = 0; i < 10000; ++i) {
        // fetch each document once instead of twice
        doc = c.document("test" + i);
        assertEqual(i, doc.value1);
        assertEqual("foobarbaz" + i, doc.value2);
      }
      // the surviving half of the pre-existing documents
      for (i = 50; i < 100; ++i) {
        assertEqual(i, c.document("old" + i).a);
      }
    }
  };
}
////////////////////////////////////////////////////////////////////////////////
/// @brief executes the test suite
////////////////////////////////////////////////////////////////////////////////
function main (argv) {
  // phase 1 ("setup"): prepare data and crash the server;
  // phase 2 (anything else): run the recovery checks
  if (argv[1] !== "setup") {
    jsunity.run(recoverySuite);
    return jsunity.done() ? 0 : 1;
  }
  runSetup();
  return 0;
}

View File

@ -14,8 +14,6 @@ function runSetup () {
c.save({ _key: "test" + i, value1: i, value2: "foobarbaz" + i }); c.save({ _key: "test" + i, value1: i, value2: "foobarbaz" + i });
} }
internal.debugSetFailAt("LogfileManagerWriteShutdown");
// flush the logfile but do not write shutdown info // flush the logfile but do not write shutdown info
internal.flushWal(true, true); internal.flushWal(true, true);

View File

@ -4035,6 +4035,7 @@ function transactionServerFailuresSuite () {
write: [ cn ], write: [ cn ],
}, },
action: function () { action: function () {
var i;
for (i = 100; i < 150; ++i) { for (i = 100; i < 150; ++i) {
c.save({ _key: "test" + i, a: i }); c.save({ _key: "test" + i, a: i });
} }
@ -4062,6 +4063,121 @@ function transactionServerFailuresSuite () {
assertEqual(undefined, c.document("test" + i).b); assertEqual(undefined, c.document("test" + i).b);
} }
}); });
},
////////////////////////////////////////////////////////////////////////////////
/// @brief test disk full during collection
////////////////////////////////////////////////////////////////////////////////
testDiskFullWhenCollectingTransaction : function () {
internal.debugClearFailAt();
db._drop(cn);
c = db._create(cn);
// should not cause any problems yet, but later
internal.debugSetFailAt("CreateJournalDocumentCollection");
// adjust the configuration and make sure we flush all reserve logfiles
internal.adjustWal({ reserveLogfiles: 1 });
var i;
for (i = 0; i < 100; ++i) {
c.save({ _key: "test" + i, a: i });
}
assertEqual(100, c.count());
for (i = 0; i < 4; ++i) {
// write something into the logs so we can flush 'em
c.save({ _key: "foo" });
c.remove("foo");
internal.flushWal(true, false);
}
// one more to populate a new logfile
c.save({ _key: "foo" });
c.remove("foo");
TRANSACTION({
collections: {
write: [ cn ],
},
action: function () {
var i;
for (i = 100; i < 150; ++i) {
c.save({ _key: "test" + i, a: i });
}
assertEqual(150, c.count());
// make sure we fill up the logfile
for (i = 0; i < 100000; ++i) {
c.save({ _key: "foo" + i, value: "the quick brown foxx jumped over the lazy dog" });
}
}
});
assertEqual(100150, c.count());
var fig = c.figures();
assertEqual(100160, fig.uncollectedLogfileEntries);
internal.debugClearFailAt();
internal.flushWal(true, true);
assertEqual(100150, c.count());
testHelper.waitUnload(c);
// data should be there after unload
assertEqual(100150, c.count());
},
////////////////////////////////////////////////////////////////////////////////
/// @brief test: disk full during transaction
////////////////////////////////////////////////////////////////////////////////
testDiskFullDuringTransaction : function () {
internal.debugClearFailAt();
db._drop(cn);
c = db._create(cn);
var i;
for (i = 0; i < 100; ++i) {
c.save({ _key: "test" + i, a: i });
}
assertEqual(100, c.count());
internal.flushWal(true, true);
try {
TRANSACTION({
collections: {
write: [ cn ],
},
action: function () {
var i;
for (i = 100; i < 150; ++i) {
c.save({ _key: "test" + i, a: i });
}
assertEqual(150, c.count());
// should not cause any problems
internal.debugSetFailAt("LogfileManagerGetWriteableLogfile");
for (i = 0; i < 200000; ++i) {
c.save({ _key: "foo" + i, value: "the quick brown foxx jumped over the lazy dog" });
}
fail();
}
});
}
catch (err) {
assertEqual(internal.errors.ERROR_ARANGO_NO_JOURNAL.code, err.errorNum);
}
assertEqual(100, c.count());
internal.debugClearFailAt();
} }
}; };
@ -4075,6 +4191,11 @@ function transactionServerFailuresSuite () {
/// @brief executes the test suites /// @brief executes the test suites
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
// only run this test suite if server-side failures are enabled
if (internal.debugCanUseFailAt()) {
jsunity.run(transactionServerFailuresSuite);
}
jsunity.run(transactionInvocationSuite); jsunity.run(transactionInvocationSuite);
jsunity.run(transactionCollectionsSuite); jsunity.run(transactionCollectionsSuite);
jsunity.run(transactionOperationsSuite); jsunity.run(transactionOperationsSuite);
@ -4085,11 +4206,6 @@ jsunity.run(transactionCountSuite);
jsunity.run(transactionCrossCollectionSuite); jsunity.run(transactionCrossCollectionSuite);
jsunity.run(transactionConstraintsSuite); jsunity.run(transactionConstraintsSuite);
// only run this test suite if server-side failures are enabled
if (internal.debugCanUseFailAt()) {
jsunity.run(transactionServerFailuresSuite);
}
return jsunity.done(); return jsunity.done();
// ----------------------------------------------------------------------------- // -----------------------------------------------------------------------------

View File

@ -0,0 +1,134 @@
////////////////////////////////////////////////////////////////////////////////
/// @brief test the write-ahead log interface
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2010-2012 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is triAGENS GmbH, Cologne, Germany
///
/// @author Dr. Frank Celler
/// @author Copyright 2012, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////
var jsunity = require("jsunity");
var arangodb = require("org/arangodb");
var testHelper = require("org/arangodb/test-helper").Helper;
var db = arangodb.db;
var internal = require("internal");
// -----------------------------------------------------------------------------
// --SECTION-- wal functions
// -----------------------------------------------------------------------------
////////////////////////////////////////////////////////////////////////////////
/// @brief test suite
////////////////////////////////////////////////////////////////////////////////
function WalSuite () {
  var cn = "UnitTestsWal";
  var c;

  return {

    setUp: function () {
      db._drop(cn);
      c = db._create(cn);
    },

    tearDown: function () {
      db._drop(cn);
      c = null;
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief test max tick
////////////////////////////////////////////////////////////////////////////////

    testMaxTickEmptyCollection : function () {
      var fig = c.figures();

      // we shouldn't have a tick yet
      assertEqual("0", fig.lastTick);
      assertEqual(0, fig.uncollectedLogfileEntries);
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief max tick
////////////////////////////////////////////////////////////////////////////////

    testMaxTickNonEmptyCollection : function () {
      internal.flushWal(true, true);

      var i;
      for (i = 0; i < 100; ++i) {
        c.save({ test: i });
      }

      // we shouldn't have a tick yet
      var fig = c.figures();
      assertEqual("0", fig.lastTick);
      assertTrue(fig.uncollectedLogfileEntries > 0);

      internal.flushWal(true, true);

      // now we should have a tick
      fig = c.figures();
      assertNotEqual("0", fig.lastTick);
      assertEqual(0, fig.uncollectedLogfileEntries);
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief max tick
////////////////////////////////////////////////////////////////////////////////

    testMaxTickAfterUnload : function () {
      var i;
      for (i = 0; i < 100; ++i) {
        c.save({ test: i });
      }

      internal.flushWal(true, true);
      testHelper.waitUnload(c);

      // we should have a tick and no uncollected entries
      // fixed: declare fig locally instead of leaking an implicit global
      var fig = c.figures();
      assertNotEqual("0", fig.lastTick);
      assertEqual(0, fig.uncollectedLogfileEntries);
    }
  };
}
// -----------------------------------------------------------------------------
// --SECTION--                                                              main
// -----------------------------------------------------------------------------

////////////////////////////////////////////////////////////////////////////////
/// @brief executes the test suites
////////////////////////////////////////////////////////////////////////////////

jsunity.run(WalSuite);

// report overall success/failure of the suite back to the test driver
return jsunity.done();

// Local Variables:
// mode: outline-minor
// outline-regexp: "^\\(/// @brief\\|/// @addtogroup\\|// --SECTION--\\|/// @page\\|/// @}\\)"
// End: