1
0
Fork 0

fixed too eager compaction

Conflicts:
	CHANGELOG
	arangod/VocBase/compactor.c
This commit is contained in:
Jan Steemann 2014-03-10 16:44:02 +01:00
parent d178ec939c
commit 3c88e45802
4 changed files with 69 additions and 14 deletions

View File

@@ -217,7 +217,16 @@ v2.0.0-rc1 (2014-02-28)
* added collection status "loading"
v1.4.12 (XXXX-XX-XX)
v1.4.13 (XXXX-XX-XX)
--------------------
* fixed too eager compaction
The compaction will now wait for several seconds before trying to re-compact the same
collection. Additionally, some other limits have been introduced for the compaction.
v1.4.12 (2014-03-05)
--------------------
* fixed display bug in web interface which caused the following problems:

View File

@@ -77,12 +77,32 @@
#define COMPACTOR_MAX_FILES 4
////////////////////////////////////////////////////////////////////////////////
/// @brief maximum multiple of journal filesize of a compacted file
/// a value of 3 means that the maximum filesize of the compacted file is
/// 3 x (collection->journalSize)
////////////////////////////////////////////////////////////////////////////////
#define COMPACTOR_MAX_SIZE_FACTOR (3)
////////////////////////////////////////////////////////////////////////////////
/// @brief maximum filesize of resulting compacted file
////////////////////////////////////////////////////////////////////////////////
#define COMPACTOR_MAX_RESULT_FILESIZE (128 * 1024 * 1024)
////////////////////////////////////////////////////////////////////////////////
/// @brief datafiles smaller than the following value will be merged with others
////////////////////////////////////////////////////////////////////////////////
#define COMPACTOR_MIN_SIZE (128 * 1024)
////////////////////////////////////////////////////////////////////////////////
/// @brief re-try compaction of a specific collection in this interval (in s)
////////////////////////////////////////////////////////////////////////////////
#define COMPACTOR_COLLECTION_INTERVAL (15.0)
////////////////////////////////////////////////////////////////////////////////
/// @brief compactify interval in microseconds
////////////////////////////////////////////////////////////////////////////////
@@ -266,6 +286,7 @@ static void DropDatafileCallback (TRI_datafile_t* datafile, void* data) {
TRI_FreeString(TRI_CORE_MEM_ZONE, name);
if (datafile->isPhysical(datafile)) {
// copy the current filename
copy = TRI_DuplicateStringZ(TRI_CORE_MEM_ZONE, datafile->_filename);
ok = TRI_RenameDatafile(datafile, filename);
@@ -1031,6 +1052,7 @@ static void CompactifyDatafiles (TRI_document_collection_t* document,
static bool CompactifyDocumentCollection (TRI_document_collection_t* document) {
TRI_primary_collection_t* primary;
TRI_vector_t vector;
uint64_t maxSize;
int64_t numAlive;
size_t i, n;
bool compactNext;
@@ -1057,6 +1079,15 @@ static bool CompactifyDocumentCollection (TRI_document_collection_t* document) {
return false;
}
// get maximum size of result file
maxSize = (uint64_t) COMPACTOR_MAX_SIZE_FACTOR * (uint64_t) primary->base._info._maximalSize;
if (maxSize < 8 * 1024 * 1024) {
maxSize = 8 * 1024 * 1024;
}
if (maxSize >= COMPACTOR_MAX_RESULT_FILESIZE) {
maxSize = COMPACTOR_MAX_RESULT_FILESIZE;
}
// copy datafile information
TRI_InitVector(&vector, TRI_UNKNOWN_MEM_ZONE, sizeof(compaction_info_t));
numAlive = 0;
@@ -1065,6 +1096,7 @@ static bool CompactifyDocumentCollection (TRI_document_collection_t* document) {
TRI_datafile_t* df;
TRI_doc_datafile_info_t* dfi;
compaction_info_t compaction;
uint64_t totalSize = 0;
bool shouldCompact;
df = primary->base._datafiles._buffer[i];
@@ -1076,7 +1108,7 @@ static bool CompactifyDocumentCollection (TRI_document_collection_t* document) {
if (dfi == NULL) {
continue;
}
shouldCompact = false;
if (! compactNext &&
@@ -1086,8 +1118,8 @@ static bool CompactifyDocumentCollection (TRI_document_collection_t* document) {
shouldCompact = true;
compactNext = true;
}
else if (numAlive == 0 && dfi->_numberDeletion > 0) {
// compact first datafile already if it has got some deletions
else if (numAlive == 0 && dfi->_numberAlive == 0 && dfi->_numberDeletion > 0) {
// compact first datafile(s) already if they have some deletions
shouldCompact = true;
compactNext = true;
}
@@ -1136,6 +1168,8 @@ static bool CompactifyDocumentCollection (TRI_document_collection_t* document) {
(unsigned long long) dfi->_sizeTransaction,
(unsigned long long) dfi->_sizeShapes,
(unsigned long long) dfi->_sizeAttributes);
totalSize += (uint64_t) df->_maximalSize;
compaction._datafile = df;
compaction._keepDeletions = (numAlive > 0 && i > 0);
@@ -1149,7 +1183,8 @@ static bool CompactifyDocumentCollection (TRI_document_collection_t* document) {
// delete the collection in the middle of compaction, but the compactor
// will not pick this up as it is read-locking the collection status)
if (TRI_LengthVector(&vector) >= COMPACTOR_MAX_FILES) {
if (TRI_LengthVector(&vector) >= COMPACTOR_MAX_FILES ||
totalSize >= maxSize) {
// found enough to compact
break;
}
@@ -1454,10 +1489,11 @@ void TRI_CompactorVocBase (void* data) {
while (true) {
// keep initial _state value as vocbase->_state might change during compaction loop
int state = vocbase->_state;
// check if compaction is currently disallowed
if (CheckAndLockCompaction(vocbase)) {
// compaction is currently allowed
double now = TRI_microtime();
size_t i, n;
numCompacted = 0;
@@ -1503,16 +1539,24 @@
continue;
}
ce = TRI_CreateBarrierCompaction(&primary->_barrierList);
if (primary->_lastCompaction + COMPACTOR_COLLECTION_INTERVAL <= now) {
ce = TRI_CreateBarrierCompaction(&primary->_barrierList);
if (ce == NULL) {
// out of memory
LOG_WARNING("out of memory when trying to create a barrier element");
}
else {
worked = CompactifyDocumentCollection((TRI_document_collection_t*) primary);
if (ce == NULL) {
// out of memory
LOG_WARNING("out of memory when trying to create a barrier element");
}
else {
worked = CompactifyDocumentCollection((TRI_document_collection_t*) primary);
if (! worked) {
// set compaction stamp
primary->_lastCompaction = now;
}
// if we worked, then we don't set the compaction stamp to force another round of compaction
TRI_FreeBarrier(ce);
TRI_FreeBarrier(ce);
}
}
// read-unlock the compaction lock

View File

@@ -582,6 +582,7 @@ int TRI_InitPrimaryCollection (TRI_primary_collection_t* primary,
primary->_capConstraint = NULL;
primary->_keyGenerator = NULL;
primary->_numberDocuments = 0;
primary->_lastCompaction = 0.0;
primary->figures = Figures;
primary->size = Count;

View File

@@ -311,6 +311,7 @@ typedef struct TRI_primary_collection_s {
int64_t _numberDocuments;
TRI_read_write_lock_t _compactionLock;
double _lastCompaction;
int (*beginRead) (struct TRI_primary_collection_s*);
int (*endRead) (struct TRI_primary_collection_s*);