
fixed invalid pointers when compacting deletion markers

fixed invalid pointers for updates
This commit is contained in:
Jan Steemann 2013-03-06 14:02:32 +01:00
parent eedbd6e770
commit 5098792b4f
4 changed files with 77 additions and 33 deletions

View File

@@ -808,19 +808,24 @@ static v8::Handle<v8::Value> DocumentVocbaseCol (const bool useCollection,
assert(col);
assert(key);
SingleCollectionReadOnlyTransaction<EmbeddableTransaction<V8TransactionContext> > trx(vocbase, resolver, col->_cid);
int res = trx.begin();
if (res != TRI_ERROR_NO_ERROR) {
return scope.Close(v8::ThrowException(TRI_CreateErrorObject(res, "cannot fetch document", true)));
}
TRI_barrier_t* barrier = TRI_CreateBarrierElement(trx.barrierList());
if (barrier == 0) {
return scope.Close(v8::ThrowException(TRI_CreateErrorObject(TRI_ERROR_OUT_OF_MEMORY)));
}
assert(barrier != 0);
v8::Handle<v8::Value> result;
TRI_doc_mptr_t document;
res = trx.read(&document, key, true);
if (res == TRI_ERROR_NO_ERROR) {
TRI_barrier_t* barrier = TRI_CreateBarrierElement(trx.barrierList());
result = TRI_WrapShapedJson(resolver, col, &document, barrier);
}
@@ -831,6 +836,8 @@ static v8::Handle<v8::Value> DocumentVocbaseCol (const bool useCollection,
}
if (document._key == 0 || document._data == 0) {
TRI_FreeBarrier(barrier);
return scope.Close(v8::ThrowException(
TRI_CreateErrorObject(TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND,
"document not found")));

View File

@@ -91,13 +91,31 @@ static void CleanupDocumentCollection (TRI_document_collection_t* sim) {
// check and remove all callback elements at the beginning of the list
TRI_LockSpin(&container->_lock);
// check the element on top of the barrier list
// if it is a TRI_BARRIER_ELEMENT, it means that there is still a reference held
// to document data in a datafile. In this case we must not unload or remove any datafile
if (container->_begin == NULL || container->_begin->_type == TRI_BARRIER_ELEMENT) {
// did not find anything on top of the barrier list or found an element marker
// this means we must exit
// did not find anything at the head of the barrier list or found an element marker
// this means we must exit and can neither throw away datafiles nor unload collections
TRI_UnlockSpin(&container->_lock);
return;
}
// no TRI_BARRIER_ELEMENT at the head of the barrier list. This means that there is
// some other action we can perform (e.g. unloading a datafile or a collection)
// note that there is no need to check the entire list for a TRI_BARRIER_ELEMENT as
// the list is filled up in chronological order. New barriers are always added to the
// tail of the list, and if we have the following list
// HEAD -> TRI_BARRIER_DATAFILE_CALLBACK -> TRI_BARRIER_ELEMENT
// then it is still safe to execute the datafile callback operation, even if there
// is a TRI_BARRIER_ELEMENT after it.
// This is the case because the TRI_BARRIER_DATAFILE_CALLBACK is only put into the
// barrier list after changing the pointers in all headers. After the pointers are
// changed, it is safe to unload/remove an old datafile (that no one points to). And
// any newer TRI_BARRIER_ELEMENTs will always reference data inside other datafiles.
element = container->_begin;
assert(element);
@@ -111,8 +129,12 @@ static void CleanupDocumentCollection (TRI_document_collection_t* sim) {
element->_next->_prev = NULL;
}
// yes, we can release the lock here
TRI_UnlockSpin(&container->_lock);
// someone else might now insert a new TRI_BARRIER_ELEMENT here, but it will
// always refer to a different datafile than the one that we will now unload
// execute callback; some of the callbacks might delete or free our collection
if (element->_type == TRI_BARRIER_DATAFILE_CALLBACK) {
TRI_barrier_datafile_cb_t* de;
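
A condensed sketch of the head-of-list check described in the comments above, with UnlinkAndExecute as a hypothetical stand-in for the unlink-and-callback steps that follow in the real function:

TRI_barrier_t* head = container->_begin;

if (head == NULL || head->_type == TRI_BARRIER_ELEMENT) {
  // a live document reference may still point into a datafile, so
  // nothing can be unloaded or removed in this round
  TRI_UnlockSpin(&container->_lock);
  return;
}

// the head is a callback-type barrier; it was appended only after all
// header pointers had been redirected away from the old datafile, so a
// TRI_BARRIER_ELEMENT further down the list cannot reference that file
UnlinkAndExecute(container, head);   // hypothetical stand-in, see above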

View File

@@ -72,7 +72,7 @@ static int const COMPACTOR_INTERVAL = (1 * 1000 * 1000);
/// to allow the gc to start when waiting for a journal to appear.
////////////////////////////////////////////////////////////////////////////////
static TRI_datafile_t* SelectCompactor (TRI_document_collection_t* sim,
static TRI_datafile_t* SelectCompactor (TRI_document_collection_t* document,
TRI_voc_size_t size,
TRI_df_marker_t** result) {
TRI_datafile_t* datafile;
@@ -80,34 +80,34 @@ static TRI_datafile_t* SelectCompactor (TRI_document_collection_t* sim,
size_t i;
size_t n;
TRI_LOCK_JOURNAL_ENTRIES_DOC_COLLECTION(sim);
TRI_LOCK_JOURNAL_ENTRIES_DOC_COLLECTION(document);
while (true) {
n = sim->base.base._compactors._length;
n = document->base.base._compactors._length;
for (i = 0; i < n; ++i) {
// select datafile
datafile = sim->base.base._compactors._buffer[i];
datafile = document->base.base._compactors._buffer[i];
// try to reserve space
res = TRI_ReserveElementDatafile(datafile, size, result);
// in case of full datafile, try next
if (res == TRI_ERROR_NO_ERROR) {
TRI_UNLOCK_JOURNAL_ENTRIES_DOC_COLLECTION(sim);
TRI_UNLOCK_JOURNAL_ENTRIES_DOC_COLLECTION(document);
return datafile;
}
else if (res != TRI_ERROR_ARANGO_DATAFILE_FULL) {
TRI_UNLOCK_JOURNAL_ENTRIES_DOC_COLLECTION(sim);
TRI_UNLOCK_JOURNAL_ENTRIES_DOC_COLLECTION(document);
return NULL;
}
}
TRI_WAIT_JOURNAL_ENTRIES_DOC_COLLECTION(sim);
TRI_WAIT_JOURNAL_ENTRIES_DOC_COLLECTION(document);
}
TRI_UNLOCK_JOURNAL_ENTRIES_DOC_COLLECTION(sim);
TRI_UNLOCK_JOURNAL_ENTRIES_DOC_COLLECTION(document);
}
////////////////////////////////////////////////////////////////////////////////
@@ -268,16 +268,12 @@ static bool Compactifier (TRI_df_marker_t const* marker, void* data, TRI_datafil
}
// check if the document is still active
TRI_READ_LOCK_DOCUMENTS_INDEXES_PRIMARY_COLLECTION(primary);
TRI_WRITE_LOCK_DOCUMENTS_INDEXES_PRIMARY_COLLECTION(primary);
found = TRI_LookupByKeyAssociativePointer(&primary->_primaryIndex,((char*) d + d->_offsetKey));
deleted = found == NULL || found->_validTo != 0;
TRI_READ_UNLOCK_DOCUMENTS_INDEXES_PRIMARY_COLLECTION(primary);
// update datafile
TRI_WRITE_LOCK_DATAFILES_DOC_COLLECTION(primary);
dfi = TRI_FindDatafileInfoPrimaryCollection(primary, fid);
if (deleted) {
@@ -285,13 +281,13 @@ static bool Compactifier (TRI_df_marker_t const* marker, void* data, TRI_datafil
dfi->_sizeDead += marker->_size - markerSize - keyBodySize;
LOG_DEBUG("found a stale document after copying: %s", ((char*) d + d->_offsetKey));
TRI_WRITE_UNLOCK_DATAFILES_DOC_COLLECTION(primary);
TRI_WRITE_UNLOCK_DOCUMENTS_INDEXES_PRIMARY_COLLECTION(primary);
return true;
}
cnv.c = found;
cnv.v->_fid = datafile->_fid;
cnv.v->_fid = fid;
cnv.v->_data = result;
// let _key point to the new key position
cnv.v->_key = ((char*) result) + (((TRI_doc_document_key_marker_t*) result)->_offsetKey);
@@ -300,11 +296,13 @@ static bool Compactifier (TRI_df_marker_t const* marker, void* data, TRI_datafil
dfi->_numberAlive += 1;
dfi->_sizeAlive += marker->_size - markerSize - keyBodySize;
TRI_WRITE_UNLOCK_DATAFILES_DOC_COLLECTION(primary);
TRI_WRITE_UNLOCK_DOCUMENTS_INDEXES_PRIMARY_COLLECTION(primary);
}
// deletion
else if (marker->_type == TRI_DOC_MARKER_KEY_DELETION) {
TRI_doc_deletion_key_marker_t const* d = (TRI_doc_deletion_key_marker_t const*) marker;
// write to compactor files
res = CopyDocument(doc, marker, &result, &fid);
@@ -313,12 +311,21 @@ static bool Compactifier (TRI_df_marker_t const* marker, void* data, TRI_datafil
}
// update datafile info
TRI_WRITE_LOCK_DATAFILES_DOC_COLLECTION(primary);
TRI_WRITE_LOCK_DOCUMENTS_INDEXES_PRIMARY_COLLECTION(primary);
found = TRI_LookupByKeyAssociativePointer(&primary->_primaryIndex,((char*) d + d->_offsetKey));
if (found != NULL) {
cnv.c = found;
cnv.v->_fid = fid;
cnv.v->_data = result;
// let _key point to the new key position
cnv.v->_key = ((char*) result) + (((TRI_doc_deletion_key_marker_t*) result)->_offsetKey);
}
dfi = TRI_FindDatafileInfoPrimaryCollection(primary, fid);
dfi->_numberDeletion += 1;
TRI_WRITE_UNLOCK_DATAFILES_DOC_COLLECTION(primary);
TRI_WRITE_UNLOCK_DOCUMENTS_INDEXES_PRIMARY_COLLECTION(primary);
}
return true;
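
This hunk is the heart of the "invalid pointers when compacting deletion markers" fix: the read lock is widened to a write lock because the header is now mutated, the wrong datafile->_fid is replaced by the compactor file's fid, and deletion markers receive the same pointer fix-up that document markers do. A hedged sketch of the fix-up pattern, with the union declared here as assumed from the surrounding code:

// headers stored in the primary index are const, so a union is used to
// obtain a writable view of the found header (declaration assumed)
union { TRI_doc_mptr_t const* c; TRI_doc_mptr_t* v; } cnv;

cnv.c = found;
cnv.v->_fid  = fid;       // the compactor file's fid, not datafile->_fid
cnv.v->_data = result;    // position of the copied marker in the compactor file
cnv.v->_key  = ((char*) result)
             + ((TRI_doc_deletion_key_marker_t*) result)->_offsetKey;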
@@ -504,7 +511,7 @@ void TRI_CompactorVocBase (void* data) {
TRI_col_type_e type;
// keep initial _state value as vocbase->_state might change during compaction loop
int state = vocbase->_state;
bool worked = false;
bool worked;
// copy all collections
TRI_READ_LOCK_COLLECTIONS_VOCBASE(vocbase);
@@ -534,6 +541,7 @@
continue;
}
worked = false;
type = primary->base._info._type;
// for simple collection, compactify datafiles
@@ -541,8 +549,13 @@
if (collection->_status == TRI_VOC_COL_STATUS_LOADED) {
TRI_barrier_t* ce = TRI_CreateBarrierElement(&primary->_barrierList);
worked = CompactifyDocumentCollection((TRI_document_collection_t*) primary);
if (ce != NULL) {
if (ce == NULL) {
// out of memory
LOG_WARNING("out of memory when trying to create a barrier element");
}
else {
worked = CompactifyDocumentCollection((TRI_document_collection_t*) primary);
TRI_FreeBarrier(ce);
}
}
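
Two separate problems are addressed here. First, worked was previously initialized only once before the collection loop, so a collection that skipped compaction could inherit the previous iteration's value; it is now reset per collection. Second, compaction no longer runs when the barrier element cannot be allocated. The corrected per-collection flow, condensed from the interleaved old and new lines above:

worked = false;   // reset for every collection, not once before the loop

TRI_barrier_t* ce = TRI_CreateBarrierElement(&primary->_barrierList);

if (ce == NULL) {
  // without a barrier the collection could be freed during compaction,
  // so this collection is skipped for the current round
  LOG_WARNING("out of memory when trying to create a barrier element");
}
else {
  worked = CompactifyDocumentCollection((TRI_document_collection_t*) primary);
  TRI_FreeBarrier(ce);
}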

View File

@@ -545,9 +545,10 @@ static void UpdateHeader (TRI_datafile_t* datafile,
marker = (TRI_doc_document_key_marker_t const*) m;
*update = *header;
update->_rid = marker->_rid;
update->_fid = datafile->_fid;
update->_rid = marker->_rid;
update->_fid = datafile->_fid;
update->_data = marker;
update->_key = ((char*) marker) + marker->_offsetKey;
}
////////////////////////////////////////////////////////////////////////////////
@@ -1464,13 +1465,12 @@ static bool OpenIterator (TRI_df_marker_t const* marker, void* data, TRI_datafil
header = collection->_headers->request(collection->_headers);
// TODO: header might be NULL and must be checked
header = collection->_headers->verify(collection->_headers, header);
header->_rid = d->_rid;
header->_rid = d->_rid;
header->_validFrom = marker->_tick;
header->_validTo = marker->_tick; // TODO: fix for trx
header->_data = 0;
header->_key = key;
header->_data = marker;
header->_key = key;
// update immediate indexes
CreateImmediateIndexes(collection, header);
@@ -1491,6 +1491,8 @@ static bool OpenIterator (TRI_df_marker_t const* marker, void* data, TRI_datafil
change.c = found;
change.v->_validFrom = marker->_tick;
change.v->_validTo = marker->_tick; // TODO: fix for trx
change.v->_data = marker;
change.v->_key = key;
// update the datafile info
dfi = TRI_FindDatafileInfoPrimaryCollection(primary, found->_fid);
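
The last two hunks attack the same bug class from the open/recovery side: UpdateHeader now also refreshes _data, and OpenIterator initializes _data with the marker instead of 0. A hypothetical helper (not part of the commit, field names as in the hunks) states the invariant these changes establish:

// hypothetical helper, not part of the commit: every master pointer
// must reference the marker it was built from once the collection is open
static bool HeaderPointsAtMarker (TRI_doc_mptr_t const* header) {
  return header != NULL
      && header->_data != NULL    // OpenIterator previously left this 0
      && header->_key != NULL;    // points at the key inside the marker
}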