mirror of https://gitee.com/bigwinds/arangodb
added optional cache cleanup during collection load
This commit is contained in:
parent c9a2c028fa
commit 2032d61039
```diff
@@ -253,8 +253,15 @@ bool MMFilesCollection::OpenIterator(TRI_df_marker_t const* marker, MMFilesColle
       if (tick > datafile->_dataMax) {
         datafile->_dataMax = tick;
       }
+
+      if (++data->_operations % 1024 == 0) {
+        data->_mmdr.clear(256);
+      }
     } else if (type == TRI_DF_MARKER_VPACK_REMOVE) {
       res = OpenIteratorHandleDeletionMarker(marker, datafile, data);
+      if (++data->_operations % 1024 == 0) {
+        data->_mmdr.clear(256);
+      }
     } else {
       if (type == TRI_DF_MARKER_HEADER) {
         // ensure there is a datafile info entry for each datafile of the
```
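
Note the shape of the change: the open iterator already tracked per-datafile tick bounds; the new lines bound memory during collection load by clearing the `ManagedDocumentResult`'s cached chunk references after every 1024th processed marker. A minimal standalone sketch of this amortized-cleanup pattern follows; the `Cache` type and all names in it are illustrative stand-ins, not ArangoDB's API:

```cpp
#include <cstddef>
#include <cstdint>
#include <unordered_set>

// Illustrative stand-in for a cache that pins memory per processed entry.
struct Cache {
  std::unordered_set<int> pinned;

  // Drop all pinned entries, but only once more than `threshold` piled up.
  void clear(std::size_t threshold) {
    if (pinned.size() > threshold) {
      pinned.clear();  // release everything in one go
    }
  }
};

// Long-running load loop: amortize cleanup over every 1024th operation,
// mirroring `if (++data->_operations % 1024 == 0) data->_mmdr.clear(256);`
void scanAll(Cache& cache, int nMarkers) {
  std::uint64_t operations = 0;
  for (int i = 0; i < nMarkers; ++i) {
    cache.pinned.insert(i);  // handling a marker pins cache memory
    if (++operations % 1024 == 0) {
      cache.clear(256);      // keeps memory bounded across the whole scan
    }
  }
}
```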
```diff
@@ -59,6 +59,7 @@ class MMFilesCollection final : public PhysicalCollection {
     IndexLookupContext _context;
     uint64_t _deletions;
     uint64_t _documents;
+    uint64_t _operations;
     int64_t _initialCount;
     bool const _trackKeys;
 
```
```diff
@@ -74,6 +75,7 @@ class MMFilesCollection final : public PhysicalCollection {
         _context(trx, collection, &_mmdr, 1),
         _deletions(0),
         _documents(0),
+        _operations(0),
         _initialCount(-1),
         _trackKeys(collection->keyGenerator()->trackKeys()) {
       TRI_ASSERT(collection != nullptr);
```
```diff
@@ -3341,6 +3341,10 @@ void Transaction::addChunk(RevisionCacheChunk* chunk) {
   _transactionContext->addChunk(chunk);
 }
 
+void Transaction::clearChunks(size_t threshold) {
+  _transactionContext->clearChunks(threshold);
+}
+
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief get list of indexes for a collection
 ////////////////////////////////////////////////////////////////////////////////
```
```diff
@@ -668,6 +668,7 @@ class Transaction {
   virtual Transaction* clone() const;
 
   void addChunk(RevisionCacheChunk* chunk);
+  void clearChunks(size_t threshold);
 
  private:
 
```
```diff
@@ -189,6 +189,17 @@ void TransactionContext::addChunk(RevisionCacheChunk* chunk) {
   // now need to keep track of it twice
   chunk->release();
 }
 
+// clear chunks if they use too much memory
+void TransactionContext::clearChunks(size_t threshold) {
+  MUTEX_LOCKER(locker, _chunksLock);
+  if (_chunks.size() > threshold) {
+    for (auto& chunk : _chunks) {
+      chunk->release();
+    }
+    _chunks.clear();
+  }
+}
+
 void TransactionContext::stealChunks(std::unordered_set<RevisionCacheChunk*>& target) {
   target.clear();
```
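
This is the heart of the feature: tracked chunks are released in bulk, but only once more than `threshold` of them have accumulated, so short transactions never pay for the cleanup. A self-contained sketch of the same logic, with `std::mutex`/`std::lock_guard` standing in for ArangoDB's `MUTEX_LOCKER` macro and an illustrative `Chunk` type:

```cpp
#include <cstddef>
#include <mutex>
#include <vector>

// Illustrative refcounted chunk; stands in for RevisionCacheChunk.
struct Chunk {
  int refs = 1;
  void release() { --refs; }  // real code would free the chunk at zero
};

class Context {
 public:
  void addChunk(Chunk* c) {
    std::lock_guard<std::mutex> locker(_chunksLock);
    _chunks.push_back(c);
  }

  // Same shape as TransactionContext::clearChunks: do nothing until the
  // tracked set exceeds `threshold`, then release and forget all chunks.
  void clearChunks(std::size_t threshold) {
    std::lock_guard<std::mutex> locker(_chunksLock);
    if (_chunks.size() > threshold) {
      for (auto* chunk : _chunks) {
        chunk->release();
      }
      _chunks.clear();
    }
  }

 private:
  std::mutex _chunksLock;
  std::vector<Chunk*> _chunks;
};
```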
```diff
@@ -101,6 +101,7 @@ class TransactionContext {
   DocumentDitch* ditch(TRI_voc_cid_t) const;
 
   void addChunk(RevisionCacheChunk*);
+  void clearChunks(size_t threshold);
 
   void stealChunks(std::unordered_set<RevisionCacheChunk*>&);
 
```
```diff
@@ -198,18 +198,6 @@ void CollectionRevisionsCache::insertRevision(TRI_voc_rid_t revisionId, Revision
   }
 }
 
-// insert from WAL
-void CollectionRevisionsCache::insertRevision(TRI_voc_rid_t revisionId, wal::Logfile* logfile, uint32_t offset, bool shouldLock) {
-  CONDITIONAL_WRITE_LOCKER(locker, _lock, shouldLock);
-
-  int res = _revisions.insert(nullptr, RevisionCacheEntry(revisionId, logfile, offset));
-
-  if (res != TRI_ERROR_NO_ERROR) {
-    _revisions.removeByKey(nullptr, &revisionId);
-    _revisions.insert(nullptr, RevisionCacheEntry(revisionId, logfile, offset));
-  }
-}
-
 // remove a revision
 void CollectionRevisionsCache::removeRevision(TRI_voc_rid_t revisionId) {
   WRITE_LOCKER(locker, _lock);
```
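
The deleted overload implemented overwrite semantics by hand: attempt the insert, and if the key already exists, remove the stale entry and insert again. A sketch of that retry pattern over a standard container, where `std::unordered_map` stands in for the custom `_revisions` index:

```cpp
#include <cstdint>
#include <unordered_map>

struct Entry { std::uint32_t offset; };

// Overwrite-on-conflict, spelled out the way the removed code did it:
// attempt the insert first, and only on failure remove and retry.
void insertRevision(std::unordered_map<std::uint64_t, Entry>& revisions,
                    std::uint64_t revisionId, std::uint32_t offset) {
  auto result = revisions.insert({revisionId, Entry{offset}});
  if (!result.second) {           // insert failed: key already present
    revisions.erase(revisionId);  // mirrors _revisions.removeByKey(...)
    revisions.insert({revisionId, Entry{offset}});
  }
}
```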
```diff
@@ -74,9 +74,6 @@ class CollectionRevisionsCache {
   // insert from chunk
   void insertRevision(TRI_voc_rid_t revisionId, RevisionCacheChunk* chunk, uint32_t offset, uint32_t version, bool shouldLock);
 
-  // insert from WAL
-  void insertRevision(TRI_voc_rid_t revisionId, wal::Logfile* logfile, uint32_t offset, bool shouldLock);
-
   // remove a revision
   void removeRevision(TRI_voc_rid_t revisionId);
 
```
```diff
@@ -73,14 +73,14 @@ void ManagedDocumentResult::addExisting(ChunkProtector& protector, TRI_voc_rid_t
   _lastRevisionId = revisionId;
 }
 
-/*
-void ManagedDocumentResult::clear() {
-  if (_chunk != nullptr) {
-    _chunk->release();
-    _chunk = nullptr;
+void ManagedDocumentResult::clear(size_t threshold) {
+  if (_trx != nullptr) {
+    _trx->clearChunks(threshold);
   }
   _vpack = nullptr;
   _lastRevisionId = 0;
   _chunkCache.clear();
 }
-*/
 
 ManagedDocumentResult& ManagedDocumentResult::operator=(ManagedDocumentResult const& other) {
   if (this != &other) {
```
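
Read together with the `Transaction` and `TransactionContext` hunks above, `clear(threshold)` no longer releases a single pinned chunk itself; it asks the transaction to drop its whole chunk set once the threshold is exceeded, then resets its local pointers. A condensed sketch of the resulting delegation chain; both types are illustrative stand-ins:

```cpp
#include <cstddef>
#include <cstdint>

// Stand-in for Transaction; the real one forwards to TransactionContext,
// whose clearChunks() is sketched earlier.
struct Trx {
  void clearChunks(std::size_t /*threshold*/) { /* forwards to context */ }
};

// Condensed ManagedDocumentResult: clear() now delegates bulk chunk
// release to the transaction instead of releasing one _chunk itself.
struct DocumentResult {
  Trx* _trx = nullptr;
  void const* _vpack = nullptr;
  std::uint64_t _lastRevisionId = 0;

  void clear(std::size_t threshold) {
    if (_trx != nullptr) {
      _trx->clearChunks(threshold);  // no-op until > threshold chunks pile up
    }
    _vpack = nullptr;                // drop the borrowed document pointer
    _lastRevisionId = 0;
  }
};
```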
```diff
@@ -39,6 +39,13 @@ struct ChunkCache {
 
   ChunkCache() : _chunksUsed(0), _chunksArray() {}
 
+  void clear() {
+    if (_chunksUsed > STATIC_ARRAY_SIZE) {
+      _chunksHash.clear();
+    }
+    _chunksUsed = 0;
+  }
+
   void add(RevisionCacheChunk* chunk) {
     if (_chunksUsed <= STATIC_ARRAY_SIZE) {
       if (_chunksUsed == STATIC_ARRAY_SIZE) {
```
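
The `clear()` added here implies `ChunkCache` is a small-buffer structure: the first `STATIC_ARRAY_SIZE` chunks live in a fixed array, with a hash set as overflow, so resetting is just a counter bump unless the cache ever spilled. A self-contained sketch of that scheme; the size constant and the `contains` helper are illustrative assumptions, not the original code:

```cpp
#include <array>
#include <cstddef>
#include <unordered_set>

struct Chunk {};

struct SmallChunkCache {
  static constexpr std::size_t STATIC_ARRAY_SIZE = 16;  // illustrative size

  std::size_t _chunksUsed = 0;
  std::array<Chunk*, STATIC_ARRAY_SIZE> _chunksArray{};
  std::unordered_set<Chunk*> _chunksHash;

  // Cheap reset: the array is reused by bumping the counter back to zero;
  // the hash only needs clearing if we ever overflowed into it.
  void clear() {
    if (_chunksUsed > STATIC_ARRAY_SIZE) {
      _chunksHash.clear();
    }
    _chunksUsed = 0;
  }

  void add(Chunk* chunk) {
    if (_chunksUsed < STATIC_ARRAY_SIZE) {
      _chunksArray[_chunksUsed] = chunk;  // fast path: plain array slot
    } else {
      if (_chunksUsed == STATIC_ARRAY_SIZE) {
        // first overflow: move the array contents into the hash
        _chunksHash.insert(_chunksArray.begin(), _chunksArray.end());
      }
      _chunksHash.insert(chunk);
    }
    ++_chunksUsed;
  }

  bool contains(Chunk* chunk) const {
    if (_chunksUsed > STATIC_ARRAY_SIZE) {
      return _chunksHash.count(chunk) > 0;
    }
    for (std::size_t i = 0; i < _chunksUsed; ++i) {
      if (_chunksArray[i] == chunk) { return true; }
    }
    return false;
  }
};
```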
```diff
@@ -110,7 +117,7 @@ class ManagedDocumentResult {
     _vpack = vpack;
   }
 
-  //void clear();
+  void clear(size_t threshold);
 
  private:
   Transaction* _trx;
```