mirror of https://gitee.com/bigwinds/arangodb
performance optimizations
This commit is contained in:
parent 5b054a7db7
commit ab4933c994
@@ -383,7 +383,7 @@ Builder& Builder::close() {
     _start[tos] = 0x0b;
 
   // First determine byte length and its format:
-  unsigned int offsetSize;
+  unsigned int offsetSize = 8;
   // can be 1, 2, 4 or 8 for the byte width of the offsets,
   // the byte length and the number of subvalues:
   if (_pos - tos + index.size() - 6 <= 0xff) {
@@ -392,16 +392,8 @@ Builder& Builder::close() {
     // case we would win back 6 bytes but would need one byte per subvalue
     // for the index table
     offsetSize = 1;
-  } else if (_pos - tos + 2 * index.size() <= 0xffff) {
-    offsetSize = 2;
-  } else if (_pos - tos + 4 * index.size() <= 0xffffffffu) {
-    offsetSize = 4;
-  } else {
-    offsetSize = 8;
-  }
 
     // Maybe we need to move down data:
-  if (offsetSize == 1) {
     ValueLength targetPos = 3;
     if (_pos > (tos + 9)) {
       ValueLength len = _pos - (tos + 9);
@@ -413,15 +405,19 @@ Builder& Builder::close() {
     for (size_t i = 0; i < n; i++) {
       index[i] -= diff;
     }
-  }
     // One could move down things in the offsetSize == 2 case as well,
     // since we only need 4 bytes in the beginning. However, saving these
     // 4 bytes has been sacrificed on the Altar of Performance.
+  } else if (_pos - tos + 2 * index.size() <= 0xffff) {
+    offsetSize = 2;
+  } else if (_pos - tos + 4 * index.size() <= 0xffffffffu) {
+    offsetSize = 4;
+  }
 
   // Now build the table:
-  ValueLength tableBase;
   reserveSpace(offsetSize * index.size() + (offsetSize == 8 ? 8 : 0));
-  tableBase = _pos;
+  ValueLength tableBase = _pos;
   _pos += offsetSize * index.size();
   // Object
   if (index.size() >= 2) {
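
Note on the Builder::close() hunks above: offsetSize now starts at 8 (the widest width) and the width selection is folded into the same if/else-if chain that performs the 1-byte buffer compaction, so the common small-value case is handled in a single branch and the old trailing "else" disappears. A minimal standalone sketch of that selection pattern (illustrative only; pickOffsetSize, byteSize and nrItems are invented names, not VelocyPack API):

    #include <cstddef>

    // Pick the narrowest offset width that fits, defaulting to the widest
    // so no final "else" branch is needed.
    unsigned int pickOffsetSize(std::size_t byteSize, std::size_t nrItems) {
      unsigned int offsetSize = 8;
      if (byteSize + nrItems <= 0xffu + 6) {          // common small case
        offsetSize = 1;
      } else if (byteSize + 2 * nrItems <= 0xffffu) {
        offsetSize = 2;
      } else if (byteSize + 4 * nrItems <= 0xffffffffu) {
        offsetSize = 4;
      }
      return offsetSize;
    }
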
@@ -80,7 +80,7 @@ struct ConstDistanceExpanderLocal {
   void operator()(VPackSlice const& v, std::vector<VPackSlice>& resEdges,
                   std::vector<VPackSlice>& neighbors) {
     ManagedDocumentResult* mmdr = _block->_mmdr.get();
-    std::shared_ptr<arangodb::OperationCursor> edgeCursor;
+    std::unique_ptr<arangodb::OperationCursor> edgeCursor;
     for (auto const& edgeCollection : _block->_collectionInfos) {
       TRI_ASSERT(edgeCollection != nullptr);
       if (_isReverse) {
@@ -691,8 +691,15 @@ IndexIterator* EdgeIndex::iteratorForSlice(
     // Invalid searchValue
     return nullptr;
   }
-  VPackSlice const from = searchValues.at(0);
-  VPackSlice const to = searchValues.at(1);
+  VPackArrayIterator it(searchValues);
+  TRI_ASSERT(it.valid());
+
+  VPackSlice const from = it.value();
+
+  it.next();
+  TRI_ASSERT(it.valid());
+  VPackSlice const to = it.value();
 
   if (!from.isNull()) {
     TRI_ASSERT(from.isArray());
@@ -974,6 +974,7 @@ int InitialSyncer::handleCollectionSync(
     return TRI_ERROR_REPLICATION_INVALID_RESPONSE;
   }
+
 
   if (count.getNumber<size_t>() <= 0) {
     // remote collection has no documents. now truncate our local collection
     SingleCollectionTransaction trx(StandaloneTransactionContext::Create(_vocbase), col->cid(), TRI_TRANSACTION_WRITE);
@@ -1056,6 +1057,12 @@ int InitialSyncer::handleSyncKeys(arangodb::LogicalCollection* col,
 
   TRI_DEFER(col->ditches()->freeDitch(ditch));
 
+  std::unordered_set<RevisionCacheChunk*> chunks;
+  auto cleanupChunks = [&chunks]() {
+    for (auto& chunk : chunks) { chunk->release(); }
+  };
+  TRI_DEFER(cleanupChunks());
+
   {
     SingleCollectionTransaction trx(StandaloneTransactionContext::Create(_vocbase), col->cid(), TRI_TRANSACTION_READ);
 
@@ -1087,6 +1094,8 @@ int InitialSyncer::handleSyncKeys(arangodb::LogicalCollection* col,
     return true;
   });
 
+  trx.transactionContext()->stealChunks(chunks);
+
   if (checkAborted()) {
     return TRI_ERROR_REPLICATION_APPLIER_STOPPED;
   }
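
Note on the handleSyncKeys() hunks above: the sync now collects every leased RevisionCacheChunk, steals them from the transaction context, and releases them through a TRI_DEFER'd lambda, so each early return path still frees the chunks. A generic sketch of that scope-guard shape (stand-in template, not ArangoDB's TRI_DEFER implementation; assumes the element type has a release() method):

    #include <unordered_set>

    template <typename T>
    struct ReleaseAllOnExit {
      std::unordered_set<T*>& items;
      explicit ReleaseAllOnExit(std::unordered_set<T*>& s) : items(s) {}
      ~ReleaseAllOnExit() {
        // mirrors the per-chunk release() performed by the lambda above
        for (auto* item : items) {
          item->release();
        }
      }
    };
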
@@ -31,6 +31,7 @@
 #include "Indexes/PrimaryIndex.h"
 #include "Logger/Logger.h"
 #include "RestServer/DatabaseFeature.h"
+#include "StorageEngine/EngineSelectorFeature.h"
 #include "StorageEngine/MMFilesDocumentPosition.h"
 #include "StorageEngine/StorageEngine.h"
 #include "Utils/SingleCollectionTransaction.h"
@@ -73,16 +74,20 @@ int MMFilesCollection::OpenIteratorHandleDocumentMarker(TRI_df_marker_t const* m
   TRI_ASSERT(trx != nullptr);
 
   VPackSlice const slice(reinterpret_cast<char const*>(marker) + DatafileHelper::VPackOffset(TRI_DF_MARKER_VPACK_DOCUMENT));
+  uint8_t const* vpack = slice.begin();
 
   VPackSlice keySlice;
   TRI_voc_rid_t revisionId;
 
   Transaction::extractKeyAndRevFromDocument(slice, keySlice, revisionId);
 
-  collection->setRevision(revisionId, false);
+  c->setRevision(revisionId, false);
 
+  if (state->_trackKeys) {
     VPackValueLength length;
     char const* p = keySlice.getString(length);
     collection->keyGenerator()->track(p, length);
+  }
 
   ++state->_documents;
 
@@ -93,19 +98,16 @@ int MMFilesCollection::OpenIteratorHandleDocumentMarker(TRI_df_marker_t const* m
     state->_dfi = FindDatafileStats(state, fid);
   }
 
-  auto primaryIndex = collection->primaryIndex();
-
   // no primary index lock required here because we are the only ones reading
   // from the index ATM
-  SimpleIndexElement* found = primaryIndex->lookupKeyRef(trx, keySlice, state->_mmdr);
+  SimpleIndexElement* found = state->_primaryIndex->lookupKeyRef(trx, keySlice, state->_mmdr);
 
   // it is a new entry
   if (found == nullptr || found->revisionId() == 0) {
-    uint8_t const* vpack = reinterpret_cast<uint8_t const*>(marker) + arangodb::DatafileHelper::VPackOffset(TRI_DF_MARKER_VPACK_DOCUMENT);
-    c->insertRevision(revisionId, vpack, fid, false);
+    c->insertRevision(revisionId, vpack, fid, false, false);
 
     // insert into primary index
-    int res = primaryIndex->insertKey(trx, revisionId, VPackSlice(vpack), state->_mmdr);
+    int res = state->_primaryIndex->insertKey(trx, revisionId, VPackSlice(vpack), state->_mmdr);
 
     if (res != TRI_ERROR_NO_ERROR) {
       c->removeRevision(revisionId, false);
@@ -121,10 +123,9 @@ int MMFilesCollection::OpenIteratorHandleDocumentMarker(TRI_df_marker_t const* m
 
   // it is an update
   else {
-    uint8_t const* vpack = reinterpret_cast<uint8_t const*>(marker) + arangodb::DatafileHelper::VPackOffset(TRI_DF_MARKER_VPACK_DOCUMENT);
     TRI_voc_rid_t const oldRevisionId = found->revisionId();
     // update the revision id in primary index
-    found->updateRevisionId(revisionId, static_cast<uint32_t>(keySlice.begin() - slice.begin()));
+    found->updateRevisionId(revisionId, static_cast<uint32_t>(keySlice.begin() - vpack));
 
     MMFilesDocumentPosition const old = c->lookupRevision(oldRevisionId);
 
@@ -132,7 +133,7 @@ int MMFilesCollection::OpenIteratorHandleDocumentMarker(TRI_df_marker_t const* m
     c->removeRevision(oldRevisionId, false);
 
     // insert new revision
-    c->insertRevision(revisionId, vpack, fid, false);
+    c->insertRevision(revisionId, vpack, fid, false, false);
 
     // update the datafile info
     DatafileStatisticsContainer* dfi;
@@ -174,10 +175,12 @@ int MMFilesCollection::OpenIteratorHandleDeletionMarker(TRI_df_marker_t const* m
 
   Transaction::extractKeyAndRevFromDocument(slice, keySlice, revisionId);
 
-  collection->setRevision(revisionId, false);
+  c->setRevision(revisionId, false);
+  if (state->_trackKeys) {
     VPackValueLength length;
     char const* p = keySlice.getString(length);
     collection->keyGenerator()->track(p, length);
+  }
 
   ++state->_deletions;
 
@@ -189,8 +192,7 @@ int MMFilesCollection::OpenIteratorHandleDeletionMarker(TRI_df_marker_t const* m
 
   // no primary index lock required here because we are the only ones reading
   // from the index ATM
-  auto primaryIndex = collection->primaryIndex();
-  SimpleIndexElement found = primaryIndex->lookupKey(trx, keySlice, state->_mmdr);
+  SimpleIndexElement found = state->_primaryIndex->lookupKey(trx, keySlice, state->_mmdr);
 
   // it is a new entry, so we missed the create
   if (!found) {
@@ -224,7 +226,7 @@ int MMFilesCollection::OpenIteratorHandleDeletionMarker(TRI_df_marker_t const* m
   dfi->sizeDead += DatafileHelper::AlignedSize<int64_t>(size);
   state->_dfi->numberDeletions++;
 
-  primaryIndex->removeKey(trx, oldRevisionId, VPackSlice(vpack), state->_mmdr);
+  state->_primaryIndex->removeKey(trx, oldRevisionId, VPackSlice(vpack), state->_mmdr);
 
   c->removeRevision(oldRevisionId, true);
 }
@@ -1155,10 +1157,10 @@ uint8_t const* MMFilesCollection::lookupRevisionVPackConditional(TRI_voc_rid_t r
   return vpack;
 }
 
-void MMFilesCollection::insertRevision(TRI_voc_rid_t revisionId, uint8_t const* dataptr, TRI_voc_fid_t fid, bool isInWal) {
+void MMFilesCollection::insertRevision(TRI_voc_rid_t revisionId, uint8_t const* dataptr, TRI_voc_fid_t fid, bool isInWal, bool shouldLock) {
   TRI_ASSERT(revisionId != 0);
   TRI_ASSERT(dataptr != nullptr);
-  _revisionsCache.insert(revisionId, dataptr, fid, isInWal);
+  _revisionsCache.insert(revisionId, dataptr, fid, isInWal, shouldLock);
 }
 
 void MMFilesCollection::updateRevision(TRI_voc_rid_t revisionId, uint8_t const* dataptr, TRI_voc_fid_t fid, bool isInWal) {
@@ -30,6 +30,7 @@
 #include "StorageEngine/MMFilesDatafileStatistics.h"
 #include "StorageEngine/MMFilesRevisionsCache.h"
 #include "VocBase/Ditch.h"
+#include "VocBase/KeyGenerator.h"
 #include "VocBase/ManagedDocumentResult.h"
 #include "VocBase/PhysicalCollection.h"
 
@@ -38,6 +39,7 @@ struct TRI_df_marker_t;
 
 namespace arangodb {
 class LogicalCollection;
+class PrimaryIndex;
 
 class MMFilesCollection final : public PhysicalCollection {
   friend class MMFilesCompactorThread;
@@ -47,6 +49,7 @@ class MMFilesCollection final : public PhysicalCollection {
   /// @brief state during opening of a collection
   struct OpenIteratorState {
     LogicalCollection* _collection;
+    arangodb::PrimaryIndex* _primaryIndex;
     TRI_voc_tid_t _tid;
     TRI_voc_fid_t _fid;
     std::unordered_map<TRI_voc_fid_t, DatafileStatisticsContainer*> _stats;
@@ -57,9 +60,11 @@ class MMFilesCollection final : public PhysicalCollection {
     uint64_t _deletions;
     uint64_t _documents;
     int64_t _initialCount;
+    bool const _trackKeys;
 
     OpenIteratorState(LogicalCollection* collection, arangodb::Transaction* trx)
         : _collection(collection),
+          _primaryIndex(collection->primaryIndex()),
          _tid(0),
          _fid(0),
          _stats(),
@@ -69,7 +74,8 @@ class MMFilesCollection final : public PhysicalCollection {
          _context(trx, collection, &_mmdr, 1),
          _deletions(0),
          _documents(0),
-          _initialCount(-1) {
+          _initialCount(-1),
+          _trackKeys(collection->keyGenerator()->trackKeys()) {
      TRI_ASSERT(collection != nullptr);
      TRI_ASSERT(trx != nullptr);
    }
@@ -191,7 +197,7 @@ class MMFilesCollection final : public PhysicalCollection {
 
   uint8_t const* lookupRevisionVPack(TRI_voc_rid_t revisionId) const override;
   uint8_t const* lookupRevisionVPackConditional(TRI_voc_rid_t revisionId, TRI_voc_tick_t maxTick, bool excludeWal) const override;
-  void insertRevision(TRI_voc_rid_t revisionId, uint8_t const* dataptr, TRI_voc_fid_t fid, bool isInWal) override;
+  void insertRevision(TRI_voc_rid_t revisionId, uint8_t const* dataptr, TRI_voc_fid_t fid, bool isInWal, bool shouldLock) override;
   void updateRevision(TRI_voc_rid_t revisionId, uint8_t const* dataptr, TRI_voc_fid_t fid, bool isInWal) override;
   bool updateRevisionConditional(TRI_voc_rid_t revisionId, TRI_df_marker_t const* oldPosition, TRI_df_marker_t const* newPosition, TRI_voc_fid_t newFid, bool isInWal) override;
   void removeRevision(TRI_voc_rid_t revisionId, bool updateStats) override;
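
Note on the OpenIteratorState hunks above: the primary-index pointer and the key-tracking flag are now resolved once per collection when the state is constructed, instead of once per marker inside the open-iterator callbacks. A small self-contained sketch of that hoisting pattern (stand-in types, not the ArangoDB classes):

    #include <cstdint>
    #include <vector>

    struct Index { void insertKey(uint64_t) {} };
    struct Collection {
      Index index;
      bool trackKeys = true;
      Index* primaryIndex() { return &index; }
    };

    void openIterate(Collection& collection, std::vector<uint64_t> const& revisions) {
      Index* primaryIndex = collection.primaryIndex();  // resolved once
      bool const trackKeys = collection.trackKeys;      // resolved once
      for (uint64_t rev : revisions) {                  // hot loop, one pass per marker
        primaryIndex->insertKey(rev);
        if (trackKeys) {
          // key tracking would happen here
        }
      }
    }
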
@@ -77,11 +77,11 @@ void MMFilesRevisionsCache::clear() {
   _positions.truncate([](MMFilesDocumentPosition&) { return true; });
 }
 
-void MMFilesRevisionsCache::insert(TRI_voc_rid_t revisionId, uint8_t const* dataptr, TRI_voc_fid_t fid, bool isInWal) {
+void MMFilesRevisionsCache::insert(TRI_voc_rid_t revisionId, uint8_t const* dataptr, TRI_voc_fid_t fid, bool isInWal, bool shouldLock) {
   TRI_ASSERT(revisionId != 0);
   TRI_ASSERT(dataptr != nullptr);
 
-  WRITE_LOCKER(locker, _lock);
+  CONDITIONAL_WRITE_LOCKER(locker, _lock, shouldLock);
   int res = _positions.insert(nullptr, MMFilesDocumentPosition(revisionId, dataptr, fid, isInWal));
   if (res != TRI_ERROR_NO_ERROR) {
     _positions.removeByKey(nullptr, &revisionId);
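
Note on the CONDITIONAL_WRITE_LOCKER change above (the same pattern recurs in CollectionRevisionsCache further down): the insert path can now skip acquiring the lock when the caller already knows it runs single-threaded, for example during the initial iteration at collection open. A simplified stand-in for such a conditional lock guard (not the actual ArangoDB macro):

    #include <mutex>

    class ConditionalLockGuard {
     public:
      ConditionalLockGuard(std::mutex& mutex, bool shouldLock)
          : _mutex(mutex), _locked(shouldLock) {
        if (_locked) {
          _mutex.lock();
        }
      }
      ~ConditionalLockGuard() {
        if (_locked) {
          _mutex.unlock();
        }
      }
     private:
      std::mutex& _mutex;
      bool _locked;
    };
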
@@ -43,7 +43,7 @@ class MMFilesRevisionsCache {
   void sizeHint(int64_t hint);
   void clear();
   MMFilesDocumentPosition lookup(TRI_voc_rid_t revisionId) const;
-  void insert(TRI_voc_rid_t revisionId, uint8_t const* dataptr, TRI_voc_fid_t fid, bool isInWal);
+  void insert(TRI_voc_rid_t revisionId, uint8_t const* dataptr, TRI_voc_fid_t fid, bool isInWal, bool shouldLock);
   void update(TRI_voc_rid_t revisionId, uint8_t const* dataptr, TRI_voc_fid_t fid, bool isInWal);
   bool updateConditional(TRI_voc_rid_t revisionId, TRI_df_marker_t const* oldPosition, TRI_df_marker_t const* newPosition, TRI_voc_fid_t newFid, bool isInWal);
   void remove(TRI_voc_rid_t revisionId);
@@ -71,8 +71,7 @@ struct OperationResult {
         wasSynchronous(wasSynchronous),
         countErrorCodes(countErrorCodes) {}
 
-  virtual ~OperationResult() {
-  }
+  ~OperationResult() {}
 
   bool successful() const {
     return code == TRI_ERROR_NO_ERROR;
@@ -82,7 +81,7 @@ struct OperationResult {
     return !successful();
   }
 
-  VPackSlice slice() const {
+  inline VPackSlice slice() const {
     TRI_ASSERT(buffer != nullptr);
     return VPackSlice(buffer->data());
   }
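
Note on the OperationResult hunks above: dropping the only virtual member (the destructor) removes the vtable pointer from every OperationResult object, and marking slice() inline nudges the compiler to avoid a call for this trivial accessor. A tiny demonstration of the size effect (generic types; assumes the struct is no longer used polymorphically):

    #include <cstdio>

    struct WithVirtual { virtual ~WithVirtual() {} int code = 0; };
    struct WithoutVirtual { ~WithoutVirtual() {} int code = 0; };

    int main() {
      std::printf("with vtable: %zu bytes, without: %zu bytes\n",
                  sizeof(WithVirtual), sizeof(WithoutVirtual));
      return 0;
    }
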
@@ -56,7 +56,10 @@ static bool IsEqualElementElement(void*, RevisionCacheEntry const& left,
 } // namespace
 
 CollectionRevisionsCache::CollectionRevisionsCache(LogicalCollection* collection, RevisionCacheChunkAllocator* allocator)
-    : _revisions(HashKey, HashElement, IsEqualKeyElement, IsEqualElementElement, IsEqualElementElement, 8, [this]() -> std::string { return std::string("revisions for ") + this->_collection->name(); }), _collection(collection), _readCache(allocator, this) {}
+    : _revisions(HashKey, HashElement, IsEqualKeyElement, IsEqualElementElement, IsEqualElementElement, 8, [this]() -> std::string { return std::string("revisions for ") + this->_collection->name(); }),
+      _collection(collection),
+      _readCache(allocator, this),
+      _allowInvalidation(true) {}
 
 CollectionRevisionsCache::~CollectionRevisionsCache() {
   try {
@@ -103,7 +106,7 @@ bool CollectionRevisionsCache::lookupRevision(Transaction* trx, ManagedDocumentR
     return true;
   }
 
-  READ_LOCKER(locker, _lock);
+  CONDITIONAL_READ_LOCKER(locker, _lock, shouldLock);
 
   RevisionCacheEntry found = _revisions.findByKey(nullptr, &revisionId);
 
@@ -119,7 +122,7 @@ bool CollectionRevisionsCache::lookupRevision(Transaction* trx, ManagedDocumentR
     ChunkProtector protector = _readCache.insertAndLease(revisionId, reinterpret_cast<uint8_t const*>(logfile->data() + found.offset()), result);
     // must have succeeded (otherwise an exception was thrown)
     // and insert result into the hash
-    insertRevision(revisionId, protector.chunk(), protector.offset(), protector.version());
+    insertRevision(revisionId, protector.chunk(), protector.offset(), protector.version(), shouldLock);
     // TODO: handle WAL reference counters
     return true;
   }
@@ -144,11 +147,11 @@ bool CollectionRevisionsCache::lookupRevision(Transaction* trx, ManagedDocumentR
   // insert found revision into our hash
   ChunkProtector protector = _readCache.insertAndLease(revisionId, vpack, result);
   // insert result into the hash
-  insertRevision(revisionId, protector.chunk(), protector.offset(), protector.version());
+  insertRevision(revisionId, protector.chunk(), protector.offset(), protector.version(), shouldLock);
   return true;
 }
 
-bool CollectionRevisionsCache::lookupRevisionConditional(Transaction* trx, ManagedDocumentResult& result, TRI_voc_rid_t revisionId, TRI_voc_tick_t maxTick, bool excludeWal) {
+bool CollectionRevisionsCache::lookupRevisionConditional(Transaction* trx, ManagedDocumentResult& result, TRI_voc_rid_t revisionId, TRI_voc_tick_t maxTick, bool excludeWal, bool shouldLock) {
   // fetch document from engine
   uint8_t const* vpack = readFromEngineConditional(revisionId, maxTick, excludeWal);
 
@@ -159,18 +162,18 @@ bool CollectionRevisionsCache::lookupRevisionConditional(Transaction* trx, Manag
   // insert found revision into our hash
   ChunkProtector protector = _readCache.insertAndLease(revisionId, vpack, result);
   // insert result into the hash
-  insertRevision(revisionId, protector.chunk(), protector.offset(), protector.version());
+  insertRevision(revisionId, protector.chunk(), protector.offset(), protector.version(), shouldLock);
   return true;
 }
 
 // insert from chunk
-void CollectionRevisionsCache::insertRevision(TRI_voc_rid_t revisionId, RevisionCacheChunk* chunk, uint32_t offset, uint32_t version) {
+void CollectionRevisionsCache::insertRevision(TRI_voc_rid_t revisionId, RevisionCacheChunk* chunk, uint32_t offset, uint32_t version, bool shouldLock) {
   TRI_ASSERT(revisionId != 0);
   TRI_ASSERT(chunk != nullptr);
   TRI_ASSERT(offset != UINT32_MAX);
   TRI_ASSERT(version != 0 && version != UINT32_MAX);
 
-  WRITE_LOCKER(locker, _lock);
+  CONDITIONAL_WRITE_LOCKER(locker, _lock, shouldLock);
   int res = _revisions.insert(nullptr, RevisionCacheEntry(revisionId, chunk, offset, version));
 
   if (res != TRI_ERROR_NO_ERROR) {
@@ -181,9 +184,9 @@ void CollectionRevisionsCache::insertRevision(TRI_voc_rid_t revisionId, Revision
 }
 
 // insert from WAL
-void CollectionRevisionsCache::insertRevision(TRI_voc_rid_t revisionId, wal::Logfile* logfile, uint32_t offset) {
-  TRI_ASSERT(false);
-  WRITE_LOCKER(locker, _lock);
+void CollectionRevisionsCache::insertRevision(TRI_voc_rid_t revisionId, wal::Logfile* logfile, uint32_t offset, bool shouldLock) {
+  CONDITIONAL_WRITE_LOCKER(locker, _lock, shouldLock);
   int res = _revisions.insert(nullptr, RevisionCacheEntry(revisionId, logfile, offset));
 
   if (res != TRI_ERROR_NO_ERROR) {
@@ -54,17 +54,25 @@ class CollectionRevisionsCache {
 
   void sizeHint(int64_t hint);
 
+  bool allowInvalidation() const {
+    return _allowInvalidation.load();
+  }
+
+  void allowInvalidation(bool value) {
+    _allowInvalidation.store(value);
+  }
+
   // look up a revision
   bool lookupRevision(arangodb::Transaction* trx, ManagedDocumentResult& result, TRI_voc_rid_t revisionId, bool shouldLock);
 
   // conditionally look up a revision
-  bool lookupRevisionConditional(arangodb::Transaction* trx, ManagedDocumentResult& result, TRI_voc_rid_t revisionId, TRI_voc_tick_t maxTick, bool excludeWal);
+  bool lookupRevisionConditional(arangodb::Transaction* trx, ManagedDocumentResult& result, TRI_voc_rid_t revisionId, TRI_voc_tick_t maxTick, bool excludeWal, bool shouldLock);
 
   // insert from chunk
-  void insertRevision(TRI_voc_rid_t revisionId, RevisionCacheChunk* chunk, uint32_t offset, uint32_t version);
+  void insertRevision(TRI_voc_rid_t revisionId, RevisionCacheChunk* chunk, uint32_t offset, uint32_t version, bool shouldLock);
 
   // insert from WAL
-  void insertRevision(TRI_voc_rid_t revisionId, wal::Logfile* logfile, uint32_t offset);
+  void insertRevision(TRI_voc_rid_t revisionId, wal::Logfile* logfile, uint32_t offset, bool shouldLock);
 
   // remove a revision
   void removeRevision(TRI_voc_rid_t revisionId);
@@ -85,6 +93,8 @@ class CollectionRevisionsCache {
   LogicalCollection* _collection;
 
   ReadCache _readCache;
+
+  std::atomic<bool> _allowInvalidation;
 };
 
 } // namespace arangodb
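
Note on the _allowInvalidation member above: the collection can temporarily forbid read-cache invalidation (see the allowInvalidation(false)/allowInvalidation(true) bracket around the initial marker iteration further down), and RevisionCacheChunk::invalidate() reports back whether it was allowed to run. A simplified stand-in for that gate:

    #include <atomic>

    class InvalidationGate {
     public:
      bool allowInvalidation() const { return _allowInvalidation.load(); }
      void allowInvalidation(bool value) { _allowInvalidation.store(value); }

      bool tryInvalidate() {
        if (!_allowInvalidation.load()) {
          return false;  // caller keeps the chunk untouched
        }
        // ... perform the actual invalidation here ...
        return true;
      }

     private:
      std::atomic<bool> _allowInvalidation{true};
    };
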
@@ -280,11 +280,9 @@ int TraditionalKeyGenerator::validate(std::string const& key, bool isRestore) {
 /// @brief track usage of a key
 ////////////////////////////////////////////////////////////////////////////////
 
-void TraditionalKeyGenerator::track(char const*, VPackValueLength) {}
-
-////////////////////////////////////////////////////////////////////////////////
-/// @brief create a VPack representation of the generator
-////////////////////////////////////////////////////////////////////////////////
+void TraditionalKeyGenerator::track(char const*, VPackValueLength) {
+  TRI_ASSERT(false);
+}
 
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief create a VPack representation of the generator
@@ -88,6 +88,8 @@ class KeyGenerator {
   static KeyGenerator* factory(arangodb::velocypack::Slice const&);
 
  public:
+  virtual bool trackKeys() const = 0;
+
   //////////////////////////////////////////////////////////////////////////////
   /// @brief generate a key
   //////////////////////////////////////////////////////////////////////////////
@@ -160,6 +162,9 @@ class TraditionalKeyGenerator : public KeyGenerator {
   static bool validateKey(char const* key, size_t len);
 
  public:
+
+  bool trackKeys() const override { return false; }
+
   //////////////////////////////////////////////////////////////////////////////
   /// @brief generate a key
   //////////////////////////////////////////////////////////////////////////////
@@ -213,6 +218,9 @@ class AutoIncrementKeyGenerator : public KeyGenerator {
   static bool validateKey(char const* key, size_t len);
 
  public:
+
+  bool trackKeys() const override { return true; }
+
   //////////////////////////////////////////////////////////////////////////////
   /// @brief generate a key
   //////////////////////////////////////////////////////////////////////////////
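
Note on the trackKeys() additions above: generators now advertise whether key tracking is required, so callers (see the _trackKeys checks in the MMFilesCollection open-iterator hunks) can skip the per-document track() call entirely for traditional keys. A stand-in sketch of the shape:

    #include <cstddef>

    struct GeneratorSketch {
      virtual ~GeneratorSketch() {}
      virtual bool trackKeys() const = 0;
      virtual void track(char const* key, std::size_t length) = 0;
    };

    struct AutoIncrementSketch : GeneratorSketch {
      bool trackKeys() const override { return true; }
      void track(char const*, std::size_t) override { /* remember highest key seen */ }
    };

    struct TraditionalSketch : GeneratorSketch {
      bool trackKeys() const override { return false; }
      void track(char const*, std::size_t) override { /* never reached on the hot path */ }
    };
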
@@ -332,6 +332,7 @@ LogicalCollection::LogicalCollection(
       _lastCompactionStatus(nullptr),
       _lastCompactionStamp(0.0),
       _uncollectedLogfileEntries(0),
+      _isInitialIteration(false),
       _revisionError(false) {
   _keyGenerator.reset(KeyGenerator::factory(other.keyOptions()));
 
@@ -397,6 +398,7 @@ LogicalCollection::LogicalCollection(TRI_vocbase_t* vocbase,
       _lastCompactionStatus(nullptr),
       _lastCompactionStamp(0.0),
       _uncollectedLogfileEntries(0),
+      _isInitialIteration(false),
       _revisionError(false) {
 
   if (!IsAllowedName(info)) {
@@ -681,10 +683,6 @@ uint32_t LogicalCollection::internalVersion() const {
   return _internalVersion;
 }
 
-TRI_voc_cid_t LogicalCollection::cid() const {
-  return _cid;
-}
-
 std::string LogicalCollection::cid_as_string() const {
   return basics::StringUtils::itoa(_cid);
 }
@@ -834,10 +832,6 @@ VPackSlice LogicalCollection::keyOptions() const {
   return VPackSlice(_keyOptions->data());
 }
 
-arangodb::KeyGenerator* LogicalCollection::keyGenerator() const {
-  return _keyGenerator.get();
-}
-
 // SECTION: Indexes
 uint32_t LogicalCollection::indexBuckets() const {
   return _indexBuckets;
@@ -977,13 +971,18 @@ int LogicalCollection::rename(std::string const& newName) {
 }
 
 int LogicalCollection::close() {
-  // This was unload
+  // This was unload() in 3.0
   auto primIdx = primaryIndex();
   auto idxSize = primIdx->size();
 
   if (!_isDeleted &&
       _physical->initialCount() != static_cast<int64_t>(idxSize)) {
     _physical->updateCount(idxSize);
+
+    // save new "count" value
+    StorageEngine* engine = EngineSelectorFeature::ENGINE;
+    bool const doSync = application_features::ApplicationServer::getFeature<DatabaseFeature>("Database")->forceSyncProperties();
+    engine->changeCollection(_vocbase, _cid, this, doSync);
   }
 
   // We also have to unload the indexes.
@@ -1012,8 +1011,11 @@ void LogicalCollection::drop() {
   engine->dropCollection(_vocbase, this);
   _isDeleted = true;
 
-  // save some memory by freeing the revisions cache
+  // save some memory by freeing the revisions cache and indexes
+  _keyGenerator.reset();
   _revisionsCache.reset();
+  _indexes.clear();
+  _physical.reset();
 }
 
 void LogicalCollection::setStatus(TRI_vocbase_col_status_e status) {
@@ -1104,10 +1106,6 @@ void LogicalCollection::toVelocyPack(VPackBuilder& builder, bool includeIndexes,
   engine->getCollectionInfo(_vocbase, _cid, builder, includeIndexes, maxTick);
 }
 
-TRI_vocbase_t* LogicalCollection::vocbase() const {
-  return _vocbase;
-}
-
 void LogicalCollection::increaseInternalVersion() {
   ++_internalVersion;
 }
@@ -1251,6 +1249,13 @@ void LogicalCollection::open(bool ignoreErrors) {
   StorageEngine* engine = EngineSelectorFeature::ENGINE;
   engine->getCollectionInfo(_vocbase, cid(), builder, true, 0);
 
+  VPackSlice initialCount = builder.slice().get(std::vector<std::string>({ "parameters", "count" }));
+  if (initialCount.isNumber()) {
+    int64_t count = initialCount.getNumber<int64_t>();
+    if (count > 0) {
+      _physical->updateCount(count);
+    }
+  }
   double start = TRI_microtime();
 
   LOG_TOPIC(TRACE, Logger::PERFORMANCE)
@@ -1274,6 +1279,9 @@ void LogicalCollection::open(bool ignoreErrors) {
       << "iterate-markers { collection: " << _vocbase->name() << "/"
       << _name << " }";
 
+  _revisionsCache->allowInvalidation(false);
+  _isInitialIteration = true;
+
   // iterate over all markers of the collection
   res = getPhysical()->iterateMarkersOnLoad(&trx);
 
@@ -1283,6 +1291,8 @@ void LogicalCollection::open(bool ignoreErrors) {
     THROW_ARANGO_EXCEPTION_MESSAGE(res, std::string("cannot iterate data of document collection: ") + TRI_errno_string(res));
   }
 
+  _isInitialIteration = false;
+
   // build the indexes meta-data, but do not fill the indexes yet
   {
     auto old = useSecondaryIndexes();
@@ -1311,6 +1321,8 @@ void LogicalCollection::open(bool ignoreErrors) {
     fillIndexes(&trx);
   }
 
+  _revisionsCache->allowInvalidation(true);
+
   LOG_TOPIC(TRACE, Logger::PERFORMANCE)
       << "[timer] " << Logger::FIXED(TRI_microtime() - start)
       << " s, open-document-collection { collection: " << _vocbase->name() << "/"
@@ -3520,17 +3532,17 @@ void LogicalCollection::newObjectForRemove(
 
 bool LogicalCollection::readRevision(Transaction* trx, ManagedDocumentResult& result, TRI_voc_rid_t revisionId) {
   TRI_ASSERT(_revisionsCache);
-  return _revisionsCache->lookupRevision(trx, result, revisionId, _status == TRI_VOC_COL_STATUS_LOADING);
+  return _revisionsCache->lookupRevision(trx, result, revisionId, !_isInitialIteration);
 }
 
 bool LogicalCollection::readRevisionConditional(Transaction* trx, ManagedDocumentResult& result, TRI_voc_rid_t revisionId, TRI_voc_tick_t maxTick, bool excludeWal) {
   TRI_ASSERT(_revisionsCache);
-  return _revisionsCache->lookupRevisionConditional(trx, result, revisionId, maxTick, excludeWal);
+  return _revisionsCache->lookupRevisionConditional(trx, result, revisionId, maxTick, excludeWal, true);
 }
 
 void LogicalCollection::insertRevision(TRI_voc_rid_t revisionId, uint8_t const* dataptr, TRI_voc_fid_t fid, bool isInWal) {
   // note: there is no need to insert into the cache here as the data points to temporary storage
-  getPhysical()->insertRevision(revisionId, dataptr, fid, isInWal);
+  getPhysical()->insertRevision(revisionId, dataptr, fid, isInWal, true);
 }
 
 void LogicalCollection::updateRevision(TRI_voc_rid_t revisionId, uint8_t const* dataptr, TRI_voc_fid_t fid, bool isInWal) {
@@ -106,6 +106,8 @@ class LogicalCollection {
 
   void ensureRevisionsCache();
 
+  void isInitialIteration(bool value) { _isInitialIteration = value; }
+
   // TODO: MOVE TO PHYSICAL?
   bool isFullyCollected();
   int64_t uncollectedLogfileEntries() const { return _uncollectedLogfileEntries.load(); }
@@ -137,7 +139,10 @@ class LogicalCollection {
 
   uint32_t internalVersion() const;
 
-  TRI_voc_cid_t cid() const;
+  inline TRI_voc_cid_t cid() const {
+    return _cid;
+  }
 
   std::string cid_as_string() const;
 
   TRI_voc_cid_t planId() const;
@@ -214,7 +219,9 @@ class LogicalCollection {
 
   // Get a reference to this KeyGenerator.
   // Caller is not allowed to free it.
-  arangodb::KeyGenerator* keyGenerator() const;
+  inline arangodb::KeyGenerator* keyGenerator() const {
+    return _keyGenerator.get();
+  }
 
   PhysicalCollection* getPhysical() const { return _physical.get(); }
 
@@ -261,10 +268,9 @@ class LogicalCollection {
   /// The builder has to be an opened Type::Object
   void toVelocyPack(arangodb::velocypack::Builder&, bool, TRI_voc_tick_t);
 
-  TRI_vocbase_t* vocbase() const;
-  // Only Local
-  void updateCount(size_t);
+  inline TRI_vocbase_t* vocbase() const {
+    return _vocbase;
+  }
 
   // Update this collection.
   virtual int update(arangodb::velocypack::Slice const&, bool);
@@ -575,6 +581,11 @@ class LogicalCollection {
   double _lastCompactionStamp;
 
   std::atomic<int64_t> _uncollectedLogfileEntries;
+
+  /// @brief: flag that is set to true when the documents are
+  /// initial enumerated and the primary index is built
+  bool _isInitialIteration;
+
   bool _revisionError;
 };
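
Note on the LogicalCollection header hunks above: trivial getters such as cid(), keyGenerator() and vocbase() moved from the .cpp file into the class body so calls can be inlined at the call site. A generic before/after sketch (stand-in class):

    #include <cstdint>

    class CollectionSketch {
     public:
      // before: only declared here, defined out of line in the .cpp file
      //   uint64_t cid() const;
      // after: defined in the header, trivially inlinable
      inline uint64_t cid() const { return _cid; }

     private:
      uint64_t _cid = 0;
    };
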
@@ -103,8 +103,12 @@ class ManagedDocumentResult {
   void addExisting(ChunkProtector& protector, TRI_voc_rid_t revisionId);
 
   bool hasSeenChunk(RevisionCacheChunk* chunk) const { return _chunkCache.contains(chunk); }
-  TRI_voc_rid_t lastRevisionId() const { return _lastRevisionId; }
-  uint8_t const* lastVPack() const { return _vpack; }
+  inline TRI_voc_rid_t lastRevisionId() const { return _lastRevisionId; }
+
+  inline void setCache(TRI_voc_rid_t revisionId, uint8_t const* vpack) {
+    _lastRevisionId = revisionId;
+    _vpack = vpack;
+  }
 
   //void clear();
 
@@ -98,7 +98,7 @@ class PhysicalCollection {
 
   virtual uint8_t const* lookupRevisionVPack(TRI_voc_rid_t revisionId) const = 0;
   virtual uint8_t const* lookupRevisionVPackConditional(TRI_voc_rid_t revisionId, TRI_voc_tick_t maxTick, bool excludeWal) const = 0;
-  virtual void insertRevision(TRI_voc_rid_t revisionId, uint8_t const* dataptr, TRI_voc_fid_t fid, bool isInWal) = 0;
+  virtual void insertRevision(TRI_voc_rid_t revisionId, uint8_t const* dataptr, TRI_voc_fid_t fid, bool isInWal, bool shouldLock) = 0;
   virtual void updateRevision(TRI_voc_rid_t revisionId, uint8_t const* dataptr, TRI_voc_fid_t fid, bool isInWal) = 0;
   virtual bool updateRevisionConditional(TRI_voc_rid_t revisionId, TRI_df_marker_t const* oldPosition, TRI_df_marker_t const* newPosition, TRI_voc_fid_t newFid, bool isInWal) = 0;
   virtual void removeRevision(TRI_voc_rid_t revisionId, bool updateStats) = 0;
@@ -134,7 +134,11 @@ uint32_t RevisionCacheChunk::advanceWritePosition(uint32_t size) {
   return offset;
 }
 
-void RevisionCacheChunk::invalidate(std::vector<TRI_voc_rid_t>& revisions) {
+bool RevisionCacheChunk::invalidate(std::vector<TRI_voc_rid_t>& revisions) {
+  if (!_collectionCache->allowInvalidation()) {
+    return false;
+  }
+
   // wait until all writers have finished
   while (true) {
     {
@@ -147,6 +151,8 @@ void RevisionCacheChunk::invalidate(std::vector<TRI_voc_rid_t>& revisions) {
   }
 
   revisions.clear();
+  revisions.reserve(8192);
+
   findRevisions(revisions);
   invalidate();
   if (!revisions.empty()) {
@@ -154,6 +160,7 @@ void RevisionCacheChunk::invalidate(std::vector<TRI_voc_rid_t>& revisions) {
   }
   // increase version number once again
   invalidate();
+  return true;
 }
 
 void RevisionCacheChunk::findRevisions(std::vector<TRI_voc_rid_t>& revisions) {
@@ -103,7 +103,7 @@ class RevisionCacheChunk {
 
   bool isUsed() noexcept;
 
-  void invalidate(std::vector<TRI_voc_rid_t>& revisions);
+  bool invalidate(std::vector<TRI_voc_rid_t>& revisions);
 
   void wipeout() {
 #ifdef TRI_ENABLE_MAINTAINER_MODE
@@ -195,22 +195,19 @@ void RevisionCacheChunkAllocator::removeCollection(ReadCache* cache) {
 
 bool RevisionCacheChunkAllocator::garbageCollect() {
   RevisionCacheChunk* chunk = nullptr;
+  bool hasMemoryPressure;
+
   {
     WRITE_LOCKER(locker, _chunksLock);
     // LOG(ERR) << "gc: total allocated: " << _totalAllocated << ", target: " << _totalTargetSize;
-    if (_totalAllocated < _totalTargetSize) {
-      // nothing to do
-      // LOG(ERR) << "gc: not necessary";
-      return false;
-    }
+    hasMemoryPressure = (_totalAllocated >= _totalTargetSize);
 
     // try a chunk from the freelist first
-    if (!_freeList.empty()) {
+    if (hasMemoryPressure && !_freeList.empty()) {
       chunk = _freeList.back();
       _freeList.pop_back();
-      // fix statistics here already
+      // fix statistics here already, because we already have the lock
       TRI_ASSERT(_totalAllocated >= chunk->size());
       _totalAllocated -= chunk->size();
     }
@@ -265,34 +262,29 @@ bool RevisionCacheChunkAllocator::garbageCollect() {
       MUTEX_LOCKER(locker, _gcLock);
 
       auto it = _fullChunks.find(chunkInfo.cache);
 
       if (it != _fullChunks.end()) {
+        (*it).second.erase(chunk);
         if ((*it).second.empty()) {
           _fullChunks.erase(chunkInfo.cache);
-        } else {
-          (*it).second.erase(chunk);
         }
       }
     }
 
-    // now move chunk into freelist
+    // now free the chunk (don't move it into freelist so we
+    // can release the chunk's allocated memory here already)
     {
       WRITE_LOCKER(locker, _chunksLock);
-      try {
-        _freeList.push_back(chunk);
-      } catch (...) {
-        // if movement fails then we simply throw away the chunk
       TRI_ASSERT(_totalAllocated >= chunk->size());
       _totalAllocated -= chunk->size();
       deleteChunk(chunk);
-        throw;
-      }
     }
     return true;
   }
-  } else {
+  } else if (hasMemoryPressure) {
     // LOG(ERR) << "gc: invalidating chunk " << chunk;
-    revisions.reserve(8192);
-    chunk->invalidate(revisions);
+    if (chunk->invalidate(revisions)) {
       // LOG(ERR) << "gc: invalidating chunk " << chunk << " done";
       MUTEX_LOCKER(locker, _gcLock);
 
@@ -308,6 +300,7 @@ bool RevisionCacheChunkAllocator::garbageCollect() {
       }
     }
   }
+  }
 
   // LOG(ERR) << "gc: over. worked: " << worked;
   return worked;
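
Note on the garbageCollect() hunks above: the memory-pressure check is evaluated once while the chunk lock is held and reused for both the freelist pop and the invalidation decision, and a reclaimed full chunk is now deleted outright instead of being pushed back onto the freelist. A simplified sketch of the freelist part (stand-in types, not the allocator's real members):

    #include <cstddef>
    #include <mutex>
    #include <vector>

    struct Chunk { std::size_t size() const { return 1024; } };

    class AllocatorSketch {
     public:
      // returns a chunk to recycle, or nullptr when there is no memory pressure
      Chunk* popUnderPressure() {
        std::lock_guard<std::mutex> guard(_chunksLock);
        bool const hasMemoryPressure = (_totalAllocated >= _totalTargetSize);
        if (hasMemoryPressure && !_freeList.empty()) {
          Chunk* chunk = _freeList.back();
          _freeList.pop_back();
          _totalAllocated -= chunk->size();  // fix statistics while holding the lock
          return chunk;
        }
        return nullptr;
      }

     private:
      std::mutex _chunksLock;
      std::vector<Chunk*> _freeList;
      std::size_t _totalAllocated = 0;
      std::size_t _totalTargetSize = 0;
    };
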
@@ -547,7 +547,13 @@ class AssocMulti {
           newBucket[j].writeHashCache(0);
         }
       }
+      try {
+        // shouldn't fail as enough space was reserved above, but let's be paranoid
       empty.emplace_back(newBucket);
+      } catch (...) {
+        delete[] newBucket;
+        throw;
+      }
     }
 
     size_t i = 0;
@@ -330,7 +330,13 @@ class AssocUnique {
 
     for (size_t i = 0; i < _buckets.size(); ++i) {
       auto newBucket = new Element[static_cast<size_t>(nrAlloc)]();
+      try {
+        // shouldn't fail as enough space was reserved above, but let's be paranoid
       empty.emplace_back(newBucket);
+      } catch (...) {
+        delete[] newBucket;
+        throw;
+      }
     }
 
     size_t i = 0;
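
Note on the AssocMulti and AssocUnique hunks above: a freshly new[]-allocated bucket is handed to a container, so the insertion is wrapped in try/catch to avoid leaking the buffer if emplace_back throws. The same guard in isolation (generic element type):

    #include <cstddef>
    #include <vector>

    void storeBucket(std::vector<int*>& owners, std::size_t nrAlloc) {
      auto* newBucket = new int[nrAlloc]();
      try {
        owners.emplace_back(newBucket);  // may throw std::bad_alloc
      } catch (...) {
        delete[] newBucket;              // don't leak the fresh allocation
        throw;
      }
    }
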
@@ -39,7 +39,7 @@ namespace arangodb {
 class StringRef {
  public:
   /// @brief create an empty StringRef
-  StringRef() : _data(""), _length(0) {}
+  constexpr StringRef() : _data(""), _length(0) {}
 
   /// @brief create a StringRef from an std::string
   explicit StringRef(std::string const& str) : _data(str.c_str()), _length(str.size()) {}
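
Note on the StringRef hunk above: making the default constructor constexpr lets empty StringRef objects be constant-initialized (for example as namespace-scope constants) instead of being constructed at runtime. A stand-in illustration:

    #include <cstddef>

    class StringRefSketch {
     public:
      constexpr StringRefSketch() : _data(""), _length(0) {}
     private:
      char const* _data;
      std::size_t _length;
    };

    constexpr StringRefSketch kEmptyRef;  // constant-initialized, no runtime constructor call
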
@@ -84,6 +84,7 @@ static v8::Handle<v8::Value> ObjectVPackObject(v8::Isolate* isolate,
     v8::Local<v8::Value> sub;
     if (v.isString()) {
       char const* p = v.getString(l);
+      // value of _key, _id, _from, _to, and _rev is ASCII too
       sub = TRI_V8_ASCII_PAIR_STRING(p, l);
     } else {
       sub = TRI_VPackToV8(isolate, v, options, &slice);