mirror of https://gitee.com/bigwinds/arangodb
Propper commit Sequence Numbers (#6958)
This commit is contained in:
parent 18de63c7c8
commit c2b6fb99ba
@@ -438,6 +438,12 @@ class Transaction {
 
   virtual uint64_t GetLogNumber() const { return log_number_; }
 
+  // Sequence number in WAL where operations start, only valid after
+  // a successful commit with the WRITE_COMMITTED db txn policy
+  virtual SequenceNumber GetCommitedSeqNumber() const {
+    return 0;
+  }
+
   virtual Status SetName(const TransactionName& name) = 0;
 
   virtual TransactionName GetName() const { return name_; }
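The new accessor is a classic virtual hook: the base class returns a neutral default and only the WRITE_COMMITTED transaction type reports a real value. A minimal compilable sketch of the pattern, using simplified stand-in types rather than the actual RocksDB headers:

#include <cassert>
#include <cstdint>

using SequenceNumber = uint64_t;  // stand-in for rocksdb::SequenceNumber

struct Transaction {
  virtual ~Transaction() = default;
  // only meaningful after a successful WRITE_COMMITTED commit
  virtual SequenceNumber GetCommitedSeqNumber() const { return 0; }
};

struct WriteCommittedTxn : Transaction {
  SequenceNumber GetCommitedSeqNumber() const override {
    return _commited_seq_nr;
  }
  SequenceNumber _commited_seq_nr = 0;  // set by the commit path
};

int main() {
  WriteCommittedTxn txn;
  txn._commited_seq_nr = 42;  // what WriteImpl() would report via &seq_used
  Transaction const* base = &txn;
  assert(base->GetCommitedSeqNumber() == 42);
  return 0;
}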
@@ -125,7 +125,8 @@ bool PessimisticTransaction::IsExpired() const {
 WriteCommittedTxn::WriteCommittedTxn(TransactionDB* txn_db,
                                      const WriteOptions& write_options,
                                      const TransactionOptions& txn_options)
-    : PessimisticTransaction(txn_db, write_options, txn_options){};
+    : PessimisticTransaction(txn_db, write_options, txn_options),
+      _commited_seq_nr(0) {};
 
 Status PessimisticTransaction::CommitBatch(WriteBatch* batch) {
   TransactionKeyMap keys_to_unlock;
@@ -228,10 +229,15 @@ Status WriteCommittedTxn::PrepareInternal() {
 
   WriteOptions write_options = write_options_;
   write_options.disableWAL = false;
   WriteBatchInternal::MarkEndPrepare(GetWriteBatch()->GetWriteBatch(), name_);
+  uint64_t seq_used = kMaxSequenceNumber;
   Status s =
       db_impl_->WriteImpl(write_options, GetWriteBatch()->GetWriteBatch(),
                           /*callback*/ nullptr, &log_number_, /*log ref*/ 0,
-                          /* disable_memtable*/ true);
+                          /*disable_memtable*/ true, &seq_used);
+  assert(!s.ok() || seq_used != kMaxSequenceNumber);
+  if (s.ok()) {
+    _commited_seq_nr = seq_used;
+  }
   return s;
 }
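Both commit paths now use the same out-parameter idiom: the low-level write reports the first WAL sequence number it consumed, and the transaction records it only if the write succeeded. A hedged sketch of that flow, with a hypothetical writeBatch() helper standing in for DBImpl::WriteImpl:

#include <cassert>
#include <cstdint>
#include <limits>

using SequenceNumber = uint64_t;
constexpr SequenceNumber kMaxSequenceNumber =
    std::numeric_limits<uint64_t>::max();  // sentinel: "not assigned yet"

// hypothetical stand-in for DBImpl::WriteImpl(): on success it reports the
// first sequence number the batch consumed through the out-parameter
bool writeBatch(SequenceNumber* seq_used) {
  static SequenceNumber next = 100;
  *seq_used = next++;
  return true;
}

int main() {
  SequenceNumber seq_used = kMaxSequenceNumber;
  SequenceNumber commited_seq_nr = 0;
  bool ok = writeBatch(&seq_used);
  assert(!ok || seq_used != kMaxSequenceNumber);  // mirrors the diff's assert
  if (ok) {
    commited_seq_nr = seq_used;  // only record on success
  }
  assert(commited_seq_nr == 100);
  return 0;
}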
@@ -320,7 +326,14 @@ Status PessimisticTransaction::Commit() {
 }
 
 Status WriteCommittedTxn::CommitWithoutPrepareInternal() {
-  Status s = db_->Write(write_options_, GetWriteBatch()->GetWriteBatch());
+  uint64_t seq_used = kMaxSequenceNumber;
+  auto s = db_impl_->WriteImpl(write_options_, GetWriteBatch()->GetWriteBatch(),
+                               /*callback*/ nullptr, /*log nr*/ nullptr,
+                               /*log ref*/ 0, /*disable_memtable*/ false, &seq_used);
+  assert(!s.ok() || seq_used != kMaxSequenceNumber);
+  if (s.ok()) {
+    _commited_seq_nr = seq_used;
+  }
   return s;
 }
@@ -202,6 +202,11 @@ class WriteCommittedTxn : public PessimisticTransaction {
 
   virtual ~WriteCommittedTxn() {}
 
+  SequenceNumber GetCommitedSeqNumber() const override {
+    assert(txn_state_ == COMMITED);
+    return _commited_seq_nr;
+  }
+
 private:
   Status PrepareInternal() override;
@@ -216,6 +221,10 @@ class WriteCommittedTxn : public PessimisticTransaction {
   // No copying allowed
   WriteCommittedTxn(const WriteCommittedTxn&);
   void operator=(const WriteCommittedTxn&);
 
+ protected:
+  // seq_nr of WriteBatch in WAL
+  SequenceNumber _commited_seq_nr;
+
 };
 
 }  // namespace rocksdb
@@ -181,6 +181,7 @@ auth::TokenCache::Entry auth::TokenCache::checkAuthenticationJWT(
   WRITE_LOCKER(writeLocker, _jwtLock);
   // intentionally copy the entry from the cache
   auth::TokenCache::Entry const& entry = _jwtCache.get(jwt);
+  // would have thrown if not found
   if (entry.expired()) {
     try {
       _jwtCache.remove(jwt);
@@ -215,13 +216,6 @@ auth::TokenCache::Entry auth::TokenCache::checkAuthenticationJWT(
     return auth::TokenCache::Entry::Unauthenticated();
   }
 
-  auth::TokenCache::Entry entry = validateJwtBody(body);
-  if (!entry._authenticated) {
-    LOG_TOPIC(TRACE, arangodb::Logger::AUTHENTICATION)
-        << "Couldn't validate jwt body " << body;
-    return auth::TokenCache::Entry::Unauthenticated();
-  }
-
   std::string const message = header + "." + body;
   if (!validateJwtHMAC256Signature(message, signature)) {
     LOG_TOPIC(TRACE, arangodb::Logger::AUTHENTICATION)
@@ -230,6 +224,13 @@ auth::TokenCache::Entry auth::TokenCache::checkAuthenticationJWT(
     return auth::TokenCache::Entry::Unauthenticated();
   }
 
+  auth::TokenCache::Entry entry = validateJwtBody(body);
+  if (!entry._authenticated) {
+    LOG_TOPIC(TRACE, arangodb::Logger::AUTHENTICATION)
+        << "Couldn't validate jwt body " << body;
+    return auth::TokenCache::Entry::Unauthenticated();
+  }
+
   WRITE_LOCKER(writeLocker, _jwtLock);
   _jwtCache.put(jwt, entry);
   return entry;
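Net effect of the two TokenCache hunks: the JWT body is now parsed only after the HMAC signature has been verified, so claims inside a forged token are never interpreted. A simplified sketch of the resulting order; verifySignature, parseBody and Entry are illustrative stand-ins, not the ArangoDB API:

#include <mutex>
#include <optional>
#include <string>
#include <unordered_map>

struct Entry { bool authenticated = false; };

// toy stand-ins for validateJwtHMAC256Signature / validateJwtBody
bool verifySignature(std::string const& message, std::string const& sig) {
  return sig == "valid-signature";  // real code checks an HMAC-SHA256 tag
}
std::optional<Entry> parseBody(std::string const& body) {
  if (body.empty()) return std::nullopt;
  return Entry{true};
}

std::mutex jwtLock;  // plays the role of _jwtLock
std::unordered_map<std::string, Entry> jwtCache;

Entry checkJWT(std::string const& jwt, std::string const& header,
               std::string const& body, std::string const& signature) {
  // 1. verify the signature before trusting anything inside the token
  if (!verifySignature(header + "." + body, signature)) {
    return Entry{};  // unauthenticated
  }
  // 2. only now parse and validate the body's claims
  auto entry = parseBody(body);
  if (!entry || !entry->authenticated) {
    return Entry{};
  }
  // 3. cache the validated entry under the write lock
  std::lock_guard<std::mutex> guard(jwtLock);
  jwtCache[jwt] = *entry;
  return *entry;
}

int main() {
  Entry e = checkJWT("token", "header", "body", "valid-signature");
  return e.authenticated ? 0 : 1;
}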
@@ -571,6 +571,9 @@ Result auth::UserManager::accessUser(std::string const& user,
 }
 
 bool auth::UserManager::userExists(std::string const& user) {
+  if (user.empty()) {
+    return false;
+  }
   loadFromDB();
 
   READ_LOCKER(readGuard, _userCacheLock);
@@ -640,25 +640,20 @@ Result RocksDBCollection::truncate(transaction::Methods* trx,
     // non-transactional truncate optimization. We perform a bunch of
     // range deletes and circumvent the normal rocksdb::Transaction.
     // no savepoint needed here
-    rocksdb::WriteBatch batch;
-    // add the assertion again here, so we are sure we can use RangeDeletes
-    TRI_ASSERT(static_cast<RocksDBEngine*>(EngineSelectorFeature::ENGINE)->canUseRangeDeleteInWal());
-
-    auto log = RocksDBLogValue::CollectionTruncate(trx->vocbase().id(),
-                                                   _logicalCollection.id(), _objectId);
-    rocksdb::Status s = batch.PutLogData(log.slice());
-    if (!s.ok()) {
-      return rocksutils::convertStatus(s);
-    }
-
+    TRI_ASSERT(!state->hasOperations());  // not allowed
     TRI_IF_FAILURE("RocksDBRemoveLargeRangeOn") {
       return Result(TRI_ERROR_DEBUG);
     }
 
+    RocksDBEngine* engine = rocksutils::globalRocksEngine();
+    // add the assertion again here, so we are sure we can use RangeDeletes
+    TRI_ASSERT(engine->canUseRangeDeleteInWal());
+
+    rocksdb::WriteBatch batch;
     // delete documents
     RocksDBKeyBounds bounds = RocksDBKeyBounds::CollectionDocuments(_objectId);
-    s = batch.DeleteRange(bounds.columnFamily(), bounds.start(), bounds.end());
+    rocksdb::Status s = batch.DeleteRange(bounds.columnFamily(), bounds.start(), bounds.end());
     if (!s.ok()) {
       return rocksutils::convertStatus(s);
     }
@@ -677,19 +672,30 @@ Result RocksDBCollection::truncate(transaction::Methods* trx,
       }
     }
 
-    state->addTruncateOperation(_logicalCollection.id());
+    // now add the log entry so we can recover the correct count
+    auto log = RocksDBLogValue::CollectionTruncate(trx->vocbase().id(),
+                                                   _logicalCollection.id(), _objectId);
+    s = batch.PutLogData(log.slice());
+    if (!s.ok()) {
+      return rocksutils::convertStatus(s);
+    }
+
     rocksdb::WriteOptions wo;
     s = rocksutils::globalRocksDB()->Write(wo, &batch);
     if (!s.ok()) {
       return rocksutils::convertStatus(s);
     }
-    TRI_ASSERT(state->numRemoves() == _numberDocuments);
 
-    if (_numberDocuments > 64 * 1024) {
+    rocksdb::SequenceNumber seq = rocksutils::latestSequenceNumber();
+    uint64_t numDocs = _numberDocuments.exchange(0);
+    RocksDBSettingsManager::CounterAdjustment update(seq, /*numInserts*/0,
+                                                     /*numRemoves*/numDocs, /*revision*/0);
+    engine->settingsManager()->updateCounter(_objectId, update);
+
+    if (numDocs > 64 * 1024) {
       // also compact the ranges in order to speed up all further accesses
       compact();
    }
+
+    TRI_ASSERT(!state->hasOperations());  // not allowed
     return Result{};
   }
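The counter reset above relies on atomic exchange semantics: the document count is read and zeroed in one indivisible step, so the value that feeds the CounterAdjustment can neither be lost nor applied twice by concurrent readers. A small self-contained illustration with illustrative names:

#include <atomic>
#include <cassert>
#include <cstdint>

int main() {
  std::atomic<uint64_t> numberDocuments{100000};
  // read the old value and zero the counter in a single atomic operation
  uint64_t numDocs = numberDocuments.exchange(0);
  assert(numDocs == 100000);
  assert(numberDocuments.load() == 0);
  if (numDocs > 64 * 1024) {
    // the diff compacts the deleted ranges only for large collections
  }
  return 0;
}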
@@ -518,10 +518,10 @@ class RocksDBCuckooIndexEstimator {
   void removeBlocker(uint64_t trxId) {
     WRITE_LOCKER(locker, _lock);
     auto it = _blockers.find(trxId);
-    if (_blockers.end() != it) {
+    if (ADB_LIKELY(_blockers.end() != it)) {
       auto cross = _blockersBySeq.find(std::make_pair(it->second, it->first));
       TRI_ASSERT(_blockersBySeq.end() != cross);
-      if (_blockersBySeq.end() != cross) {
+      if (ADB_LIKELY(_blockersBySeq.end() != cross)) {
         _blockersBySeq.erase(cross);
       }
       _blockers.erase(it);
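ADB_LIKELY is ArangoDB's branch-prediction hint. Its definition is not part of this diff; the sketch below assumes the conventional GCC/Clang form built on __builtin_expect, with a portable fallback:

#include <cstdio>

// assumed definition, modeled on the usual __builtin_expect idiom; the real
// macro lives elsewhere in the ArangoDB sources
#if defined(__GNUC__) || defined(__clang__)
#define ADB_LIKELY(cond) __builtin_expect(!!(cond), 1)
#else
#define ADB_LIKELY(cond) (cond)
#endif

int main() {
  int found = 1;
  if (ADB_LIKELY(found != 0)) {  // tells the compiler this branch is hot
    std::puts("fast path");
  }
  return 0;
}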
@@ -291,6 +291,12 @@ void RocksDBEngine::validateOptions(
         << "supported on this platform";
   }
 #endif
 
+  if (_pruneWaitTimeInitial < 10) {
+    LOG_TOPIC(WARN, arangodb::Logger::ENGINES)
+        << "consider increasing the value for --rocksdb.wal-file-timeout-initial. "
+        << "Replication clients might have trouble to get in sync";
+  }
+
 }
 
 // preparation phase for storage engine. can be used for internal setup.
@@ -277,15 +277,6 @@ void RocksDBTransactionCollection::addOperation(
   }
 }
 
-void RocksDBTransactionCollection::addTruncateOperation() {
-  TRI_ASSERT(_numInserts == 0 && _numUpdates == 0 && _numRemoves == 0);
-  if (!isLocked() || _accessType != AccessMode::Type::EXCLUSIVE) {
-    TRI_ASSERT(false);
-    THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "collection must be exlusively locked");
-  }
-  _numRemoves += _initialNumberDocuments + _numInserts;
-}
-
 void RocksDBTransactionCollection::prepareCommit(uint64_t trxId,
                                                  uint64_t preCommitSeq) {
   TRI_ASSERT(_collection != nullptr);
@@ -347,8 +338,8 @@ void RocksDBTransactionCollection::commitCounts(uint64_t trxId,
       auto ridx = static_cast<RocksDBIndex*>(idx.get());
       auto estimator = ridx->estimator();
       if (estimator) {
-        estimator->bufferUpdates(commitSeq, std::move(pair.second.first),
-                                 std::move(pair.second.second));
+        estimator->bufferUpdates(commitSeq, std::move(pair.second.inserts),
+                                 std::move(pair.second.removals));
         estimator->removeBlocker(trxId);
       }
     }
@@ -363,13 +354,13 @@ void RocksDBTransactionCollection::commitCounts(uint64_t trxId,
 void RocksDBTransactionCollection::trackIndexInsert(uint64_t idxObjectId,
                                                     uint64_t hash) {
   // First list is Inserts
-  _trackedIndexOperations[idxObjectId].first.emplace_back(hash);
+  _trackedIndexOperations[idxObjectId].inserts.emplace_back(hash);
 }
 
 void RocksDBTransactionCollection::trackIndexRemove(uint64_t idxObjectId,
                                                     uint64_t hash) {
   // Second list is Removes
-  _trackedIndexOperations[idxObjectId].second.emplace_back(hash);
+  _trackedIndexOperations[idxObjectId].removals.emplace_back(hash);
 }
 
 /// @brief lock a collection
@@ -89,11 +89,6 @@ class RocksDBTransactionCollection final : public TransactionCollection {
   void addOperation(TRI_voc_document_operation_e operationType,
                     TRI_voc_rid_t revisionId);
 
-  /// @brief will perform _numRemoves = _initialNumberDocuments
-  /// be aware that this is only a valid operation under an
-  /// exclusive collection lock
-  void addTruncateOperation();
-
   /**
    * @brief Prepare collection for commit by placing index blockers
    * @param trxId Active transaction ID
@@ -142,12 +137,14 @@ class RocksDBTransactionCollection final : public TransactionCollection {
   uint64_t _numRemoves;
   bool _usageLocked;
 
+  struct IndexOperations {
+    std::vector<uint64_t> inserts;
+    std::vector<uint64_t> removals;
+  };
+
   /// @brief A list where all indexes with estimates can store their operations
   /// Will be applied to the inserter on commit and not applied on abort
-  std::unordered_map<uint64_t,
-                     std::pair<std::vector<uint64_t>, std::vector<uint64_t>>>
-      _trackedIndexOperations;
+  std::unordered_map<uint64_t, IndexOperations> _trackedIndexOperations;
 
 };
 }
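Replacing the anonymous pair with the named IndexOperations struct makes every call site self-documenting: .inserts/.removals instead of .first/.second. A minimal sketch of the before/after, outside the ArangoDB class:

#include <cstdint>
#include <unordered_map>
#include <vector>

struct IndexOperations {
  std::vector<uint64_t> inserts;
  std::vector<uint64_t> removals;
};

int main() {
  std::unordered_map<uint64_t, IndexOperations> tracked;
  uint64_t idxObjectId = 7;
  tracked[idxObjectId].inserts.emplace_back(0xdeadbeef);   // was .first.emplace_back
  tracked[idxObjectId].removals.emplace_back(0xfeedface);  // was .second.emplace_back
  return tracked[idxObjectId].inserts.size() == 1 ? 0 : 1;
}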
@@ -67,7 +67,6 @@ RocksDBTransactionState::RocksDBTransactionState(
       _readSnapshot(nullptr),
       _rocksReadOptions(),
       _cacheTx(nullptr),
-      _numCommits(0),
       _numInserts(0),
       _numUpdates(0),
      _numRemoves(0),
@@ -286,6 +285,7 @@ arangodb::Result RocksDBTransactionState::internalCommit() {
     // begin transaction + commit transaction + n doc removes
     TRI_ASSERT(_numLogdata == (2 + _numRemoves));
   }
+  ++_numCommits;
 #endif
 
   // prepare for commit on each collection, e.g. place blockers for estimators
@@ -318,12 +318,20 @@ arangodb::Result RocksDBTransactionState::internalCommit() {
   }
 #endif
 
-  ++_numCommits;
+  // total number of sequence ID consuming records
+  uint64_t numOps = _rocksTransaction->GetNumPuts() +
+                    _rocksTransaction->GetNumDeletes() +
+                    _rocksTransaction->GetNumMerges();
+  // will invalidate all counts
   result = rocksutils::convertStatus(_rocksTransaction->Commit());
 
   if (result.ok()) {
-    rocksdb::SequenceNumber latestSeq =
-        rocksutils::globalRocksDB()->GetLatestSequenceNumber();
+    TRI_ASSERT(numOps > 0);  // simon: should hold unless we're being stupid
+    rocksdb::SequenceNumber postCommitSeq = _rocksTransaction->GetCommitedSeqNumber();
+    if (ADB_LIKELY(numOps > 0)) {
+      postCommitSeq += numOps - 1;  // add to get to the next batch
+    }
+    TRI_ASSERT(postCommitSeq <= rocksutils::globalRocksDB()->GetLatestSequenceNumber());
+
     for (auto& trxCollection : _collections) {
       RocksDBTransactionCollection* collection =
@@ -331,7 +339,7 @@ arangodb::Result RocksDBTransactionState::internalCommit() {
       // we need this in case of an intermediate commit. The number of
       // initial documents is adjusted and numInserts / removes is set to 0
       // index estimator updates are buffered
-      collection->commitCounts(id(), latestSeq);
+      collection->commitCounts(id(), postCommitSeq);
       committed = true;
     }
 
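The arithmetic above assumes a committed WriteBatch occupies a contiguous range of WAL sequence numbers: if its first record lands at S and it contains numOps sequence-consuming records (puts + deletes + merges), the last one lands at S + numOps - 1. A worked example with illustrative values:

#include <cassert>
#include <cstdint>

int main() {
  uint64_t commitSeq = 1000;  // first sequence number of the commit batch
  uint64_t numOps = 3;        // puts + deletes + merges in the batch
  uint64_t postCommitSeq = commitSeq;
  if (numOps > 0) {
    postCommitSeq += numOps - 1;  // last sequence number the batch consumed
  }
  assert(postCommitSeq == 1002);  // the next batch would start at 1003
  return 0;
}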
@@ -553,23 +561,6 @@ Result RocksDBTransactionState::addOperation(
   return checkIntermediateCommit(currentSize, hasPerformedIntermediateCommit);
 }
 
-// only a valid under an exlusive lock as an only operation
-void RocksDBTransactionState::addTruncateOperation(TRI_voc_cid_t cid) {
-  auto tcoll = static_cast<RocksDBTransactionCollection*>(findCollection(cid));
-  if (tcoll == nullptr) {
-    std::string message = "collection '" + std::to_string(cid) +
-                          "' not found in transaction state";
-    THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, message);
-  }
-  tcoll->addTruncateOperation();
-  _numRemoves += tcoll->numRemoves();
-  TRI_ASSERT(_numInserts == 0 && _numUpdates == 0);
-  TRI_ASSERT(!hasHint(transaction::Hints::Hint::SINGLE_OPERATION));
-#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
-  _numLogdata += _numRemoves;  // cheat our own sanity checks
-#endif
-}
-
 RocksDBMethods* RocksDBTransactionState::rocksdbMethods() {
   TRI_ASSERT(_rocksMethods);
   return _rocksMethods.get();
@@ -92,7 +92,9 @@ class RocksDBTransactionState final : public TransactionState {
   /// @brief abort a transaction
   Result abortTransaction(transaction::Methods* trx) override;
 
+#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
   uint64_t numCommits() const { return _numCommits; }
+#endif
   uint64_t numInserts() const { return _numInserts; }
   uint64_t numUpdates() const { return _numUpdates; }
   uint64_t numRemoves() const { return _numRemoves; }
@@ -117,11 +119,7 @@ class RocksDBTransactionState final : public TransactionState {
                       TRI_voc_rid_t revisionId, TRI_voc_document_operation_e opType,
                       bool& hasPerformedIntermediateCommit);
 
-  /// @brief will perform _numRemoves = _initialNumberDocuments
-  /// be aware that this is only a valid operation under an
-  /// exclusive collection lock
-  void addTruncateOperation(TRI_voc_cid_t cid);
-
+  /// @brief return wrapper around rocksdb transaction
   RocksDBMethods* rocksdbMethods();
 
   /// @brief insert a snapshot into a (not yet started) transaction.
@@ -202,17 +200,17 @@ class RocksDBTransactionState final : public TransactionState {
   /// @brief wrapper to use outside this class to access rocksdb
   std::unique_ptr<RocksDBMethods> _rocksMethods;
 
-  uint64_t _numCommits;
+#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
+  /// store the number of log entries in WAL
+  uint64_t _numLogdata = 0;
+  uint64_t _numCommits = 0;
+#endif
   // if a transaction gets bigger than these values then an automatic
   // intermediate commit will be done
   uint64_t _numInserts;
   uint64_t _numUpdates;
   uint64_t _numRemoves;
 
-#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
-  /// store the number of log entries in WAL
-  uint64_t _numLogdata = 0;
-#endif
   SmallVector<RocksDBKey*, 32>::allocator_type::arena_type _arena;
   SmallVector<RocksDBKey*, 32> _keys;
   /// @brief if true there key buffers will no longer be shared
@@ -167,27 +167,3 @@ std::string Exception::FillFormatExceptionString(char const* format, ...) {
   return std::string(buffer);
 }
 
-Result basics::catchToResult(std::function<Result()> fn, int defaultError) {
-  // TODO check whether there are other specific exceptions we should catch
-  Result result{TRI_ERROR_NO_ERROR};
-  try {
-    result = fn();
-  } catch (arangodb::basics::Exception const& e) {
-    result.reset(e.code(), e.message());
-  } catch (std::bad_alloc const&) {
-    result.reset(TRI_ERROR_OUT_OF_MEMORY);
-  } catch (std::exception const& e) {
-    result.reset(defaultError, e.what());
-  } catch (...) {
-    result.reset(defaultError);
-  }
-  return result;
-}
-
-Result basics::catchVoidToResult(std::function<void()> fn, int defaultError) {
-  std::function<Result()> wrapped = [&fn]() -> Result {
-    fn();
-    return Result{TRI_ERROR_NO_ERROR};
-  };
-  return catchToResult(wrapped, defaultError);
-}
@@ -106,10 +106,32 @@ class Exception final : public virtual std::exception {
   int const _code;
 };
 
-Result catchToResult(std::function<Result()> fn,
-                     int defaultError = TRI_ERROR_INTERNAL);
-Result catchVoidToResult(std::function<void()> fn,
-                         int defaultError = TRI_ERROR_INTERNAL);
+template<typename F>
+Result catchToResult(F&& fn, int defaultError = TRI_ERROR_INTERNAL) {
+  // TODO check whether there are other specific exceptions we should catch
+  Result result{TRI_ERROR_NO_ERROR};
+  try {
+    result = std::forward<F>(fn)();
+  } catch (arangodb::basics::Exception const& e) {
+    result.reset(e.code(), e.message());
+  } catch (std::bad_alloc const&) {
+    result.reset(TRI_ERROR_OUT_OF_MEMORY);
+  } catch (std::exception const& e) {
+    result.reset(defaultError, e.what());
+  } catch (...) {
+    result.reset(defaultError);
+  }
+  return result;
+}
+
+template<typename F>
+Result catchVoidToResult(F&& fn, int defaultError = TRI_ERROR_INTERNAL) {
+  auto wrapped = [&fn]() -> Result {
+    std::forward<F>(fn)();
+    return Result{TRI_ERROR_NO_ERROR};
+  };
+  return catchToResult(wrapped, defaultError);
+}
 
 }
 }
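Turning catchToResult into a header-only function template lets it accept any callable directly, without constructing a std::function (which type-erases and may allocate). A hedged usage sketch with simplified stand-ins for arangodb::Result and the TRI_ERROR_* codes so it compiles on its own:

#include <stdexcept>
#include <string>
#include <utility>

constexpr int TRI_ERROR_NO_ERROR = 0;  // stand-in error codes
constexpr int TRI_ERROR_INTERNAL = 4;

struct Result {  // simplified stand-in for arangodb::Result
  int code = TRI_ERROR_NO_ERROR;
  std::string message;
  void reset(int c, std::string m = {}) { code = c; message = std::move(m); }
};

template <typename F>
Result catchToResult(F&& fn, int defaultError = TRI_ERROR_INTERNAL) {
  Result result;
  try {
    result = std::forward<F>(fn)();
  } catch (std::exception const& e) {
    result.reset(defaultError, e.what());
  } catch (...) {
    result.reset(defaultError);
  }
  return result;
}

int main() {
  // a plain lambda binds to F&& directly; no std::function wrapper involved
  Result r = catchToResult([]() -> Result {
    throw std::runtime_error("boom");
  });
  return r.code == TRI_ERROR_INTERNAL ? 0 : 1;
}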
@@ -92,7 +92,7 @@ function getClusterEndpoints() {
   });
   assertTrue(res instanceof request.Response);
   assertTrue(res.hasOwnProperty('statusCode'), JSON.stringify(res));
-  assertTrue(res.statusCode === 200, JSON.stringify(res));
+  assertEqual(res.statusCode, 200, JSON.stringify(res));
   assertTrue(res.hasOwnProperty('json'));
   assertTrue(res.json.hasOwnProperty('endpoints'));
   assertTrue(res.json.endpoints instanceof Array);
@@ -94,7 +94,7 @@ function getClusterEndpoints() {
   });
   assertTrue(res instanceof request.Response);
   assertTrue(res.hasOwnProperty('statusCode'), JSON.stringify(res));
-  assertTrue(res.statusCode === 200, JSON.stringify(res));
+  assertEqual(res.statusCode, 200, JSON.stringify(res));
   assertTrue(res.hasOwnProperty('json'));
   assertTrue(res.json.hasOwnProperty('endpoints'));
   assertTrue(res.json.endpoints instanceof Array);
@@ -124,7 +124,8 @@ function getApplierState(endpoint) {
     }
   });
   assertTrue(res instanceof request.Response);
-  assertTrue(res.hasOwnProperty('statusCode') && res.statusCode === 200);
+  assertTrue(res.hasOwnProperty('statusCode'));
+  assertEqual(res.statusCode, 200, JSON.stringify(res));
   assertTrue(res.hasOwnProperty('json'));
   return arangosh.checkRequestResult(res.json);
 }
@@ -1,5 +1,5 @@
 /* jshint globalstrict:false, strict:false, unused: false */
-/* global assertEqual, assertFalse, assertNull, assertNotNull */
+/* global assertEqual, assertFalse, assertNull, assertNotNull, fail */
 // //////////////////////////////////////////////////////////////////////////////
 // / @brief tests for transactions
 // /
@@ -85,6 +85,20 @@ function recoverySuite () {
         assertEqual([], db._query(query, { "@collection": c.name(), value: i }).toArray());
         assertEqual([], c.edges("test/" + i));
       }
 
+      internal.waitForEstimatorSync(); // make sure estimates are consistent
+      let indexes = c.getIndexes(true);
+      for (let i of indexes) {
+        switch (i.type) {
+          case 'primary':
+          case 'hash':
+          case 'edge':
+            assertEqual(i.selectivityEstimate, 1, JSON.stringify(i));
+            break;
+          default:
+            fail();
+        }
+      }
+
     }
 
   };
@@ -150,9 +150,7 @@ function CollectionTruncateFailuresSuite() {
       assertEqual(e.errorNum, ERRORS.ERROR_DEBUG.code);
     }
 
-    // All docments should be removed through intermediate commits.
-    // We have two packs that fill up those commits.
-    // Now validate that we endup with an empty collection.
+    // all commits failed, no documents removed
     assertEqual(c.count(), 20000);
 
     // Test Primary
@@ -226,9 +224,7 @@ function CollectionTruncateFailuresSuite() {
      assertEqual(e.errorNum, ERRORS.ERROR_DEBUG.code);
    }
 
-    // All docments should be removed through intermediate commits.
-    // We have two packs that fill up those commits.
-    // Now validate that we endup with an empty collection.
+    // At 10k removals an intermediate commit happens, then a fail
     assertEqual(c.count(), 10000);
 
     // Test Primary