1
0
Fork 0

Bug fix/restore unlock (#4387)

This commit is contained in:
Jan 2018-01-25 15:56:27 +01:00 committed by GitHub
parent 739d483724
commit fe0fca9029
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
23 changed files with 89 additions and 112 deletions

View File

@ -757,8 +757,8 @@ Result MMFilesSkiplistIndex::insert(transaction::Methods* trx,
}
if (res == TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED && !_unique) {
// We ignore unique_constraint violated if we are not unique
res = TRI_ERROR_NO_ERROR;
// We ignore unique_constraint violated if we are not unique
res = TRI_ERROR_NO_ERROR;
}
break;

View File

@ -246,7 +246,7 @@ void GraphStore<V, E>::loadDocument(WorkerConfig* config,
Result res = trx->documentFastPathLocal(vertexShard, StringRef(_key),
mmdr, true);
if (res.fail()) {
THROW_ARANGO_EXCEPTION(res.errorNumber());
THROW_ARANGO_EXCEPTION(res);
}
VPackSlice doc(mmdr.vpack());

View File

@ -520,16 +520,10 @@ Result Syncer::createCollection(TRI_vocbase_t* vocbase,
TRI_ASSERT(!simulate32Client()); // < 3.3 should never get here
if (col->isSystem()) {
TRI_ASSERT(col->globallyUniqueId() == col->name());
CollectionGuard guard(vocbase, col);
if (guard.collection() == nullptr) {
return Result(TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND);
}
SingleCollectionTransaction trx(transaction::StandaloneContext::Create(vocbase),
guard.collection()->cid(), AccessMode::Type::WRITE);
col->cid(), AccessMode::Type::WRITE);
// already locked by guard above
trx.addHint(transaction::Hints::Hint::NO_USAGE_LOCK);
Result res = trx.begin();
if (!res.ok()) {
return res;
@ -622,16 +616,8 @@ Result Syncer::createIndex(VPackSlice const& slice) {
}
try {
CollectionGuard guard(vocbase, col);
if (guard.collection() == nullptr) {
return Result(TRI_ERROR_ARANGO_COLLECTION_NOT_FOUND);
}
SingleCollectionTransaction trx(transaction::StandaloneContext::Create(vocbase),
guard.collection()->cid(), AccessMode::Type::WRITE);
// already locked by guard above
trx.addHint(transaction::Hints::Hint::NO_USAGE_LOCK);
col->cid(), AccessMode::Type::WRITE);
Result res = trx.begin();
@ -639,7 +625,7 @@ Result Syncer::createIndex(VPackSlice const& slice) {
return res;
}
auto physical = guard.collection()->getPhysical();
auto physical = trx.documentCollection()->getPhysical();
TRI_ASSERT(physical != nullptr);
std::shared_ptr<arangodb::Index> idx;
res = physical->restoreIndex(&trx, indexSlice, idx);

View File

@ -74,8 +74,9 @@ LogicalCollection* RestIndexHandler::collection(
}
// //////////////////////////////////////////////////////////////////////////////
// / @brief get database infos
// / @brief get index infos
// //////////////////////////////////////////////////////////////////////////////
RestStatus RestIndexHandler::getIndexes() {
std::shared_ptr<LogicalCollection> tmpColl;
std::vector<std::string> const& suffixes = _request->suffixes();

View File

@ -1540,17 +1540,8 @@ int RestReplicationHandler::processRestoreIndexes(VPackSlice const& collection,
// look up the collection
try {
CollectionGuard guard(_vocbase, name.c_str());
LogicalCollection* collection = guard.collection();
auto ctx = transaction::StandaloneContext::Create(_vocbase);
SingleCollectionTransaction trx(ctx, collection->cid(),
AccessMode::Type::EXCLUSIVE);
// collection status lock was already acquired by collection guard
// above
trx.addHint(transaction::Hints::Hint::NO_USAGE_LOCK);
SingleCollectionTransaction trx(ctx, name, AccessMode::Type::EXCLUSIVE);
Result res = trx.begin();
@ -1560,6 +1551,7 @@ int RestReplicationHandler::processRestoreIndexes(VPackSlice const& collection,
THROW_ARANGO_EXCEPTION(res);
}
LogicalCollection* collection = trx.documentCollection();
auto physical = collection->getPhysical();
TRI_ASSERT(physical != nullptr);
for (VPackSlice const& idxDef : VPackArrayIterator(indexes)) {

View File

@ -71,7 +71,7 @@ arangodb::Result globalRocksDBPut(rocksdb::ColumnFamilyHandle* cf,
TRI_ASSERT(cf != nullptr);
auto status = globalRocksDB()->Put(options, cf, key, val);
return convertStatus(status);
};
}
arangodb::Result globalRocksDBRemove(rocksdb::ColumnFamilyHandle* cf,
rocksdb::Slice const& key,
@ -79,12 +79,12 @@ arangodb::Result globalRocksDBRemove(rocksdb::ColumnFamilyHandle* cf,
TRI_ASSERT(cf != nullptr);
auto status = globalRocksDB()->Delete(options, cf, key);
return convertStatus(status);
};
}
uint64_t latestSequenceNumber() {
auto seq = globalRocksDB()->GetLatestSequenceNumber();
return static_cast<uint64_t>(seq);
};
}
void addCollectionMapping(uint64_t objectId, TRI_voc_tick_t did,
TRI_voc_cid_t cid) {

View File

@ -428,7 +428,7 @@ RocksDBEdgeIndex::~RocksDBEdgeIndex() {}
double RocksDBEdgeIndex::selectivityEstimateLocal(
arangodb::StringRef const* attribute) const {
if (attribute != nullptr && attribute->compare(_directionAttr)) {
return 0;
return 0.0;
}
TRI_ASSERT(_estimator != nullptr);
return _estimator->computeEstimate();

View File

@ -197,8 +197,8 @@ class RocksDBEdgeIndex final : public RocksDBIndex {
private:
std::string _directionAttr;
bool _isFromIndex;
std::string const _directionAttr;
bool const _isFromIndex;
/// @brief A fixed size library to estimate the selectivity of the index.
/// On insertion of a document we have to insert it into the estimator,

View File

@ -759,7 +759,7 @@ void RocksDBEngine::getCollectionInfo(TRI_vocbase_t* vocbase, TRI_voc_cid_t cid,
auto result = rocksutils::convertStatus(res);
if (result.errorNumber() != TRI_ERROR_NO_ERROR) {
THROW_ARANGO_EXCEPTION(result.errorNumber());
THROW_ARANGO_EXCEPTION(result);
}
VPackSlice fullParameters = RocksDBValue::data(value);
@ -1223,7 +1223,7 @@ void RocksDBEngine::createView(TRI_vocbase_t* vocbase, TRI_voc_cid_t id,
auto status = rocksutils::convertStatus(res);
if (!status.ok()) {
THROW_ARANGO_EXCEPTION(status.errorNumber());
THROW_ARANGO_EXCEPTION(status);
}
}

View File

@ -53,30 +53,24 @@ RocksDBExportCursor::RocksDBExportCursor(
_resolver(vocbase),
_restrictions(restrictions),
_name(name) {
// prevent the collection from being unloaded while the export is ongoing
// this may throw
_collectionGuard.reset(
new arangodb::CollectionGuard(vocbase, _name.c_str(), false));
_collection = _collectionGuard->collection();
_trx.reset(new SingleCollectionTransaction(
transaction::StandaloneContext::Create(vocbase), _name,
AccessMode::Type::READ));
// already locked by guard above
_trx->addHint(transaction::Hints::Hint::NO_USAGE_LOCK);
Result res = _trx->begin();
if (!res.ok()) {
THROW_ARANGO_EXCEPTION(res);
}
LogicalCollection* collection = _trx->documentCollection();
TRI_ASSERT(collection != nullptr);
auto rocksCollection =
static_cast<RocksDBCollection*>(_collection->getPhysical());
static_cast<RocksDBCollection*>(collection->getPhysical());
_iter = rocksCollection->getAllIterator(_trx.get(), false);
_size = _collection->numberDocuments(_trx.get());
_size = collection->numberDocuments(_trx.get());
if (limit > 0 && limit < _size) {
_size = limit;
}

View File

@ -33,11 +33,8 @@
#include "VocBase/voc-types.h"
namespace arangodb {
namespace transaction {
class Methods;
}
class IndexIterator;
class SingleCollectionTransaction;
class RocksDBExportCursor final : public Cursor {
public:
@ -63,9 +60,7 @@ class RocksDBExportCursor final : public Cursor {
arangodb::CollectionNameResolver _resolver;
CollectionExport::Restrictions _restrictions;
std::string const _name;
std::unique_ptr<arangodb::CollectionGuard> _collectionGuard;
LogicalCollection* _collection;
std::unique_ptr<transaction::Methods> _trx;
std::unique_ptr<SingleCollectionTransaction> _trx;
std::unique_ptr<IndexIterator> _iter;
size_t _size;
};

View File

@ -102,8 +102,9 @@ rocksdb::Comparator const* RocksDBIndex::comparator() const {
void RocksDBIndex::toVelocyPackFigures(VPackBuilder& builder) const {
TRI_ASSERT(builder.isOpenObject());
Index::toVelocyPackFigures(builder);
builder.add("cacheInUse", VPackValue(useCache()));
if (useCache()) {
bool cacheInUse = useCache();
builder.add("cacheInUse", VPackValue(cacheInUse));
if (cacheInUse) {
builder.add("cacheSize", VPackValue(_cache->size()));
auto hitRates = _cache->hitRates();
double rate = hitRates.first;
@ -125,7 +126,6 @@ void RocksDBIndex::load() {
void RocksDBIndex::unload() {
if (useCache()) {
// LOG_TOPIC(ERR, Logger::FIXME) << "unload cache";
destroyCache();
TRI_ASSERT(!_cachePresent);
}

View File

@ -182,8 +182,8 @@ arangodb::Result RocksDBTrxMethods::Get(rocksdb::ColumnFamilyHandle* cf,
rocksdb::ReadOptions const& ro = _state->_rocksReadOptions;
TRI_ASSERT(ro.snapshot != nullptr);
rocksdb::Status s = _state->_rocksTransaction->Get(ro, cf, key, val);
if(!s.ok()){
rv = rocksutils::convertStatus(s, rocksutils::StatusHint::document, "", "Get - in RocksDBTrxMethods");
if (!s.ok()) {
rv = rocksutils::convertStatus(s, rocksutils::StatusHint::document, "", "Get - in RocksDBTrxMethods");
}
return rv;
}

View File

@ -278,7 +278,6 @@ Result RocksDBPrimaryIndex::removeInternal(transaction::Methods* trx,
// acquire rocksdb transaction
RocksDBMethods* mthds = RocksDBTransactionState::toMethods(trx);
Result r = mthds->Delete(_cf, key.ref());
// rocksutils::convertStatus(status, rocksutils::StatusHint::index);
return IndexResult(r.errorNumber(), this);
}

View File

@ -328,7 +328,7 @@ int RocksDBTransactionCollection::doLock(AccessMode::Type type,
_lockType = type;
return TRI_ERROR_NO_ERROR;
}
if (_transaction->hasHint(transaction::Hints::Hint::LOCK_NEVER)) {
// never lock
return TRI_ERROR_NO_ERROR;

View File

@ -87,6 +87,9 @@ Result RocksDBTransactionState::beginTransaction(transaction::Hints hints) {
LOG_TRX(this, _nestingLevel) << "beginning " << AccessMode::typeString(_type)
<< " transaction";
TRI_ASSERT(!hasHint(transaction::Hints::Hint::NO_USAGE_LOCK) || !AccessMode::isWriteOrExclusive(_type));
if (_nestingLevel == 0) {
// set hints
_hints = hints;
@ -295,7 +298,7 @@ Result RocksDBTransactionState::commitTransaction(
transaction::Methods* activeTrx) {
LOG_TRX(this, _nestingLevel) << "committing " << AccessMode::typeString(_type)
<< " transaction";
TRI_ASSERT(_status == transaction::Status::RUNNING);
TRI_IF_FAILURE("TransactionWriteCommitMarker") {
return Result(TRI_ERROR_DEBUG);

View File

@ -95,7 +95,7 @@ void RocksDBView::drop() {
batch.Delete(RocksDBColumnFamily::definitions(), key.string());
auto status = rocksutils::convertStatus(db->Write(wo, &batch));
if (!status.ok()) {
THROW_ARANGO_EXCEPTION(status.errorNumber());
THROW_ARANGO_EXCEPTION(status);
}
}

View File

@ -39,12 +39,12 @@ class Hints {
SINGLE_OPERATION = 1,
LOCK_ENTIRELY = 2,
LOCK_NEVER = 4,
NO_BEGIN_MARKER = 8,
NO_ABORT_MARKER = 16,
NO_THROTTLING = 32,
NO_BEGIN_MARKER = 8, // not supported in RocksDB
NO_ABORT_MARKER = 16, // not supported in RocksDB
NO_THROTTLING = 32, // not supported in RocksDB
TRY_LOCK = 64,
NO_COMPACTION_LOCK = 128,
NO_USAGE_LOCK = 256,
NO_COMPACTION_LOCK = 128, // not supported in RocksDB
NO_USAGE_LOCK = 256, // not supported in RocksDB
RECOVERY = 512,
NO_DLD = 1024 // disable deadlock detection
};

View File

@ -278,22 +278,32 @@ Result Collections::properties(LogicalCollection* coll, VPackBuilder& builder) {
"allowUserKeys", "cid", "count", "deleted", "id", "indexes", "name",
"path", "planId", "shards", "status", "type", "version"};
// this transaction is held longer than the following if...
std::unique_ptr<SingleCollectionTransaction> trx;
if (!ServerState::instance()->isCoordinator()) {
auto ctx = transaction::V8Context::CreateWhenRequired(coll->vocbase(), true);
trx.reset(new SingleCollectionTransaction(ctx, coll->cid(),
AccessMode::Type::READ));
trx->addHint(transaction::Hints::Hint::NO_USAGE_LOCK);
Result res = trx->begin();
// These are only relevant for cluster
ignoreKeys.insert({"distributeShardsLike", "isSmart", "numberOfShards",
"replicationFactor", "shardKeys"});
auto ctx = transaction::V8Context::CreateWhenRequired(coll->vocbase(), true);
// populate the transaction object (which is used outside this if too)
trx.reset(new SingleCollectionTransaction(ctx, coll->cid(),
AccessMode::Type::READ));
// we actually need this hint here, so that the collection is not
// loaded if it has status unloaded.
trx->addHint(transaction::Hints::Hint::NO_USAGE_LOCK);
Result res = trx->begin();
if (res.fail()) {
return res;
}
}
// note that we have an ongoing transaction here if we are in single-server
// case
VPackBuilder props = coll->toVelocyPackIgnore(ignoreKeys, true, false);
TRI_ASSERT(builder.isOpenObject());
builder.add(VPackObjectIterator(props.slice()));

View File

@ -140,14 +140,16 @@ arangodb::Result Indexes::getAll(LogicalCollection const* collection,
tmp.close();
} else {
// add locks for consistency
SingleCollectionTransaction trx(
transaction::StandaloneContext::Create(collection->vocbase()),
collection->cid(), AccessMode::Type::READ);
// we actually need this hint here, so that the collection is not
// loaded if it has status unloaded.
trx.addHint(transaction::Hints::Hint::NO_USAGE_LOCK);
Result res = trx.begin();
if (!res.ok()) {
return res;
}

View File

@ -454,12 +454,12 @@ function CollectionSuite () {
inital[i] = idx.figures;
}
});
for(let i=0;i<10000;i++) {
c.outEdges("c/v"+(i/100));
c.inEdges("c/v"+(i/100));
for(let i = 0; i < 10000; i++) {
c.outEdges("c/v" + (i / 100));
c.inEdges("c/v" + (i / 100));
}
idxs = c.getIndexes(true);
// cache was filled same queries, hit rate must be biglier
// cache was filled with same queries, hit rate must be higher
idxs.forEach(function(idx, i) {
if (idx.figures.cacheInUse) {
assertTrue(Math.abs(inital[i].cacheSize - idx.figures.cacheSize) < 1024);
@ -470,14 +470,16 @@ function CollectionSuite () {
// cache size should be 0 after unload
c.unload();
idxs = c.getIndexes(true);
idxs.forEach(function(idx) {
assertEqual(idx.figures.cacheSize, 0, idx);
idxs.forEach(function(idx, i) {
if (idx.figures.cacheInUse) {
assertTrue(Math.abs(inital[i].cacheSize > idx.figures.cacheSize));
}
});
// lets do some reads
for(let i=0;i<10000;i++) {
c.outEdges("c/v"+(i/100));
c.inEdges("c/v"+(i/100));
for(let i = 0; i < 10000; i++) {
c.outEdges("c/v" + (i / 100));
c.inEdges("c/v" + (i / 100));
}
idxs = c.getIndexes(true);
// cache was empty, hit rate should be 0
@ -488,9 +490,9 @@ function CollectionSuite () {
}
});
for(let i=0;i<10000;i++) {
c.outEdges("c/v"+(i/100));
c.inEdges("c/v"+(i/100));
for(let i = 0; i < 10000; i++) {
c.outEdges("c/v" + (i / 100));
c.inEdges("c/v" + (i / 100));
}
idxs = c.getIndexes(true);
// cache was partially filled with same queries, lifetime hit rate

View File

@ -81,10 +81,12 @@ bool ReadWriteLock::tryReadLock() {
/// @brief releases the read-lock or write-lock
void ReadWriteLock::unlock() {
std::unique_lock<std::mutex> guard(_mut);
TRI_ASSERT(_state >= -1);
if (_state == -1) {
_state = 0;
_bell.notify_all();
} else {
TRI_ASSERT(_state > 0);
_state -= 1;
if (_state == 0) {
_bell.notify_all();

View File

@ -46,54 +46,45 @@
/// @brief cause a segmentation violation
#ifdef ARANGODB_ENABLE_FAILURE_TESTS
void TRI_SegfaultDebugging(char const*);
void TRI_SegfaultDebugging(char const* value);
#else
static inline void TRI_SegfaultDebugging(char const* unused) { (void)unused; }
inline constexpr void TRI_SegfaultDebugging(char const*) {}
#endif
/// @brief check whether we should fail at a failure point
#ifdef ARANGODB_ENABLE_FAILURE_TESTS
bool TRI_ShouldFailDebugging(char const*);
bool TRI_ShouldFailDebugging(char const* value);
#else
static inline bool TRI_ShouldFailDebugging(char const* unused) {
(void)unused;
return false;
}
inline constexpr bool TRI_ShouldFailDebugging(char const*) { return false; }
#endif
/// @brief add a failure point
#ifdef ARANGODB_ENABLE_FAILURE_TESTS
void TRI_AddFailurePointDebugging(char const*);
void TRI_AddFailurePointDebugging(char const* value);
#else
static inline void TRI_AddFailurePointDebugging(char const* unused) {
(void)unused;
}
inline constexpr void TRI_AddFailurePointDebugging(char const*) {}
#endif
/// @brief remove a failure point
#ifdef ARANGODB_ENABLE_FAILURE_TESTS
void TRI_RemoveFailurePointDebugging(char const*);
void TRI_RemoveFailurePointDebugging(char const* value);
#else
static inline void TRI_RemoveFailurePointDebugging(char const* unused) {
(void)unused;
}
inline constexpr void TRI_RemoveFailurePointDebugging(char const*) {}
#endif
/// @brief clear all failure points
#ifdef ARANGODB_ENABLE_FAILURE_TESTS
void TRI_ClearFailurePointsDebugging(void);
void TRI_ClearFailurePointsDebugging();
#else
static inline void TRI_ClearFailurePointsDebugging(void) {}
inline constexpr void TRI_ClearFailurePointsDebugging() {}
#endif
/// @brief returns whether failure point debugging can be used
static inline bool TRI_CanUseFailurePointsDebugging(void) {
#ifdef ARANGODB_ENABLE_FAILURE_TESTS
return true;
inline constexpr bool TRI_CanUseFailurePointsDebugging() { return true; }
#else
return false;
inline constexpr bool TRI_CanUseFailurePointsDebugging() { return false; }
#endif
}
/// @brief appends a backtrace to the string provided
void TRI_GetBacktrace(std::string& btstr);