1
0
Fork 0

Bug fix 3.4/reduce arangosearch rwlock waits (#9622)

This commit is contained in:
Jan 2019-08-02 14:26:17 +02:00 committed by GitHub
parent d64ad7ac0b
commit ab847839df
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
3 changed files with 19 additions and 15 deletions

View File

@ -99,9 +99,8 @@ void read_write_mutex::lock_read() {
SCOPED_LOCK_NAMED(mutex_, lock);
// yield if there is already a writer waiting
if (exclusive_count_) {
// wait for notification (possibly with writers waiting) or no more writers waiting
while (std::cv_status::timeout == reader_cond_.wait_for(lock, std::chrono::milliseconds(1000)) && exclusive_count_) {}
// wait for notification (possibly with writers waiting) or no more writers waiting
while (exclusive_count_ && std::cv_status::timeout == reader_cond_.wait_for(lock, std::chrono::milliseconds(100))) {
}
++concurrent_count_;
@ -121,7 +120,7 @@ void read_write_mutex::lock_write() {
// wait until lock is held exclusively by the current thread
while (concurrent_count_) {
try {
writer_cond_.wait_for(lock, std::chrono::milliseconds(1000));
writer_cond_.wait_for(lock, std::chrono::milliseconds(100));
} catch (...) {
// 'wait_for' may throw according to specification
}
@ -179,7 +178,7 @@ void read_write_mutex::unlock(bool exclusive_only /*= false*/) {
// if have write lock
if (owns_write()) {
if (exclusive_owner_recursion_count_) {
if (!exclusive_only) { // a recursively locked mutex is alway top-level write locked
if (!exclusive_only) { // a recursively locked mutex is always top-level write locked
--exclusive_owner_recursion_count_; // write recursion unlock one level
}
@ -187,13 +186,12 @@ void read_write_mutex::unlock(bool exclusive_only /*= false*/) {
}
ADOPT_SCOPED_LOCK_NAMED(mutex_, lock);
static std::thread::id unowned;
if (exclusive_only) {
++concurrent_count_; // aquire the read-lock
++concurrent_count_; // acquire the read-lock
}
exclusive_owner_.store(unowned);
exclusive_owner_.store(std::thread::id());
reader_cond_.notify_all(); // wake all readers and writers
writer_cond_.notify_all(); // wake all readers and writers
@ -216,6 +214,10 @@ void read_write_mutex::unlock(bool exclusive_only /*= false*/) {
--concurrent_count_;
#endif // IRESEARCH_DEBUG
// TODO: this should be changed to SCOPED_LOCK_NAMED, as right now it is not
// guaranteed that we can successfully acquire the mutex here. and if we don't,
// there is no guarantee that the notify_all will wake up queued waiter.
TRY_SCOPED_LOCK_NAMED(mutex_, lock); // try to acquire mutex for use with cond
// wake only writers since this is a reader

View File

@ -1,8 +1,10 @@
v3.4.8 (XXXX-XX-XX)
-------------------
* Decrease unnecessary wait times for agency callbacks in case they were
called earlier than expected by main thread
* Significantly reduced "random" 1 second delays in some arangosearch DDL operations.
* Decreased unnecessary wait times for agency callbacks in case they were
called earlier than expected by main thread.
* Upgraded arangodb starter version to 0.14.12

View File

@ -100,14 +100,14 @@ TEST_CASE("ContainersTest", "[iresearch][iresearch-containers]") {
cond.notify_all();
});
auto result0 = cond.wait_for(cond_lock, std::chrono::milliseconds(100));
auto result0 = cond.wait_for(cond_lock, std::chrono::milliseconds(50));
// MSVC 2015/2017 seems to sporadically notify condition variables without explicit request
MSVC2015_ONLY(while(!reset && result0 == std::cv_status::no_timeout) result0 = cond.wait_for(cond_lock, std::chrono::milliseconds(100)));
MSVC2017_ONLY(while(!reset && result0 == std::cv_status::no_timeout) result0 = cond.wait_for(cond_lock, std::chrono::milliseconds(100)));
MSVC2015_ONLY(while(!reset && result0 == std::cv_status::no_timeout) result0 = cond.wait_for(cond_lock, std::chrono::milliseconds(50)));
MSVC2017_ONLY(while(!reset && result0 == std::cv_status::no_timeout) result0 = cond.wait_for(cond_lock, std::chrono::milliseconds(50)));
lock.unlock();
auto result1 = cond.wait_for(cond_lock, std::chrono::milliseconds(100));
auto result1 = cond.wait_for(cond_lock, std::chrono::milliseconds(50));
cond_lock.unlock();
thread.join();
CHECK((std::cv_status::timeout == result0)); // check only after joining with thread to avoid early exit
@ -344,4 +344,4 @@ TEST_CASE("ContainersTest", "[iresearch][iresearch-containers]") {
// -----------------------------------------------------------------------------
// --SECTION-- END-OF-FILE
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------