
fix compile warnings with newer compilers (#2604)

Jan authored on 2017-06-22 12:28:19 +02:00; committed by Frank Celler
parent ef3a7a32e5
commit 921ab97bd5
2 changed files with 24 additions and 17 deletions


@@ -54,9 +54,9 @@ class NonCopyable {
 // prevent heap allocation
 struct NonHeapAllocatable {
-  void* operator new(std::size_t) throw(std::bad_alloc) = delete;
+  void* operator new(std::size_t) = delete;
   void operator delete(void*) noexcept = delete;
-  void* operator new[](std::size_t) throw(std::bad_alloc) = delete;
+  void* operator new[](std::size_t) = delete;
   void operator delete[](void*) noexcept = delete;
 };


@@ -65,22 +65,24 @@ namespace basics {
 /// instantiation for all values of Nr used in the executable.
 ////////////////////////////////////////////////////////////////////////////////
 
+#define DATA_PROTECTOR_MULTIPLICITY 64
+
 // TODO: Make this a template again once everybody has gcc >= 4.9.2
 // template<int Nr>
 class DataProtector {
-  static constexpr int DATA_PROTECTOR_MULTIPLICITY = 64;
 #ifdef _WIN32
   struct Entry {  // 64 is the size of a cache line,
 #else
   struct alignas(64) Entry {  // 64 is the size of a cache line,
 #endif
     // it is important that different list entries lie in different
     // cache lines.
     std::atomic<int> _count;
+
+    Entry() : _count(0) {}
   };
 
-  Entry* _list;
+  typename std::aligned_storage<sizeof(Entry), alignof(Entry)>::type _list[DATA_PROTECTOR_MULTIPLICITY];
 
   static std::atomic<int> _last;
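
The hunks in this file replace the heap-allocated Entry array with raw, correctly aligned in-class storage. Because Entry is declared alignas(64) (except on Windows), new Entry[...] is an over-aligned allocation, and newer compilers (for example gcc 7 with -Waligned-new) warn that it may not honour the requested alignment; an in-place std::aligned_storage array avoids that heap allocation altogether. The following is a standalone sketch of the pattern under illustrative names (Slot, SlotTable), not the DataProtector code itself.

#include <atomic>
#include <new>
#include <type_traits>

struct alignas(64) Slot {
  std::atomic<int> count{0};
};

class SlotTable {
  static constexpr int kSlots = 8;
  // Raw storage with the right size and alignment; Slot objects are
  // created in place, so no over-aligned heap allocation is needed.
  typename std::aligned_storage<sizeof(Slot), alignof(Slot)>::type _slots[kSlots];

 public:
  SlotTable() {
    for (int i = 0; i < kSlots; i++) {
      new (_slots + i) Slot();   // placement new into the raw storage
    }
  }

  ~SlotTable() {
    for (int i = 0; i < kSlots; i++) {
      slot(i)->~Slot();          // explicit destruction, mirroring the patch
    }
  }

  Slot* slot(int i) { return reinterpret_cast<Slot*>(_slots + i); }
};

int main() {
  SlotTable table;
  table.slot(3)->count++;
  return table.slot(3)->count.load();
}

Note that std::aligned_storage is itself deprecated in C++23; an alignas(Slot) unsigned char buffer, or a plain Slot array where default construction is acceptable, expresses the same idea on newer standards.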
@@ -116,25 +118,29 @@ class DataProtector {
     UnUser() = delete;
   };
 
-  DataProtector() : _list(nullptr) {
-    _list = new Entry[DATA_PROTECTOR_MULTIPLICITY];
-    // Just to be sure:
-    for (size_t i = 0; i < DATA_PROTECTOR_MULTIPLICITY; i++) {
-      _list[i]._count = 0;
+  DataProtector() {
+    // initialize uninitialized memory
+    for (int i = 0; i < DATA_PROTECTOR_MULTIPLICITY; i++) {
+      new (_list + i) Entry;
     }
   }
 
-  ~DataProtector() { delete[] _list; }
+  ~DataProtector() {
+    for (int i = 0; i < DATA_PROTECTOR_MULTIPLICITY; i++) {
+      reinterpret_cast<Entry*>(_list + i)->~Entry();
+    }
+  }
 
   UnUser use() {
     int id = getMyId();
-    _list[id]._count++;  // this is implicitly using memory_order_seq_cst
+    // this is implicitly using memory_order_seq_cst
+    reinterpret_cast<Entry*>(_list + id)->_count++;
     return UnUser(this, id);  // return value optimization!
   }
 
   void scan() {
-    for (size_t i = 0; i < DATA_PROTECTOR_MULTIPLICITY; i++) {
-      while (_list[i]._count > 0) {
+    for (int i = 0; i < DATA_PROTECTOR_MULTIPLICITY; i++) {
+      while (reinterpret_cast<Entry*>(_list + i)->_count > 0) {
         // let other threads do some work while we're waiting
         usleep(250);
       }
@@ -143,7 +149,8 @@ class DataProtector {
  private:
   void unUse(int id) {
-    _list[id]._count--;  // this is implicitly using memory_order_seq_cst
+    // this is implicitly using memory_order_seq_cst
+    reinterpret_cast<Entry*>(_list + id)->_count--;
   }
 
   int getMyId();
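
For context, the surrounding interface is unchanged by this commit: use() registers the calling thread in one of the slots and returns an RAII UnUser guard whose destruction ends up in unUse(), while scan() spins until every slot's count has dropped back to zero, presumably so a writer can retire shared data only after no reader is still inside a use() section. The sketch below is a hypothetical, single-slot simplification of that handshake, not the ArangoDB implementation; it replaces getMyId() and usleep() with a fixed slot and std::this_thread::sleep_for.

#include <atomic>
#include <chrono>
#include <thread>

// Hypothetical, much simplified single-slot protector: readers announce
// themselves via use(), the writer waits for them in scan().
class TinyProtector {
  std::atomic<int> _count{0};

 public:
  class UnUser {   // RAII guard, analogous to DataProtector::UnUser
    TinyProtector* _prot;

   public:
    explicit UnUser(TinyProtector* prot) : _prot(prot) {}
    UnUser(UnUser&& other) : _prot(other._prot) { other._prot = nullptr; }
    UnUser(UnUser const&) = delete;
    ~UnUser() {
      if (_prot != nullptr) {
        _prot->_count--;   // corresponds to unUse()
      }
    }
  };

  UnUser use() {
    _count++;   // implicitly memory_order_seq_cst, as in the patch
    return UnUser(this);
  }

  void scan() {
    while (_count > 0) {   // wait until all current readers are gone
      std::this_thread::sleep_for(std::chrono::microseconds(250));
    }
  }
};

int main() {
  TinyProtector protector;
  std::atomic<int*> shared{new int(42)};

  std::thread reader([&] {
    auto guard = protector.use();   // announce the read ...
    int value = *shared.load();     // ... then it is safe to dereference
    (void)value;
    // guard is destroyed at the end of the lambda, releasing the slot
  });

  int* old = shared.exchange(new int(43));
  protector.scan();                 // wait for readers still using the old value
  delete old;                       // now safe to free it

  reader.join();
  delete shared.load();
  return 0;
}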