1
0
Fork 0

backport recent changes in iresearch from upstream (#6794)

This commit is contained in:
Andrey Abramov 2018-10-10 16:53:43 +03:00 committed by GitHub
parent 730f47eb0c
commit 4177bd15b1
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
21 changed files with 263 additions and 99 deletions

View File

@ -765,6 +765,8 @@ install:
GTEST_URL="https://github.com/google/googletest.git" GTEST_URL="https://github.com/google/googletest.git"
cd ${DEPS_DIR} cd ${DEPS_DIR}
travis_retry git clone --depth 1 --recursive --quiet ${GTEST_URL} ${GTEST_DIR} || exit 1 travis_retry git clone --depth 1 --recursive --quiet ${GTEST_URL} ${GTEST_DIR} || exit 1
travis_retry git fetch --tags
travis_retry git checkout tags/release-1.8.0
fi fi
export GTEST_ROOT=${GTEST_DIR}/googletest export GTEST_ROOT=${GTEST_DIR}/googletest
@ -805,7 +807,7 @@ script:
make iresearch-benchmarks make iresearch-benchmarks
fi fi
# execute tests # execute tests
ulimit -n 1024 # required for MacOS ulimit -n 5120 # required for MacOS (max-open-files = 16 threads * (100000/10000) commits * 8 segments-per-commit * 4 open-files-per-segment)
ulimit -a ulimit -a
if ! travis_wait 60 ./bin/iresearch-tests${TEST_EXECUTABLE_SUFFIX} ${TEST_PARAMS}; then if ! travis_wait 60 ./bin/iresearch-tests${TEST_EXECUTABLE_SUFFIX} ${TEST_PARAMS}; then
exit 1 exit 1

View File

@ -29,9 +29,16 @@ if (USE_VALGRIND)
add_definitions(-DIRESEARCH_VALGRIND) add_definitions(-DIRESEARCH_VALGRIND)
endif() endif()
if (MSVC AND MSVC_BUILD_THREADS) if (MSVC)
set(CMAKE_C_FLAGS "/MP${MSVC_BUILD_THREADS} ${CMAKE_C_FLAGS}") # FIXME TODO find a workaround or do not use alignas(...)
set(CMAKE_CXX_FLAGS "/MP${MSVC_BUILD_THREADS} ${CMAKE_CXX_FLAGS}") # MSVC2017.1 - MSVC2018.7 does not correctly support alignas()
# MSVC2017.8 requires the following define
add_definitions(-D_ENABLE_EXTENDED_ALIGNED_STORAGE)
if (MSVC_BUILD_THREADS)
set(CMAKE_C_FLAGS "/MP${MSVC_BUILD_THREADS} ${CMAKE_C_FLAGS}")
set(CMAKE_CXX_FLAGS "/MP${MSVC_BUILD_THREADS} ${CMAKE_CXX_FLAGS}")
endif()
endif() endif()
################################################################################ ################################################################################

View File

@ -29,7 +29,7 @@ number of components produced by third parties
## Third Party Software used by IResearch search engine ## Third Party Software used by IResearch search engine
1. Title: OpenFST 1. Title: OpenFST
- Copyright: Cyril Allauzen, Michael Riley - Copyright: Cyril Allauzen, Michael Riley
- License: [Apache 2.0](http://www.apache.org/licenses/LICENSE-2.0) - License: [Apache 2.0](http://www.apache.org/licenses/LICENSE-2.0)
- How it's used: Part of the source code tree - How it's used: Part of the source code tree
@ -41,7 +41,7 @@ number of components produced by third parties
- Copyright: Beman Dawes, David Abrahams, 1998-2005, Rene Rivera 2004-2005 - Copyright: Beman Dawes, David Abrahams, 1998-2005, Rene Rivera 2004-2005
- License: [Boost Software License, Version 1.0](http://www.boost.org/LICENSE_1_0.txt) - License: [Boost Software License, Version 1.0](http://www.boost.org/LICENSE_1_0.txt)
- How it's used: Statically or dynamically linked into the library - How it's used: Statically or dynamically linked into the library
4. Title: Lz4 4. Title: Lz4
- Copyright: Yann Collet - Copyright: Yann Collet
- License: [BSD 2-clauses](https://opensource.org/licenses/bsd-license.php) - License: [BSD 2-clauses](https://opensource.org/licenses/bsd-license.php)
- How it's used: Statically or dynamically linked into the library - How it's used: Statically or dynamically linked into the library
@ -69,6 +69,10 @@ number of components produced by third parties
- Copyright: Hideyuki Tanaka - Copyright: Hideyuki Tanaka
- License: [BSD 3-clause](https://github.com/tanakh/cmdline/blob/master/LICENSE) - License: [BSD 3-clause](https://github.com/tanakh/cmdline/blob/master/LICENSE)
- How it's used: command line parser - How it's used: command line parser
11. Title: simdcomp
- Copyright: Daniel Lemire
- License: [BSD 3-clause](https://github.com/lemire/simdcomp/blob/master/LICENSE)
- How it's used: compression library
## Third Party Open Source Software licenses ## Third Party Open Source Software licenses
@ -734,3 +738,32 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
### simdcomp
Copyright (c) 2014--, The authors
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.
* Neither the name of the {organization} nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -177,6 +177,26 @@ install:
- set GTEST_URL="https://github.com/google/googletest.git" - set GTEST_URL="https://github.com/google/googletest.git"
- cd %DEPS_DIR% - cd %DEPS_DIR%
- appveyor-retry git clone --depth 1 --recursive --quiet %GTEST_URL% %GTEST_DIR% || exit 1 - appveyor-retry git clone --depth 1 --recursive --quiet %GTEST_URL% %GTEST_DIR% || exit 1
- cd %GTEST_DIR%
- appveyor-retry git fetch --tags
- appveyor-retry git checkout tags/release-1.8.0
- cd googletest
- mkdir build && cd build
- cmake -G %APPVEYOR_BUILD_WORKER_IMAGE% -Ax64 -Dgtest_force_shared_crt=ON -DINSTALL_GTEST=ON -DBUILD_GMOCK=OFF -DCMAKE_DEBUG_POSTFIX="" ..
- cmake --build . %CMAKE_BUILD_OPTIONS%
# - cmake --install .
- mkdir %GTEST_DIR%\googletest\lib
- dir .. || true
- dir ..\googletest || true
- dir ..\include || true
- dir || true
- dir x64 || true
- dir x64\%CONFIGURATION% || true
- dir %CONFIGURATION% || true
- dir lib || true
# - move /Y lib\%CONFIGURATION%\* ..\lib || true
# - move /Y x64\%CONFIGURATION%\* ..\lib || true
- move /Y %CONFIGURATION%\* %GTEST_DIR%\googletest\lib || true
- set GTEST_ROOT=%GTEST_DIR%\googletest - set GTEST_ROOT=%GTEST_DIR%\googletest
############################################################################ ############################################################################

View File

@ -45,11 +45,6 @@ if (MSVC)
# disable "checked iterators" feature # disable "checked iterators" feature
add_definitions(-D_SCL_SECURE_NO_WARNINGS) add_definitions(-D_SCL_SECURE_NO_WARNINGS)
# FIXME TODO find a workaround or do not use alignas(...)
# MSVC2018.1 - MSVC2018.7 does not correctly support alignas()
# MSVC2018.8 requires the following define
add_definitions(-D_ENABLE_EXTENDED_ALIGNED_STORAGE)
# set OS specific sources # set OS specific sources
set(IResearch_core_os_specific_sources set(IResearch_core_os_specific_sources
./utils/mman_win32.cpp ./utils/mman_win32.cpp
@ -316,6 +311,7 @@ add_custom_command(
MAIN_DEPENDENCY ${CMAKE_CURRENT_SOURCE_DIR}/iql/parser.yy MAIN_DEPENDENCY ${CMAKE_CURRENT_SOURCE_DIR}/iql/parser.yy
DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/iql/parser.yy DEPENDS ${CMAKE_CURRENT_SOURCE_DIR}/iql/parser.yy
COMMAND ${CMAKE_COMMAND} -E make_directory iql COMMAND ${CMAKE_COMMAND} -E make_directory iql
COMMAND ${CMAKE_COMMAND} -E md5sum iql/parser.cc || ${CMAKE_COMMAND} -E remove iql/parser.yy
COMMAND ${CMAKE_COMMAND} -E compare_files ${CMAKE_CURRENT_SOURCE_DIR}/iql/parser.yy iql/parser.yy || bison --graph --report=all -Wnone -o iql/parser.cc ${CMAKE_CURRENT_SOURCE_DIR}/iql/parser.yy COMMAND ${CMAKE_COMMAND} -E compare_files ${CMAKE_CURRENT_SOURCE_DIR}/iql/parser.yy iql/parser.yy || bison --graph --report=all -Wnone -o iql/parser.cc ${CMAKE_CURRENT_SOURCE_DIR}/iql/parser.yy
COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/iql/parser.yy iql/parser.yy COMMAND ${CMAKE_COMMAND} -E copy ${CMAKE_CURRENT_SOURCE_DIR}/iql/parser.yy iql/parser.yy
) )

View File

@ -3051,6 +3051,7 @@ bool writer::flush() {
data_out_->write_long(block_index_ptr); data_out_->write_long(block_index_ptr);
format_utils::write_footer(*data_out_); format_utils::write_footer(*data_out_);
data_out_.reset(); data_out_.reset();
columns_.clear(); // ensure next flush (without prepare(...)) will use the section without 'data_out_'
return true; return true;
} }

View File

@ -782,7 +782,7 @@ void index_writer::flush_context::emplace(active_segment_context&& segment) {
if (!ctx.dirty_) { if (!ctx.dirty_) {
assert(freelist_node); assert(freelist_node);
assert(segment.ctx_.use_count() == 2); // +1 for 'active_segment_context::ctx_', +1 for 'pending_segment_context::segment_' assert(segment.ctx_.use_count() == 2); // +1 for 'active_segment_context::ctx_', +1 for 'pending_segment_context::segment_'
segment = std::move(active_segment_context()); // reset before adding to freelist to guarantee proper use_count() in get_segment_context(...) segment = active_segment_context(); // reset before adding to freelist to guarantee proper use_count() in get_segment_context(...)
pending_segment_contexts_freelist_.push(*freelist_node); // add segment_context to free-list pending_segment_contexts_freelist_.push(*freelist_node); // add segment_context to free-list
} }
} }
@ -1170,10 +1170,10 @@ bool index_writer::consolidate(
std::set<const segment_meta*> candidates; std::set<const segment_meta*> candidates;
// hold reference to the last committed state // hold a reference to the last committed state to prevent files from being
// to prevent files to be deleted by a cleaner // deleted by a cleaner during the upcoming consolidation
// during upcoming consolidation // use atomic_load(...) since finish() may modify the pointer
const auto committed_state = committed_state_; auto committed_state = committed_state_helper::atomic_load(&committed_state_);
assert(committed_state); assert(committed_state);
auto committed_meta = committed_state->first; auto committed_meta = committed_state->first;
assert(committed_meta); assert(committed_meta);
@ -1239,7 +1239,7 @@ bool index_writer::consolidate(
if (found != candidates.size()) { if (found != candidates.size()) {
// not all candidates are valid // not all candidates are valid
IR_FRMT_WARN( IR_FRMT_WARN(
"Failed to start consolidation for index generation '" IR_SIZE_T_SPECIFIER "', found only '" IR_SIZE_T_SPECIFIER "' out of '" IR_SIZE_T_SPECIFIER "' candidates", "Failed to start consolidation for index generation '" IR_UINT64_T_SPECIFIER "', found only '" IR_SIZE_T_SPECIFIER "' out of '" IR_SIZE_T_SPECIFIER "' candidates",
committed_meta->generation(), committed_meta->generation(),
found, found,
candidates.size() candidates.size()
@ -1608,7 +1608,7 @@ index_writer::pending_context_t index_writer::flush_all() {
assert(modifications_begin <= modifications_end); assert(modifications_begin <= modifications_end);
assert(modifications_end <= modifications.segment_->modification_queries_.size()); assert(modifications_end <= modifications.segment_->modification_queries_.size());
modification_contexts_ref modification_queries( modification_contexts_ref modification_queries(
&(modifications.segment_->modification_queries_[modifications_begin]), modifications.segment_->modification_queries_.data() + modifications_begin,
modifications_end - modifications_begin modifications_end - modifications_begin
); );
@ -1717,7 +1717,7 @@ index_writer::pending_context_t index_writer::flush_all() {
assert(modifications_begin <= modifications_end); assert(modifications_begin <= modifications_end);
assert(modifications_end <= modifications.segment_->modification_queries_.size()); assert(modifications_end <= modifications.segment_->modification_queries_.size());
modification_contexts_ref modification_queries( modification_contexts_ref modification_queries(
&(modifications.segment_->modification_queries_[modifications_begin]), modifications.segment_->modification_queries_.data() + modifications_begin,
modifications_end - modifications_begin modifications_end - modifications_begin
); );
@ -1837,11 +1837,11 @@ index_writer::pending_context_t index_writer::flush_all() {
} }
modification_contexts_ref segment_modification_contexts( modification_contexts_ref segment_modification_contexts(
&(pending_segment_context.segment_->modification_queries_[0]), pending_segment_context.segment_->modification_queries_.data(),
pending_segment_context.segment_->modification_queries_.size() pending_segment_context.segment_->modification_queries_.size()
); );
update_contexts_ref flush_update_contexts( update_contexts_ref flush_update_contexts(
&(pending_segment_context.segment_->flushed_update_contexts_[flushed_docs_start]), pending_segment_context.segment_->flushed_update_contexts_.data() + flushed_docs_start,
flushed.meta.docs_count flushed.meta.docs_count
); );
@ -1893,7 +1893,7 @@ index_writer::pending_context_t index_writer::flush_all() {
assert(modifications_begin <= modifications_end); assert(modifications_begin <= modifications_end);
assert(modifications_end <= modifications.segment_->modification_queries_.size()); assert(modifications_end <= modifications.segment_->modification_queries_.size());
modification_contexts_ref modification_queries( modification_contexts_ref modification_queries(
&(modifications.segment_->modification_queries_[modifications_begin]), modifications.segment_->modification_queries_.data() + modifications_begin,
modifications_end - modifications_begin modifications_end - modifications_begin
); );
@ -2040,7 +2040,9 @@ void index_writer::finish() {
// after here the transaction is successful (only noexcept operations below) // after here the transaction is successful (only noexcept operations below)
// ........................................................................... // ...........................................................................
committed_state_ = std::move(pending_state_.commit); committed_state_helper::atomic_store(
&committed_state_, std::move(pending_state_.commit)
);
meta_.last_gen_ = committed_state_->first->gen_; // update 'last_gen_' to last commited/valid generation meta_.last_gen_ = committed_state_->first->gen_; // update 'last_gen_' to last commited/valid generation
pending_state_.reset(); // flush is complete, release reference to flush_context pending_state_.reset(); // flush is complete, release reference to flush_context
} }

View File

@ -93,8 +93,11 @@ ENABLE_BITMASK_ENUM(OpenMode);
/// the same directory simultaneously. /// the same directory simultaneously.
/// Thread safe. /// Thread safe.
//////////////////////////////////////////////////////////////////////////////// ////////////////////////////////////////////////////////////////////////////////
class IRESEARCH_API index_writer : util::noncopyable { class IRESEARCH_API index_writer:
private atomic_shared_ptr_helper<std::pair<
std::shared_ptr<index_meta>, std::vector<index_file_refs::ref_t>
>>,
private util::noncopyable {
private: private:
struct flush_context; // forward declaration struct flush_context; // forward declaration
struct segment_context; // forward declaration struct segment_context; // forward declaration
@ -112,7 +115,7 @@ class IRESEARCH_API index_writer : util::noncopyable {
/// @brief segment references given out by flush_context to allow tracking /// @brief segment references given out by flush_context to allow tracking
/// and updating flush_context::pending_segment_context /// and updating flush_context::pending_segment_context
////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////
class active_segment_context: private util::noncopyable { // non-copyable to ensure only one copy for get/put class IRESEARCH_API active_segment_context: private util::noncopyable { // non-copyable to ensure only one copy for get/put
public: public:
active_segment_context() = default; active_segment_context() = default;
active_segment_context( active_segment_context(
@ -281,13 +284,15 @@ class IRESEARCH_API index_writer : util::noncopyable {
auto rollback_extra = auto rollback_extra =
writer.docs_cached() + doc_limits::min() - uncomitted_doc_id_begin; // ensure reset() will be noexcept writer.docs_cached() + doc_limits::min() - uncomitted_doc_id_begin; // ensure reset() will be noexcept
rollback.set(writer.docs_cached()); rollback.reserve(writer.docs_cached() + 1); // reserve space for rollback
if (doc_limits::eof(writer.docs_cached()) if (integer_traits<doc_id_t>::const_max <= writer.docs_cached() + doc_limits::min()
|| doc_limits::eof(writer.begin(update, rollback_extra))) { || doc_limits::eof(writer.begin(update, rollback_extra))) {
break; // the segment cannot fit any more docs, must roll back break; // the segment cannot fit any more docs, must roll back
} }
assert(writer.docs_cached());
rollback.set(writer.docs_cached() - 1); // 0-based
segment->buffered_docs_.store(writer.docs_cached()); segment->buffered_docs_.store(writer.docs_cached());
auto done = !func(doc); auto done = !func(doc);
@ -314,7 +319,8 @@ class IRESEARCH_API index_writer : util::noncopyable {
for (auto i = rollback.size(); i && rollback.any();) { for (auto i = rollback.size(); i && rollback.any();) {
if (rollback.test(--i)) { if (rollback.test(--i)) {
rollback.unset(i); // if new doc_ids at end this allows to terminate 'for' earlier rollback.unset(i); // if new doc_ids at end this allows to terminate 'for' earlier
writer.remove(i + type_limits<type_t::doc_id_t>::min()); // convert to doc_id assert(integer_traits<doc_id_t>::const_max >= i + doc_limits::min());
writer.remove(doc_id_t(i + doc_limits::min())); // convert to doc_id
} }
} }
@ -687,7 +693,7 @@ class IRESEARCH_API index_writer : util::noncopyable {
/// @note segment_writer::doc_contexts[...uncomitted_document_contexts_): generation == flush_context::generation /// @note segment_writer::doc_contexts[...uncomitted_document_contexts_): generation == flush_context::generation
/// @note segment_writer::doc_contexts[uncomitted_document_contexts_...]: generation == local generation (updated when segment_context registered once again with flush_context) /// @note segment_writer::doc_contexts[uncomitted_document_contexts_...]: generation == local generation (updated when segment_context registered once again with flush_context)
////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////
struct IRESEARCH_API segment_context { struct IRESEARCH_API segment_context { // IRESEARCH_API because of make_update_context(...)/remove(...) used by documents_context::replace(...)/documents_context::remove(...)
struct flushed_t: public index_meta::index_segment_t { struct flushed_t: public index_meta::index_segment_t {
doc_id_t docs_mask_tail_doc_id{integer_traits<doc_id_t>::max()}; // starting doc_id that should be added to docs_mask doc_id_t docs_mask_tail_doc_id{integer_traits<doc_id_t>::max()}; // starting doc_id that should be added to docs_mask
flushed_t() = default; flushed_t() = default;
@ -695,8 +701,8 @@ class IRESEARCH_API index_writer : util::noncopyable {
: index_meta::index_segment_t(std::move(meta)) {} : index_meta::index_segment_t(std::move(meta)) {}
}; };
typedef std::function<segment_meta()> segment_meta_generator_t; typedef std::function<segment_meta()> segment_meta_generator_t;
DECLARE_SHARED_PTR(segment_context); DECLARE_SHARED_PTR(segment_context);
std::atomic<size_t> active_count_; // number of active in-progress operations (insert/replace) (e.g. document instances or replace(...)) std::atomic<size_t> active_count_; // number of active in-progress operations (insert/replace) (e.g. document instances or replace(...))
std::atomic<size_t> buffered_docs_; // for use with index_writer::buffered_docs() asynchronous call std::atomic<size_t> buffered_docs_; // for use with index_writer::buffered_docs() asynchronous call
format::ptr codec_; // the codec to used for flushing a segment writer format::ptr codec_; // the codec to used for flushing a segment writer
@ -761,6 +767,9 @@ class IRESEARCH_API index_writer : util::noncopyable {
std::pair<std::shared_ptr<index_meta>, std::pair<std::shared_ptr<index_meta>,
file_refs_t file_refs_t
>> committed_state_t; >> committed_state_t;
typedef atomic_shared_ptr_helper<
committed_state_t::element_type
> committed_state_helper;
typedef unbounded_object_pool<segment_context> segment_pool_t; typedef unbounded_object_pool<segment_context> segment_pool_t;
@ -771,7 +780,7 @@ class IRESEARCH_API index_writer : util::noncopyable {
/// 'segment_context' is not used once the tracker 'flush_context' is no /// 'segment_context' is not used once the tracker 'flush_context' is no
/// longer active /// longer active
////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////
struct IRESEARCH_API flush_context { struct flush_context {
typedef concurrent_stack<size_t> freelist_t; // 'value' == node offset into 'pending_segment_context_' typedef concurrent_stack<size_t> freelist_t; // 'value' == node offset into 'pending_segment_context_'
struct pending_segment_context: public freelist_t::node_type { struct pending_segment_context: public freelist_t::node_type {
const size_t doc_id_begin_; // starting segment_context::document_contexts_ for this flush_context range [pending_segment_context::doc_id_begin_, std::min(pending_segment_context::doc_id_end_, segment_context::uncomitted_doc_ids_)) const size_t doc_id_begin_; // starting segment_context::document_contexts_ for this flush_context range [pending_segment_context::doc_id_begin_, std::min(pending_segment_context::doc_id_end_, segment_context::uncomitted_doc_ids_))
@ -948,4 +957,4 @@ class IRESEARCH_API index_writer : util::noncopyable {
NS_END NS_END
#endif #endif

View File

@ -87,7 +87,7 @@ struct empty_term_iterator final : irs::term_iterator {
////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////
struct empty_term_reader final : irs::singleton<empty_term_reader>, irs::term_reader { struct empty_term_reader final : irs::singleton<empty_term_reader>, irs::term_reader {
virtual iresearch::seek_term_iterator::ptr iterator() const override { return nullptr; } virtual iresearch::seek_term_iterator::ptr iterator() const override { return nullptr; }
virtual const iresearch::field_meta& meta() const override { virtual const iresearch::field_meta& meta() const override {
return irs::field_meta::EMPTY; return irs::field_meta::EMPTY;
} }
@ -102,13 +102,13 @@ struct empty_term_reader final : irs::singleton<empty_term_reader>, irs::term_re
virtual uint64_t docs_count() const override { return 0; } virtual uint64_t docs_count() const override { return 0; }
// less significant term // less significant term
virtual const iresearch::bytes_ref& (min)() const override { virtual const iresearch::bytes_ref& (min)() const override {
return iresearch::bytes_ref::NIL; return iresearch::bytes_ref::NIL;
} }
// most significant term // most significant term
virtual const iresearch::bytes_ref& (max)() const override { virtual const iresearch::bytes_ref& (max)() const override {
return iresearch::bytes_ref::NIL; return iresearch::bytes_ref::NIL;
} }
}; // empty_term_reader }; // empty_term_reader

View File

@ -52,12 +52,20 @@ doc_id_t segment_writer::begin(
assert(docs_cached() + type_limits<type_t::doc_id_t>::min() - 1 < type_limits<type_t::doc_id_t>::eof()); assert(docs_cached() + type_limits<type_t::doc_id_t>::min() - 1 < type_limits<type_t::doc_id_t>::eof());
valid_ = true; valid_ = true;
norm_fields_.clear(); // clear norm fields norm_fields_.clear(); // clear norm fields
docs_mask_.reserve(
docs_mask_.size() + 1 + reserve_rollback_extra if (docs_mask_.capacity() <= docs_mask_.size() + 1 + reserve_rollback_extra) {
); // reserve space for potential rollback docs_mask_.reserve(
math::roundup_power2(docs_mask_.size() + 1 + reserve_rollback_extra) // reserve in blocks of power-of-2
); // reserve space for potential rollback
}
if (docs_context_.size() >= docs_context_.capacity()) {
docs_context_.reserve(math::roundup_power2(docs_context_.size() + 1)); // reserve in blocks of power-of-2
}
docs_context_.emplace_back(ctx); docs_context_.emplace_back(ctx);
return docs_cached() + type_limits<type_t::doc_id_t>::min() - 1; // -1 for 0-based offset return doc_id_t(docs_cached() + type_limits<type_t::doc_id_t>::min() - 1); // -1 for 0-based offset
} }
segment_writer::ptr segment_writer::make(directory& dir) { segment_writer::ptr segment_writer::make(directory& dir) {
@ -68,15 +76,21 @@ segment_writer::ptr segment_writer::make(directory& dir) {
size_t segment_writer::memory() const NOEXCEPT { size_t segment_writer::memory() const NOEXCEPT {
return sizeof(segment_writer) return sizeof(segment_writer)
+ (sizeof(update_contexts::value_type) * docs_context_.size()) + (sizeof(update_contexts::value_type) * docs_context_.size())
+ (sizeof(document_mask::value_type) * docs_mask_.size()) + (sizeof(bitvector) + docs_mask_.count() * sizeof(bitvector::word_t))
+ fields_.memory() + fields_.memory()
; ;
} }
bool segment_writer::remove(doc_id_t doc_id) { bool segment_writer::remove(doc_id_t doc_id) {
return type_limits<type_t::doc_id_t>::valid(doc_id) if (!type_limits<type_t::doc_id_t>::valid(doc_id)
&& (doc_id - type_limits<type_t::doc_id_t>::min()) < docs_cached() || (doc_id - type_limits<type_t::doc_id_t>::min()) >= docs_cached()
&& docs_mask_.insert(doc_id).second; || docs_mask_.test(doc_id - type_limits<type_t::doc_id_t>::min())) {
return false;
}
docs_mask_.set(doc_id - type_limits<type_t::doc_id_t>::min());
return true;
} }
segment_writer::segment_writer(directory& dir) NOEXCEPT segment_writer::segment_writer(directory& dir) NOEXCEPT
@ -188,16 +202,33 @@ bool segment_writer::flush(std::string& filename, segment_meta& meta) {
fields_.flush(*field_writer_, state); fields_.flush(*field_writer_, state);
} }
size_t docs_mask_count = 0;
// write non-empty document mask // write non-empty document mask
if (!docs_mask_.empty()) { if (docs_mask_.any()) {
document_mask docs_mask;
auto writer = meta.codec->get_document_mask_writer(); auto writer = meta.codec->get_document_mask_writer();
writer->write(dir_, meta, docs_mask_); docs_mask.reserve(docs_mask_.size());
for (size_t doc_id = 0, doc_id_end = docs_mask_.size();
doc_id < doc_id_end;
++doc_id) {
if (docs_mask_.test(doc_id)) {
assert(size_t(integer_traits<doc_id_t>::const_max) >= doc_id + type_limits<type_t::doc_id_t>::min());
docs_mask.emplace(
doc_id_t(doc_id + type_limits<type_t::doc_id_t>::min())
);
}
}
writer->write(dir_, meta, docs_mask);
docs_mask_count = docs_mask.size();
} }
assert(docs_cached() >= docs_mask_.size()); assert(docs_cached() >= docs_mask_count);
meta.docs_count = docs_cached(); meta.docs_count = docs_cached();
meta.live_docs_count = meta.docs_count - docs_mask_.size(); meta.live_docs_count = meta.docs_count - docs_mask_count;
meta.files.clear(); // prepare an empty set to be swapped into dir_ meta.files.clear(); // prepare an empty set to be swapped into dir_
if (!dir_.swap_tracked(meta.files)) { if (!dir_.swap_tracked(meta.files)) {

View File

@ -27,6 +27,7 @@
#include "field_data.hpp" #include "field_data.hpp"
#include "analysis/token_stream.hpp" #include "analysis/token_stream.hpp"
#include "formats/formats.hpp" #include "formats/formats.hpp"
#include "utils/bitvector.hpp"
#include "utils/directory_utils.hpp" #include "utils/directory_utils.hpp"
#include "utils/noncopyable.hpp" #include "utils/noncopyable.hpp"
#include "utils/type_limits.hpp" #include "utils/type_limits.hpp"
@ -214,7 +215,8 @@ class IRESEARCH_API segment_writer: util::noncopyable {
// implicitly NOEXCEPT since we reserve memory in 'begin' // implicitly NOEXCEPT since we reserve memory in 'begin'
void rollback() { void rollback() {
// mark as removed since not fully inserted // mark as removed since not fully inserted
remove(docs_cached() + type_limits<type_t::doc_id_t>::min() - 1); // -1 for 0-based offset assert(docs_cached() + type_limits<type_t::doc_id_t>::min() - 1 < type_limits<type_t::doc_id_t>::eof()); // user should check return of begin() != eof()
remove(doc_id_t(docs_cached() + type_limits<type_t::doc_id_t>::min() - 1)); // -1 for 0-based offset
valid_ = false; valid_ = false;
} }
@ -329,7 +331,7 @@ class IRESEARCH_API segment_writer: util::noncopyable {
IRESEARCH_API_PRIVATE_VARIABLES_BEGIN IRESEARCH_API_PRIVATE_VARIABLES_BEGIN
update_contexts docs_context_; update_contexts docs_context_;
document_mask docs_mask_; // invalid/removed doc_ids (e.g. partially indexed due to indexing failure) bitvector docs_mask_; // invalid/removed doc_ids (e.g. partially indexed due to indexing failure)
fields_data fields_; fields_data fields_;
std::unordered_map<hashed_string_ref, column> columns_; std::unordered_map<hashed_string_ref, column> columns_;
std::unordered_set<field_data*> norm_fields_; // document fields for normalization std::unordered_set<field_data*> norm_fields_; // document fields for normalization

View File

@ -163,10 +163,11 @@
|| ((_MSC_FULL_VER >= 191125542) && (_MSC_FULL_VER <= 191125547)) \ || ((_MSC_FULL_VER >= 191125542) && (_MSC_FULL_VER <= 191125547)) \
|| ((_MSC_FULL_VER >= 191225830) && (_MSC_FULL_VER <= 191225835)) \ || ((_MSC_FULL_VER >= 191225830) && (_MSC_FULL_VER <= 191225835)) \
|| ((_MSC_FULL_VER >= 191326128) && (_MSC_FULL_VER <= 191326132)) \ || ((_MSC_FULL_VER >= 191326128) && (_MSC_FULL_VER <= 191326132)) \
|| ((_MSC_FULL_VER >= 191426430) && (_MSC_FULL_VER <= 191426433))) || ((_MSC_FULL_VER >= 191426430) && (_MSC_FULL_VER <= 191426433)) \
#define MSVC2017_34567_OPTIMIZED_WORKAROUND(...) __VA_ARGS__ || ((_MSC_FULL_VER >= 191526726) && (_MSC_FULL_VER <= 191526730)))
#define MSVC2017_345678_OPTIMIZED_WORKAROUND(...) __VA_ARGS__
#else #else
#define MSVC2017_34567_OPTIMIZED_WORKAROUND(...) #define MSVC2017_345678_OPTIMIZED_WORKAROUND(...)
#endif #endif
// hook for MSVC-only code // hook for MSVC-only code

View File

@ -883,7 +883,7 @@ inline bool read_block_rl32(
base = in.read_vint(); base = in.read_vint();
avg = in.read_vint(); avg = in.read_vint();
const uint32_t bits = in.read_vint(); const uint32_t bits = in.read_vint();
const uint32_t value = in.read_vlong(); const uint32_t value = in.read_vint();
return bitpack::ALL_EQUAL == bits return bitpack::ALL_EQUAL == bits
&& 0 == value; // delta && 0 == value; // delta

View File

@ -71,6 +71,7 @@ class IRESEARCH_API read_write_mutex final {
class read_mutex { class read_mutex {
public: public:
read_mutex(read_write_mutex& mutex): mutex_(mutex) {} read_mutex(read_write_mutex& mutex): mutex_(mutex) {}
read_mutex& operator=(read_mutex&) = delete; // because of reference
void lock() { mutex_.lock_read(); } void lock() { mutex_.lock_read(); }
bool try_lock() { return mutex_.try_lock_read(); } bool try_lock() { return mutex_.try_lock_read(); }
void unlock() { mutex_.unlock(); } void unlock() { mutex_.unlock(); }
@ -82,6 +83,7 @@ class IRESEARCH_API read_write_mutex final {
class write_mutex { class write_mutex {
public: public:
write_mutex(read_write_mutex& mutex): mutex_(mutex) {} write_mutex(read_write_mutex& mutex): mutex_(mutex) {}
write_mutex& operator=(write_mutex&) = delete; // because of reference
void lock() { mutex_.lock_write(); } void lock() { mutex_.lock_write(); }
bool owns_write() { return mutex_.owns_write(); } bool owns_write() { return mutex_.owns_write(); }
bool try_lock() { return mutex_.try_lock_write(); } bool try_lock() { return mutex_.try_lock_write(); }

View File

@ -678,9 +678,9 @@ class block_pool_sliced_inserter : public std::iterator < std::output_iterator_t
block_pool_sliced_inserter& operator++() { return *this; } block_pool_sliced_inserter& operator++() { return *this; }
// MSVC 2017.3 through 2017.7 incorectly count offsets if this function is inlined during optimization // MSVC 2017.3 through 2017.8 incorectly count offsets if this function is inlined during optimization
// MSVC 2017.2 and below work correctly for both debug and release // MSVC 2017.2 and below work correctly for both debug and release
MSVC2017_34567_OPTIMIZED_WORKAROUND(__declspec(noinline)) MSVC2017_345678_OPTIMIZED_WORKAROUND(__declspec(noinline))
void write(const_pointer b, size_t len) { void write(const_pointer b, size_t len) {
// find end of the slice // find end of the slice
for (; 0 == *where_ && len; --len, ++where_, ++b) { for (; 0 == *where_ && len; --len, ++where_, ++b) {
@ -900,4 +900,4 @@ class block_pool {
NS_END NS_END
#endif #endif

View File

@ -99,7 +99,6 @@ NS_END
#endif #endif
#include <boost/crc.hpp> #include <boost/crc.hpp>
MSVC_ONLY(__pragma(warning(pop)))
NS_ROOT NS_ROOT

View File

@ -55,6 +55,7 @@ class aligned_storage {
// as per MSVC documentation: // as per MSVC documentation:
// align(#) valid entries are integer powers of two from 1 to 8192 (bytes) // align(#) valid entries are integer powers of two from 1 to 8192 (bytes)
// e.g. 2, 4, 8, 16, 32, or 64 // e.g. 2, 4, 8, 16, 32, or 64
#pragma warning(disable: 4324) // structure was padded due to __declspec(align())
template<size_t Align> struct align_t {}; template<size_t Align> struct align_t {};
template<> struct ALIGNAS(1) align_t<1> { }; template<> struct ALIGNAS(1) align_t<1> { };
template<> struct ALIGNAS(2) align_t<2> { }; template<> struct ALIGNAS(2) align_t<2> { };
@ -70,6 +71,7 @@ class aligned_storage {
template<> struct ALIGNAS(2048) align_t<2048> { }; template<> struct ALIGNAS(2048) align_t<2048> { };
template<> struct ALIGNAS(4096) align_t<4096> { }; template<> struct ALIGNAS(4096) align_t<4096> { };
template<> struct ALIGNAS(8192) align_t<8192> { }; template<> struct ALIGNAS(8192) align_t<8192> { };
#pragma warning(default: 4324)
#else #else
template<size_t Align> struct ALIGNAS(Align) align_t { }; template<size_t Align> struct ALIGNAS(Align) align_t { };
#endif #endif
@ -620,4 +622,4 @@ static ptr make(Args&&... args) { \
return irs::memory::maker<class_type>::make(); \ return irs::memory::maker<class_type>::make(); \
} }
#endif #endif

View File

@ -276,12 +276,10 @@ class bounded_object_pool {
}); });
} }
operator bool() const NOEXCEPT { return nullptr != slot_; }
element_type& operator*() const NOEXCEPT { return *slot_->value.ptr; } element_type& operator*() const NOEXCEPT { return *slot_->value.ptr; }
element_type* operator->() const NOEXCEPT { return get(); } element_type* operator->() const NOEXCEPT { return get(); }
element_type* get() const NOEXCEPT { return slot_->value.ptr.get(); } element_type* get() const NOEXCEPT { return slot_->value.ptr.get(); }
operator bool() const NOEXCEPT {
return static_cast<bool>(slot_);
}
private: private:
static void reset_impl(node_type*& slot) NOEXCEPT { static void reset_impl(node_type*& slot) NOEXCEPT {
@ -381,9 +379,9 @@ class bounded_object_pool {
} }
} }
// MSVC 2017.3 through 2017.7 incorectly increment counter if this function is inlined during optimization // MSVC 2017.3 through 2017.8 incorectly increment counter if this function is inlined during optimization
// MSVC 2017.2 and below TODO test for both debug and release // MSVC 2017.2 and below TODO test for both debug and release
MSVC2017_34567_OPTIMIZED_WORKAROUND(__declspec(noinline)) MSVC2017_345678_OPTIMIZED_WORKAROUND(__declspec(noinline))
void unlock(node_type& slot) const NOEXCEPT { void unlock(node_type& slot) const NOEXCEPT {
free_list_.push(slot); free_list_.push(slot);
cond_.notify_all(); cond_.notify_all();
@ -818,7 +816,7 @@ class unbounded_object_pool_volatile
typename base_t::node* head = nullptr; typename base_t::node* head = nullptr;
// reset all cached instances // reset all cached instances
while (head = this->free_objects_.pop()) { while ((head = this->free_objects_.pop())) {
head->value = typename T::ptr{}; // empty instance head->value = typename T::ptr{}; // empty instance
this->free_slots_.push(*head); this->free_slots_.push(*head);
} }
@ -846,4 +844,4 @@ class unbounded_object_pool_volatile
NS_END NS_END
#endif #endif

View File

@ -1122,13 +1122,15 @@ class index_test_case_base : public tests::index_test_base {
void profile_bulk_index_dedicated_commit(size_t insert_threads, size_t commit_threads, size_t commit_interval) { void profile_bulk_index_dedicated_commit(size_t insert_threads, size_t commit_threads, size_t commit_interval) {
auto* directory = &dir(); auto* directory = &dir();
irs::index_writer::options options;
std::atomic<bool> working(true); std::atomic<bool> working(true);
std::atomic<size_t> writer_commit_count(0); std::atomic<size_t> writer_commit_count(0);
commit_threads = (std::max)(size_t(1), commit_threads); commit_threads = (std::max)(size_t(1), commit_threads);
options.segment_count_max = 8; // match original implementation or may run out of file handles (e.g. MacOS/Travis)
irs::async_utils::thread_pool thread_pool(commit_threads, commit_threads); irs::async_utils::thread_pool thread_pool(commit_threads, commit_threads);
auto writer = open_writer(); auto writer = open_writer(irs::OM_CREATE, options);
for (size_t i = 0; i < commit_threads; ++i) { for (size_t i = 0; i < commit_threads; ++i) {
thread_pool.run([commit_interval, directory, &working, &writer, &writer_commit_count]()->void { thread_pool.run([commit_interval, directory, &working, &writer, &writer_commit_count]()->void {
@ -1154,9 +1156,13 @@ class index_test_case_base : public tests::index_test_base {
void profile_bulk_index_dedicated_consolidate(size_t num_threads, size_t batch_size, size_t consolidate_interval) { void profile_bulk_index_dedicated_consolidate(size_t num_threads, size_t batch_size, size_t consolidate_interval) {
const auto policy = irs::index_utils::consolidate_all(); const auto policy = irs::index_utils::consolidate_all();
auto* directory = &dir(); auto* directory = &dir();
irs::index_writer::options options;
std::atomic<bool> working(true); std::atomic<bool> working(true);
irs::async_utils::thread_pool thread_pool(2, 2); irs::async_utils::thread_pool thread_pool(2, 2);
auto writer = open_writer();
options.segment_count_max = 8; // match original implementation or may run out of file handles (e.g. MacOS/Travis)
auto writer = open_writer(irs::OM_CREATE, options);
thread_pool.run([consolidate_interval, directory, &working, &writer, &policy]()->void { thread_pool.run([consolidate_interval, directory, &working, &writer, &policy]()->void {
while (working.load()) { while (working.load()) {
@ -1171,7 +1177,8 @@ class index_test_case_base : public tests::index_test_base {
} }
thread_pool.stop(); thread_pool.stop();
writer->consolidate(policy); writer->commit(); // ensure there are no consolidation-pending segments left in 'consolidating_segments_' before applying the final consolidation
ASSERT_TRUE(writer->consolidate(policy));
writer->commit(); writer->commit();
struct dummy_doc_template_t: public tests::csv_doc_generator::doc_template { struct dummy_doc_template_t: public tests::csv_doc_generator::doc_template {
@ -8475,13 +8482,21 @@ TEST_F(memory_index_test, document_context) {
ASSERT_EQ(std::cv_status::no_timeout, field.cond.wait_for(field_cond_lock, std::chrono::milliseconds(1000))); // wait for insertion to start ASSERT_EQ(std::cv_status::no_timeout, field.cond.wait_for(field_cond_lock, std::chrono::milliseconds(1000))); // wait for insertion to start
std::thread thread1([&writer, &field]()->void { std::atomic<bool> commit(false);
std::thread thread1([&writer, &field, &commit]()->void {
writer->commit(); writer->commit();
commit = true;
SCOPED_LOCK(field.cond_mutex); SCOPED_LOCK(field.cond_mutex);
field.cond.notify_all(); field.cond.notify_all();
}); });
ASSERT_EQ(std::cv_status::timeout, field.cond.wait_for(field_cond_lock, std::chrono::milliseconds(100))); // verify commit() blocks auto result = field.cond.wait_for(field_cond_lock, std::chrono::milliseconds(100)); // verify commit() blocks
// MSVC 2015/2017 seems to sporadically notify condition variables without explicit request
MSVC2015_ONLY(while(!commit && result == std::cv_status::no_timeout) result = field.cond.wait_for(field_cond_lock, std::chrono::milliseconds(100)));
MSVC2017_ONLY(while(!commit && result == std::cv_status::no_timeout) result = field.cond.wait_for(field_cond_lock, std::chrono::milliseconds(100)));
ASSERT_EQ(std::cv_status::timeout, result);
field_lock.unlock(); field_lock.unlock();
ASSERT_EQ(std::cv_status::no_timeout, field.cond.wait_for(field_cond_lock, std::chrono::milliseconds(1000))); // verify commit() finishes ASSERT_EQ(std::cv_status::no_timeout, field.cond.wait_for(field_cond_lock, std::chrono::milliseconds(1000))); // verify commit() finishes
thread0.join(); thread0.join();
@ -8512,13 +8527,21 @@ TEST_F(memory_index_test, document_context) {
ASSERT_EQ(std::cv_status::no_timeout, field.cond.wait_for(field_cond_lock, std::chrono::milliseconds(1000))); // wait for insertion to start ASSERT_EQ(std::cv_status::no_timeout, field.cond.wait_for(field_cond_lock, std::chrono::milliseconds(1000))); // wait for insertion to start
std::thread thread1([&writer, &field]()->void { std::atomic<bool> commit(false);
std::thread thread1([&writer, &field, &commit]()->void {
writer->commit(); writer->commit();
commit = true;
SCOPED_LOCK(field.cond_mutex); SCOPED_LOCK(field.cond_mutex);
field.cond.notify_all(); field.cond.notify_all();
}); });
ASSERT_EQ(std::cv_status::timeout, field.cond.wait_for(field_cond_lock, std::chrono::milliseconds(100))); // verify commit() blocks auto result = field.cond.wait_for(field_cond_lock, std::chrono::milliseconds(100)); // verify commit() blocks
// MSVC 2015/2017 seems to sporadically notify condition variables without explicit request
MSVC2015_ONLY(while(!commit && result == std::cv_status::no_timeout) result = field.cond.wait_for(field_cond_lock, std::chrono::milliseconds(100)));
MSVC2017_ONLY(while(!commit && result == std::cv_status::no_timeout) result = field.cond.wait_for(field_cond_lock, std::chrono::milliseconds(100)));
ASSERT_EQ(std::cv_status::timeout, result);
field_lock.unlock(); field_lock.unlock();
ASSERT_EQ(std::cv_status::no_timeout, field.cond.wait_for(field_cond_lock, std::chrono::milliseconds(1000))); // verify commit() finishes ASSERT_EQ(std::cv_status::no_timeout, field.cond.wait_for(field_cond_lock, std::chrono::milliseconds(1000))); // verify commit() finishes
thread0.join(); thread0.join();
@ -8579,13 +8602,21 @@ TEST_F(memory_index_test, document_context) {
auto ctx = writer->documents(); auto ctx = writer->documents();
SCOPED_LOCK_NAMED(field.cond_mutex, field_cond_lock); // wait for insertion to start SCOPED_LOCK_NAMED(field.cond_mutex, field_cond_lock); // wait for insertion to start
ctx.remove(*(query_doc1.filter)); ctx.remove(*(query_doc1.filter));
std::thread thread1([&writer, &field]()->void { std::atomic<bool> commit(false); // FIXME TODO remove once segment_context will not block flush_all()
std::thread thread1([&writer, &field, &commit]()->void {
writer->commit(); writer->commit();
commit = true;
SCOPED_LOCK(field.cond_mutex); SCOPED_LOCK(field.cond_mutex);
field.cond.notify_all(); field.cond.notify_all();
}); });
ASSERT_EQ(std::cv_status::timeout, field.cond.wait_for(field_cond_lock, std::chrono::milliseconds(1000))); field_cond_lock.unlock(); // verify commit() finishes FIXME TODO use below once segment_context will not block flush_all() auto result = field.cond.wait_for(field_cond_lock, std::chrono::milliseconds(1000)); // verify commit() finishes FIXME TODO remove once segment_context will not block flush_all()
// MSVC 2015/2017 seems to sporadically notify condition variables without explicit request FIXME TODO remove once segment_context will not block flush_all()
MSVC2015_ONLY(while(!commit && result == std::cv_status::no_timeout) result = field.cond.wait_for(field_cond_lock, std::chrono::milliseconds(100)));
MSVC2017_ONLY(while(!commit && result == std::cv_status::no_timeout) result = field.cond.wait_for(field_cond_lock, std::chrono::milliseconds(100)));
ASSERT_EQ(std::cv_status::timeout, result); field_cond_lock.unlock(); // verify commit() finishes FIXME TODO use below once segment_context will not block flush_all()
//ASSERT_EQ(std::cv_status::no_timeout, field.cond.wait_for(field_cond_lock, std::chrono::milliseconds(1000))); // verify commit() finishes //ASSERT_EQ(std::cv_status::no_timeout, field.cond.wait_for(field_cond_lock, std::chrono::milliseconds(1000))); // verify commit() finishes
{ irs::index_writer::documents_context(std::move(ctx)); } // release ctx before join() in case of test failure { irs::index_writer::documents_context(std::move(ctx)); } // release ctx before join() in case of test failure
thread1.join(); thread1.join();
@ -8624,13 +8655,21 @@ TEST_F(memory_index_test, document_context) {
doc.insert(irs::action::index, doc2->indexed.begin(), doc2->indexed.end()); doc.insert(irs::action::index, doc2->indexed.begin(), doc2->indexed.end());
doc.insert(irs::action::store, doc2->stored.begin(), doc2->stored.end()); doc.insert(irs::action::store, doc2->stored.begin(), doc2->stored.end());
} }
std::thread thread1([&writer, &field]()->void { std::atomic<bool> commit(false); // FIXME TODO remove once segment_context will not block flush_all()
std::thread thread1([&writer, &field, &commit]()->void {
writer->commit(); writer->commit();
commit = true;
SCOPED_LOCK(field.cond_mutex); SCOPED_LOCK(field.cond_mutex);
field.cond.notify_all(); field.cond.notify_all();
}); });
ASSERT_EQ(std::cv_status::timeout, field.cond.wait_for(field_cond_lock, std::chrono::milliseconds(1000))); field_cond_lock.unlock(); // verify commit() finishes FIXME TODO use below once segment_context will not block flush_all() auto result = field.cond.wait_for(field_cond_lock, std::chrono::milliseconds(1000)); // verify commit() finishes FIXME TODO remove once segment_context will not block flush_all()
// MSVC 2015/2017 seems to sporadically notify condition variables without explicit request FIXME TODO remove once segment_context will not block flush_all()
MSVC2015_ONLY(while(!commit && result == std::cv_status::no_timeout) result = field.cond.wait_for(field_cond_lock, std::chrono::milliseconds(100)));
MSVC2017_ONLY(while(!commit && result == std::cv_status::no_timeout) result = field.cond.wait_for(field_cond_lock, std::chrono::milliseconds(100)));
ASSERT_EQ(std::cv_status::timeout, result); field_cond_lock.unlock(); // verify commit() finishes FIXME TODO use below once segment_context will not block flush_all()
//ASSERT_EQ(std::cv_status::no_timeout, field.cond.wait_for(field_cond_lock, std::chrono::milliseconds(1000))); // verify commit() finishes //ASSERT_EQ(std::cv_status::no_timeout, field.cond.wait_for(field_cond_lock, std::chrono::milliseconds(1000))); // verify commit() finishes
{ irs::index_writer::documents_context(std::move(ctx)); } // release ctx before join() in case of test failure { irs::index_writer::documents_context(std::move(ctx)); } // release ctx before join() in case of test failure
thread1.join(); thread1.join();
@ -8672,13 +8711,21 @@ TEST_F(memory_index_test, document_context) {
return false; return false;
} }
); );
std::thread thread1([&writer, &field]()->void { std::atomic<bool> commit(false); // FIXME TODO remove once segment_context will not block flush_all()
std::thread thread1([&writer, &field, &commit]()->void {
writer->commit(); writer->commit();
commit = true;
SCOPED_LOCK(field.cond_mutex); SCOPED_LOCK(field.cond_mutex);
field.cond.notify_all(); field.cond.notify_all();
}); });
ASSERT_EQ(std::cv_status::timeout, field.cond.wait_for(field_cond_lock, std::chrono::milliseconds(1000))); field_cond_lock.unlock(); // verify commit() finishes FIXME TODO use below once segment_context will not block flush_all() auto result = field.cond.wait_for(field_cond_lock, std::chrono::milliseconds(1000)); // verify commit() finishes FIXME TODO remove once segment_context will not block flush_all()
// MSVC 2015/2017 seems to sporadically notify condition variables without explicit request FIXME TODO remove once segment_context will not block flush_all()
MSVC2015_ONLY(while(!commit && result == std::cv_status::no_timeout) result = field.cond.wait_for(field_cond_lock, std::chrono::milliseconds(100)));
MSVC2017_ONLY(while(!commit && result == std::cv_status::no_timeout) result = field.cond.wait_for(field_cond_lock, std::chrono::milliseconds(100)));
ASSERT_EQ(std::cv_status::timeout, result); field_cond_lock.unlock(); // verify commit() finishes FIXME TODO use below once segment_context will not block flush_all()
// ASSERT_EQ(std::cv_status::no_timeout, field.cond.wait_for(field_cond_lock, std::chrono::milliseconds(1000))); // verify commit() finishes // ASSERT_EQ(std::cv_status::no_timeout, field.cond.wait_for(field_cond_lock, std::chrono::milliseconds(1000))); // verify commit() finishes
{ irs::index_writer::documents_context(std::move(ctx)); } // release ctx before join() in case of test failure { irs::index_writer::documents_context(std::move(ctx)); } // release ctx before join() in case of test failure
thread1.join(); thread1.join();
@ -11462,8 +11509,7 @@ TEST_F(memory_index_test, profile_bulk_index_multithread_cleanup_mt) {
TEST_F(memory_index_test, profile_bulk_index_multithread_consolidate_mt) { TEST_F(memory_index_test, profile_bulk_index_multithread_consolidate_mt) {
// a lot of threads cause a lot of contention for the segment pool // a lot of threads cause a lot of contention for the segment pool
// small consolidate_interval causes too many policies to be added and slows down test profile_bulk_index_dedicated_consolidate(8, 10000, 500);
profile_bulk_index_dedicated_consolidate(8, 10000, 5000);
} }
TEST_F(memory_index_test, profile_bulk_index_multithread_dedicated_commit_mt) { TEST_F(memory_index_test, profile_bulk_index_multithread_dedicated_commit_mt) {

View File

@ -283,8 +283,9 @@ TEST_F(memory_pool_allocator_test, profile_std_map) {
irs::memory::single_allocator_tag irs::memory::single_allocator_tag
> alloc_t; > alloc_t;
alloc_t alloc(pool);
std::map<size_t, test_data, std::less<size_t>, alloc_t> data( std::map<size_t, test_data, std::less<size_t>, alloc_t> data(
std::less<size_t>(), alloc_t{pool} std::less<size_t>(), alloc
); );
for (size_t i = 0; i < size; ++i) { for (size_t i = 0; i < size; ++i) {
@ -321,8 +322,9 @@ TEST_F(memory_pool_allocator_test, profile_std_map) {
irs::memory::single_allocator_tag irs::memory::single_allocator_tag
> alloc_t; > alloc_t;
alloc_t alloc(pool);
std::map<size_t, test_data, std::less<size_t>, alloc_t> data( std::map<size_t, test_data, std::less<size_t>, alloc_t> data(
std::less<size_t>{}, alloc_t{pool} std::less<size_t>{}, alloc
); );
for (size_t i = 0; i < size; ++i) { for (size_t i = 0; i < size; ++i) {
@ -360,8 +362,9 @@ TEST_F(memory_pool_allocator_test, profile_std_map) {
irs::memory::single_allocator_tag irs::memory::single_allocator_tag
> alloc_t; > alloc_t;
alloc_t alloc(pool);
std::map<size_t, test_data, std::less<size_t>, alloc_t> data( std::map<size_t, test_data, std::less<size_t>, alloc_t> data(
std::less<size_t>(), alloc_t{pool} std::less<size_t>(), alloc
); );
for (size_t i = 0; i < size; ++i) { for (size_t i = 0; i < size; ++i) {
@ -452,8 +455,9 @@ TEST_F(memory_pool_allocator_test, profile_std_multimap) {
irs::memory::single_allocator_tag irs::memory::single_allocator_tag
> alloc_t; > alloc_t;
alloc_t alloc(pool);
std::multimap<size_t, test_data, std::less<size_t>, alloc_t> data( std::multimap<size_t, test_data, std::less<size_t>, alloc_t> data(
std::less<size_t>(), alloc_t{pool} std::less<size_t>(), alloc
); );
for (size_t i = 0; i < size; ++i) { for (size_t i = 0; i < size; ++i) {
@ -497,8 +501,9 @@ TEST_F(memory_pool_allocator_test, profile_std_multimap) {
irs::memory::single_allocator_tag irs::memory::single_allocator_tag
> alloc_t; > alloc_t;
alloc_t alloc(pool);
std::multimap<size_t, test_data, std::less<size_t>, alloc_t> data( std::multimap<size_t, test_data, std::less<size_t>, alloc_t> data(
std::less<size_t>(), alloc_t{pool} std::less<size_t>(), alloc
); );
for (size_t i = 0; i < size; ++i) { for (size_t i = 0; i < size; ++i) {
@ -530,8 +535,9 @@ TEST_F(memory_pool_allocator_test, profile_std_multimap) {
irs::memory::single_allocator_tag irs::memory::single_allocator_tag
> alloc_t; > alloc_t;
alloc_t alloc(pool);
std::multimap<size_t, test_data, std::less<size_t>, alloc_t> data( std::multimap<size_t, test_data, std::less<size_t>, alloc_t> data(
std::less<size_t>(), alloc_t{pool} std::less<size_t>(), alloc
); );
for (size_t i = 0; i < size; ++i) { for (size_t i = 0; i < size; ++i) {
@ -564,8 +570,9 @@ TEST_F(memory_pool_allocator_test, profile_std_multimap) {
irs::memory::single_allocator_tag irs::memory::single_allocator_tag
> alloc_t; > alloc_t;
alloc_t alloc(pool);
std::multimap<size_t, test_data, std::less<size_t>, alloc_t> data( std::multimap<size_t, test_data, std::less<size_t>, alloc_t> data(
std::less<size_t>(), alloc_t{pool} std::less<size_t>(), alloc
); );
for (size_t i = 0; i < size; ++i) { for (size_t i = 0; i < size; ++i) {
@ -643,8 +650,9 @@ TEST_F(memory_pool_allocator_test, profile_std_list) {
decltype(pool), decltype(pool),
irs::memory::single_allocator_tag irs::memory::single_allocator_tag
> alloc_t; > alloc_t;
std::list<test_data, alloc_t> data(alloc_t{pool}); alloc_t alloc(pool);
std::list<test_data, alloc_t> data(alloc);
for (size_t i = 0; i < size; ++i) { for (size_t i = 0; i < size; ++i) {
SCOPED_TIMER("irs::allocator"); SCOPED_TIMER("irs::allocator");
@ -676,7 +684,8 @@ TEST_F(memory_pool_allocator_test, profile_std_list) {
irs::memory::single_allocator_tag irs::memory::single_allocator_tag
> alloc_t; > alloc_t;
std::list<test_data, alloc_t> data(alloc_t{pool}); alloc_t alloc(pool);
std::list<test_data, alloc_t> data(alloc);
for (size_t i = 0; i < size; ++i) { for (size_t i = 0; i < size; ++i) {
SCOPED_TIMER("irs::allocator(multi-size)"); SCOPED_TIMER("irs::allocator(multi-size)");
@ -709,7 +718,8 @@ TEST_F(memory_pool_allocator_test, profile_std_list) {
irs::memory::single_allocator_tag irs::memory::single_allocator_tag
> alloc_t; > alloc_t;
std::list<test_data, alloc_t> data(alloc_t{pool}); alloc_t alloc(pool);
std::list<test_data, alloc_t> data(alloc);
for (size_t i = 0; i < size; ++i) { for (size_t i = 0; i < size; ++i) {
SCOPED_TIMER("irs::allocator(multi-size,initial_size==128)"); SCOPED_TIMER("irs::allocator(multi-size,initial_size==128)");
@ -795,9 +805,10 @@ TEST_F(memory_pool_allocator_test, profile_std_set) {
decltype(pool), decltype(pool),
irs::memory::single_allocator_tag irs::memory::single_allocator_tag
> alloc_t; > alloc_t;
alloc_t alloc(pool);
std::set<test_data, std::less<test_data>, alloc_t> data( std::set<test_data, std::less<test_data>, alloc_t> data(
std::less<test_data>(), alloc_t{pool} std::less<test_data>(), alloc
); );
for (size_t i = 0; i < size; ++i) { for (size_t i = 0; i < size; ++i) {
@ -834,8 +845,9 @@ TEST_F(memory_pool_allocator_test, profile_std_set) {
irs::memory::single_allocator_tag irs::memory::single_allocator_tag
> alloc_t; > alloc_t;
alloc_t alloc(pool);
std::set<test_data, std::less<test_data>, alloc_t> data( std::set<test_data, std::less<test_data>, alloc_t> data(
std::less<test_data>(), alloc_t{pool} std::less<test_data>(), alloc
); );
for (size_t i = 0; i < size; ++i) { for (size_t i = 0; i < size; ++i) {
@ -873,8 +885,9 @@ TEST_F(memory_pool_allocator_test, profile_std_set) {
irs::memory::single_allocator_tag irs::memory::single_allocator_tag
> alloc_t; > alloc_t;
alloc_t alloc(pool);
std::set<test_data, std::less<test_data>, alloc_t> data( std::set<test_data, std::less<test_data>, alloc_t> data(
std::less<test_data>{}, alloc_t{pool} std::less<test_data>{}, alloc
); );
for (size_t i = 0; i < size; ++i) { for (size_t i = 0; i < size; ++i) {

View File

@ -927,7 +927,7 @@ TEST(concurrent_linked_list_test, push_pop) {
node_type* node = 0; node_type* node = 0;
auto rbegin = nodes.rbegin(); auto rbegin = nodes.rbegin();
while (node = list.pop()) { while ((node = list.pop())) {
ASSERT_EQ(&*rbegin, node); ASSERT_EQ(&*rbegin, node);
list.push(*node); list.push(*node);
node = list.pop(); node = list.pop();