diff --git a/.gitignore b/.gitignore index 83220e4e66..11402f2bfa 100644 --- a/.gitignore +++ b/.gitignore @@ -30,6 +30,7 @@ compile_commands.json instanceinfo.json testresult.json testsStarted +soc-pokec-* build.sh build*/ @@ -99,12 +100,6 @@ js/apps/system/_admin/aardvark/APP/frontend/build/scripts.html.part js/common/tests/shell/shell-database.js -3rdParty/boost/1.61.0/b2 -3rdParty/boost/1.61.0/bin.v2/ -3rdParty/boost/1.61.0/bjam -3rdParty/boost/1.61.0/project-config.jam -3rdParty/boost/1.61.0/stage/ - .gdb-history npm-debug.log diff --git a/.travis.yml b/.travis.yml index 577271bac0..8bbc535083 100644 --- a/.travis.yml +++ b/.travis.yml @@ -11,6 +11,7 @@ branches: - "2.8" - "3.0" - "3.1" + - "3.2" language: cpp cache: ccache diff --git a/3rdParty/V8/CMakeLists.txt b/3rdParty/V8/CMakeLists.txt index 627393f047..8f8003e542 100644 --- a/3rdParty/V8/CMakeLists.txt +++ b/3rdParty/V8/CMakeLists.txt @@ -578,14 +578,24 @@ set(ICU_DT ${ICU_DT} PARENT_SCOPE) set(ICU_DT_DEST "icudtl.dat" ) set(ICU_DT_DEST ${ICU_DT_DEST} PARENT_SCOPE) -configure_file( - "${ICU_DT}" - "${CMAKE_BINARY_DIR}/bin/${CONFIGURATION}/${ICU_DT_DEST}" - COPYONLY) -configure_file( - "${ICU_DT}" - "${CMAKE_BINARY_DIR}/tests/${CONFIGURATION}/${ICU_DT_DEST}" - COPYONLY) + +if (MSVC) + add_custom_command( + TARGET v8_build POST_BUILD + COMMAND ${CMAKE_COMMAND} -E copy ${ICU_DT} ${CMAKE_BINARY_DIR}/bin/$/${ICU_DT_DEST}) + + add_custom_command( + TARGET v8_build POST_BUILD + COMMAND ${CMAKE_COMMAND} -E copy ${ICU_DT} ${CMAKE_BINARY_DIR}/tests/$/${ICU_DT_DEST}) +else() + add_custom_command( + TARGET v8_build POST_BUILD + COMMAND ${CMAKE_COMMAND} -E copy ${ICU_DT} ${CMAKE_BINARY_DIR}/bin/${ICU_DT_DEST}) + + add_custom_command( + TARGET v8_build POST_BUILD + COMMAND ${CMAKE_COMMAND} -E copy ${ICU_DT} ${CMAKE_BINARY_DIR}/tests/${ICU_DT_DEST}) +endif() if (NOT WIN32) add_custom_target(nonthinV8 COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/../../Installation/archive-de-thinner.sh ${V8_REL_TARGET_PATH} diff --git 
a/3rdParty/velocypack/include/velocypack/Collection.h b/3rdParty/velocypack/include/velocypack/Collection.h index 7699c9c9d3..9afea06d46 100644 --- a/3rdParty/velocypack/include/velocypack/Collection.h +++ b/3rdParty/velocypack/include/velocypack/Collection.h @@ -113,11 +113,16 @@ class Collection { ObjectIterator it(slice); while (it.valid()) { - result.emplace(std::move(it.key(true).copyString())); + result.emplace(it.key(true).copyString()); it.next(); } } + template + static void keys(Slice const* slice, T& result) { + return keys(*slice, result); + } + static void keys(Slice const& slice, std::vector& result) { // pre-allocate result vector result.reserve(checkOverflow(slice.length())); @@ -130,9 +135,19 @@ class Collection { } } + template + static void unorderedKeys(Slice const& slice, T& result) { + ObjectIterator it(slice, true); + + while (it.valid()) { + result.emplace(it.key(true).copyString()); + it.next(); + } + } + template - static void keys(Slice const* slice, T& result) { - return keys(*slice, result); + static void unorderedKeys(Slice const* slice, T& result) { + return unorderedKeys(*slice, result); } static Builder extract(Slice const& slice, int64_t from, int64_t to = INT64_MAX); diff --git a/CHANGELOG b/CHANGELOG index 32ff95c5a7..abfd1b858e 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -1,6 +1,12 @@ devel ----- +* don't let read-only transactions block the WAL collector + + +v3.2.alpha2 (2017-02-20) +------------------------ + * ui: fixed issue #2065 * ui: fixed a dashboard related memory issue @@ -10,12 +16,12 @@ devel * Removed undocumented internal HTTP API: * PUT _api/edges - + The documented GET _api/edges and the undocumented POST _api/edges remains unmodified. 
* moved V8 code into a git submodule this requires running the command - + git submodule update --init --recursive once after a source code update or fresh checkout @@ -35,16 +41,22 @@ arangoexport can be used to export collections to json, jsonl or xml and export a graph or collections to xgmml. +* fixed a race condition when closing a connection + +* raised default hard limit on threads for very small machines to 64 + +* fixed negative counting of http connections in UI + v3.2.alpha1 (2017-02-05) ------------------------ * added figure `httpRequests` to AQL query statistics - + * removed revisions cache intermediate layer implementation - + * obsoleted startup options `--database.revision-cache-chunk-size` and - `--database.revision-cache-target-size` + `--database.revision-cache-target-size` * fix potential port number over-/underruns @@ -58,7 +70,29 @@ v3.2.alpha1 (2017-02-05) * more detailed stacktraces in Foxx apps -v3.1.11 (2017-02-14) +v3.1.12 (XXXX-XX-XX) +-------------------- + +* disable shell color escape sequences on Windows + +* fixed issue #2326 + +* fixed issue #2320 + +* fixed issue #2315 + +* fixed a race condition when closing a connection + +* raised default hard limit on threads for very small machines to 64 + +* fixed negative counting of http connections in UI + +* fixed a race when renaming collections + +* fixed a race when dropping databases + + +v3.1.11 (2017-02-17) -------------------- * fixed a race between connection closing and sending out last chunks of data to clients @@ -168,9 +202,10 @@ shards. * added server startup option `--query.memory-limit` * added convenience function to create vertex-centric indexes. -Usage: `db.collection.ensureVertexCentricIndex("label", {type: "hash", direction: "outbound"})` -That will create an index that can be used on OUTBOUND with filtering on the -edge attribute `label`. 
+ +   Usage: `db.collection.ensureVertexCentricIndex("label", {type: "hash", direction: "outbound"})` +   That will create an index that can be used on OUTBOUND with filtering on the +   edge attribute `label`. * change default log output for tools to stdout (instead of stderr) @@ -641,6 +676,8 @@ v3.1.alpha2 (2016-09-01) v3.0.13 (XXXX-XX-XX) -------------------- +* fixed issue #2315 + * fixed issue #2210 diff --git a/CMakeLists.txt b/CMakeLists.txt index 660fbbfe1d..2fe28ccd26 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -97,7 +97,7 @@ set(ARANGODB_FRIENDLY_STRING "ArangoDB - the multi-model database") set(ARANGO_BENCH_FRIENDLY_STRING "arangobench - stress test program") set(ARANGO_DUMP_FRIENDLY_STRING "arangodump - export") set(ARANGO_RESTORE_FRIENDLY_STRING "arangrestore - importer") -set(ARANGO_EXPORT_FRIENDLY_STRING "arangoexport - dataexporter") +set(ARANGO_EXPORT_FRIENDLY_STRING "arangoexport - data exporter") set(ARANGO_IMP_FRIENDLY_STRING "arangoimp - TSV/CSV/JSON importer") set(ARANGOSH_FRIENDLY_STRING "arangosh - commandline client") set(ARANGO_VPACK_FRIENDLY_STRING "arangovpack - vpack printer") @@ -118,6 +118,7 @@ set(BIN_ARANGOVPACK arangovpack) # test binaries set(TEST_BASICS_SUITE basics_suite) +set(TEST_CACHE_SUITE cache_suite) set(TEST_GEO_SUITE geo_suite) set(CLEAN_AUTOGENERATED_FILES) set(PACKAGES_LIST) @@ -167,7 +168,7 @@ find_program (GIT_EXE git) if (DEFINED GIT_EXE AND IS_DIRECTORY "${CMAKE_SOURCE_DIR}/.git") execute_process( WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} - + COMMAND ${GIT_EXE} describe --all --tags --long --dirty=-dirty OUTPUT_VARIABLE GIT_OUTPUT) @@ -281,7 +282,7 @@ endif () math(EXPR BITS "8*${CMAKE_SIZEOF_VOID_P}") add_definitions("-DARANGODB_BITS=${BITS}") - + ################################################################################ ## COMPILER FEATURES ################################################################################ @@ -346,12 +347,12 @@ if (MSVC) add_definitions("-D_CRT_SECURE_NO_WARNINGS=1") 
add_definitions("-DFD_SETSIZE=8192") add_definitions("-DU_STATIC_IMPLEMENTATION=1") - + # https://blogs.msdn.microsoft.com/vcblog/2016/04/14/stl-fixes-in-vs-2015-update-2/ # https://connect.microsoft.com/VisualStudio/feedback/details/1892487 # http://lists.boost.org/boost-users/2016/04/85968.php add_definitions("-D_ENABLE_ATOMIC_ALIGNMENT_FIX") - + set(MSVC_LIBS Shlwapi.lib;crypt32.lib;WINMM.LIB;Ws2_32.lib) set(CMAKE_EXE_LINKER_FLAGS @@ -379,7 +380,7 @@ if (APPLE) endif () endif () - if (USE_LOCAL_CLOCK_GETTIME) + if (USE_LOCAL_CLOCK_GETTIME) message(STATUS "using a home-made clock_gettime") endif () endif () @@ -933,6 +934,7 @@ add_subdirectory(Documentation) add_dependencies(arangobench zlibstatic) add_dependencies(arangod zlibstatic) add_dependencies(arangodump zlibstatic) +add_dependencies(arangoexport zlibstatic) add_dependencies(arangoimp zlibstatic) add_dependencies(arangorestore zlibstatic) add_dependencies(arangosh zlibstatic) @@ -942,6 +944,7 @@ if (NOT USE_PRECOMPILED_V8) add_dependencies(arangobench v8_build) add_dependencies(arangod v8_build) add_dependencies(arangodump v8_build) + add_dependencies(arangoexport v8_build) add_dependencies(arangoimp v8_build) add_dependencies(arangorestore v8_build) add_dependencies(arangosh v8_build) diff --git a/Documentation/Books/Manual/Administration/Arangoexport.mdpp b/Documentation/Books/Manual/Administration/Arangoexport.mdpp index 76c8883d1b..9ab1ecafbb 100644 --- a/Documentation/Books/Manual/Administration/Arangoexport.mdpp +++ b/Documentation/Books/Manual/Administration/Arangoexport.mdpp @@ -48,7 +48,7 @@ Export JSONL unix> arangoexport --type jsonl --collection test -This exports the collection *test* into the output directory *export* as jsonl. Every line in the export is one document from the collection *test* as json. +This exports the collection *test* into the output directory *export* as [jsonl](http://jsonlines.org). Every line in the export is one document from the collection *test* as json. 
Export XML ---------- diff --git a/Documentation/Books/Manual/Administration/Configuration/Endpoint.mdpp b/Documentation/Books/Manual/Administration/Configuration/Endpoint.mdpp index 1999f910ad..155585e9b5 100644 --- a/Documentation/Books/Manual/Administration/Configuration/Endpoint.mdpp +++ b/Documentation/Books/Manual/Administration/Configuration/Endpoint.mdpp @@ -71,6 +71,43 @@ ArangoDB can also do a so called *broadcast bind* using host. This may be useful on development systems that frequently change their network setup like laptops. +### Special note on IPv6 link-local addresses + +ArangoDB can also listen to IPv6 link-local addresses via adding the zone ID +to the IPv6 address in the form `[ipv6-link-local-address%zone-id]`. However, +what you probably instead want is to bind to a local IPv6 address. Local IPv6 +addresses start with `fd`. If you only see a `fe80:` IPv6 address in your +interface configuration but no IPv6 address starting with `fd` your interface +has no local IPv6 address assigned. You can read more about IPv6 link-local +addresses [here](https://en.wikipedia.org/wiki/Link-local_address#IPv6). + +** Example ** + +Bind to a link-local and local IPv6 address. + + unix> ifconfig + +This command lists all interfaces and assigned ip addresses. The link-local +address may be `fe80::6257:18ff:fe82:3ec6%eth0` (IPv6 address plus interface name). +A local IPv6 address may be `fd12:3456::789a`. To bind ArangoDB to it start +*arangod* with `--server.endpoint tcp://[fe80::6257:18ff:fe82:3ec6%eth0]:8529`. +Use telnet to test the connection. + + unix> telnet fe80::6257:18ff:fe82:3ec6%eth0 8529 + Trying fe80::6257:18ff:fe82:3ec6... + Connected to my-machine. + Escape character is '^]'. + GET / HTTP/1.1 + + HTTP/1.1 301 Moved Permanently + Location: /_db/_system/_admin/aardvark/index.html + Content-Type: text/html + Server: ArangoDB + Connection: Keep-Alive + Content-Length: 197 + + Moved

Moved

This page has moved to /_db/_system/_admin/aardvark/index.html.

+ ### Reuse address diff --git a/Installation/Jenkins/build.sh b/Installation/Jenkins/build.sh index 75b064ea1d..344149c9bd 100755 --- a/Installation/Jenkins/build.sh +++ b/Installation/Jenkins/build.sh @@ -330,7 +330,11 @@ while [ $# -gt 0 ]; do --targetDir) shift TARGET_DIR=$1 - CONFIGURE_OPTIONS="${CONFIGURE_OPTIONS} -DPACKAGE_TARGET_DIR=$1" + if test "`uname -o||true`" == "Cygwin"; then + CONFIGURE_OPTIONS="${CONFIGURE_OPTIONS} -DPACKAGE_TARGET_DIR=`cygpath --windows $1`" + else + CONFIGURE_OPTIONS="${CONFIGURE_OPTIONS} -DPACKAGE_TARGET_DIR=$1" + fi shift ;; diff --git a/Installation/Ubuntu/snapcraft.yaml.in b/Installation/Ubuntu/snapcraft.yaml.in index 0ea57aada2..4313f3a5d2 100644 --- a/Installation/Ubuntu/snapcraft.yaml.in +++ b/Installation/Ubuntu/snapcraft.yaml.in @@ -1,5 +1,5 @@ name: @CPACK_PACKAGE_NAME@ -version: @CPACK_PACKAGE_VERSION@ +version: @CPACK_PACKAGE_VERSION@-@ARANGODB_PACKAGE_REVISION@ description: "ArangoDB is a native multi-model database with flexible data models for documents, graphs, and key-values. Build high performance applications using a convenient SQL-like query language or JavaScript extensions. https://www.arangodb.com" @@ -15,9 +15,9 @@ grade: stable parts: @CPACK_PACKAGE_NAME@: - source: @CPACK_PACKAGE_TGZ@ + source: @CMAKE_BINARY_DIR@/@CPACK_PACKAGE_FILE_NAME@.tar.gz plugin: dump - snap: + prime: - -etc - -var - -lib diff --git a/README_maintainers.md b/README_maintainers.md index f109935efa..d93eafaa01 100644 --- a/README_maintainers.md +++ b/README_maintainers.md @@ -366,6 +366,34 @@ via the environment variable or in the menu. Given we want to store the symbols You then will be able to see stack traces in the debugger. +You may also try to download the symbols manually using: + + symchk.exe arangod.exe /s SRV*e:/symbol_cache/cache*https://www.arangodb.com/repositories/symsrv/ + + +The symbolserver over at https://www.arangodb.com/repositories/symsrv/ is browseable; thus you can easily download the files you need by hand. 
It consists of a list of directories corresponding to the components of arangodb: + + - arango - the basic arangodb library needed by all components + - arango_v8 - the basic V8 wrappers needed by all components + - arangod - the server process + - the client utilities: + - arangob + - arangobench + - arangoexport + - arangoimp + - arangorestore + - arangosh + - arangovpack + +In these directories you will find subdirectories with the hash corresponding to the id of the binaries. Their date should correspond to the release date of their respective arango release. + +For example, for ArangoDB 3.1.11: + + https://www.arangodb.com/repositories/symsrv/arangod.pdb/A8B899D2EDFC40E994C30C32FCE5FB346/arangod.pd_ + +This file is a Microsoft cabinet file, which is a little bit compressed. You can dismantle it so the windows explorer offers you its proper handler by renaming it to .cab; click on the now named `arangod.cab`, copy the contained arangod.pdb into your symbol path. + + Coredump analysis ----------------- While Visual studio may cary a nice shiny gui, the concept of GUI fails miserably i.e. in testautomation. Getting an overview over all running threads is a tedious task with it. Here the commandline version of [WinDBG](http://www.windbg.org/) cdb comes to the aid. `testing.js` utilizes it to obtain automatical stack traces for crashes. 
diff --git a/UnitTests/CMakeLists.txt b/UnitTests/CMakeLists.txt index 9d7f5d7057..a71f02f0d1 100644 --- a/UnitTests/CMakeLists.txt +++ b/UnitTests/CMakeLists.txt @@ -34,8 +34,7 @@ add_executable(${TEST_BASICS_SUITE} ../lib/Basics/WorkMonitorDummy.cpp ) -include_directories( - ${TEST_BASICS_SUITE} +include_directories(${TEST_BASICS_SUITE} PUBLIC ${Boost_UNIT_TEST_INCLUDE_DIR} ) @@ -48,7 +47,56 @@ target_link_libraries(${TEST_BASICS_SUITE} ) if (NOT USE_PRECOMPILED_V8) - add_dependencies(basics_suite v8_build) + add_dependencies(${TEST_BASICS_SUITE} v8_build) +endif () + +################################################################################ +## cache_suite +################################################################################ + +add_executable(${TEST_CACHE_SUITE} + Cache/Runner.cpp + Cache/CachedValue.cpp + Cache/FrequencyBuffer.cpp + Cache/Manager.cpp + Cache/Metadata.cpp + Cache/MockScheduler.cpp + Cache/PlainBucket.cpp + Cache/PlainCache.cpp + Cache/Rebalancer.cpp + Cache/State.cpp + Cache/TransactionalBucket.cpp + Cache/TransactionWindow.cpp + ../lib/Basics/WorkMonitorDummy.cpp + ../arangod/Cache/Cache.cpp + ../arangod/Cache/CacheManagerFeatureThreads.cpp + ../arangod/Cache/CachedValue.cpp + ../arangod/Cache/Manager.cpp + ../arangod/Cache/ManagerTasks.cpp + ../arangod/Cache/Metadata.cpp + ../arangod/Cache/PlainBucket.cpp + ../arangod/Cache/PlainCache.cpp + ../arangod/Cache/Rebalancer.cpp + ../arangod/Cache/State.cpp + ../arangod/Cache/TransactionalBucket.cpp + ../arangod/Cache/TransactionalCache.cpp + ../arangod/Cache/TransactionWindow.cpp +) + +include_directories(${TEST_CACHE_SUITE} + PUBLIC ${Boost_UNIT_TEST_INCLUDE_DIR} +) + +target_link_libraries(${TEST_CACHE_SUITE} + ${LIB_ARANGO} + ${MSVC_LIBS} + boost_system + boost_boost + ${SYSTEM_LIBRARIES} +) + +if (NOT USE_PRECOMPILED_V8) + add_dependencies(${TEST_CACHE_SUITE} v8_build) endif () ################################################################################ @@ -68,5 +116,5 @@ 
target_link_libraries(${TEST_GEO_SUITE} ) if (NOT USE_PRECOMPILED_V8) - add_dependencies(geo_suite v8_build) + add_dependencies(${TEST_GEO_SUITE} v8_build) endif () diff --git a/UnitTests/Cache/CachedValue.cpp b/UnitTests/Cache/CachedValue.cpp new file mode 100644 index 0000000000..fa8371ceb3 --- /dev/null +++ b/UnitTests/Cache/CachedValue.cpp @@ -0,0 +1,183 @@ +//////////////////////////////////////////////////////////////////////////////// +/// @brief test suite for arangodb::cache::CachedValue +/// +/// @file +/// +/// DISCLAIMER +/// +/// Copyright 2017 triagens GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is triAGENS GmbH, Cologne, Germany +/// +/// @author Daniel H. 
Larkin +/// @author Copyright 2017, triAGENS GmbH, Cologne, Germany +//////////////////////////////////////////////////////////////////////////////// + +#include "Basics/Common.h" + +#define BOOST_TEST_INCLUDED +#include + +#include "Cache/CachedValue.h" + +#include +#include + +using namespace arangodb::cache; + +// ----------------------------------------------------------------------------- +// --SECTION-- setup / tear-down +// ----------------------------------------------------------------------------- + +struct CCacheCachedValueSetup { + CCacheCachedValueSetup() { BOOST_TEST_MESSAGE("setup CachedValue"); } + + ~CCacheCachedValueSetup() { BOOST_TEST_MESSAGE("tear-down CachedValue"); } +}; +// ----------------------------------------------------------------------------- +// --SECTION-- test suite +// ----------------------------------------------------------------------------- + +//////////////////////////////////////////////////////////////////////////////// +/// @brief setup +//////////////////////////////////////////////////////////////////////////////// + +BOOST_FIXTURE_TEST_SUITE(CCacheCachedValueTest, CCacheCachedValueSetup) + +//////////////////////////////////////////////////////////////////////////////// +/// @brief test construct with valid data +//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_CASE(tst_construct_valid) { + uint64_t k = 1; + std::string v("test"); + CachedValue* cv; + + // fixed key, variable value + cv = CachedValue::construct(&k, sizeof(uint64_t), v.data(), v.size()); + BOOST_CHECK(nullptr != cv); + BOOST_CHECK_EQUAL(sizeof(uint64_t), cv->keySize); + BOOST_CHECK_EQUAL(v.size(), cv->valueSize); + BOOST_CHECK_EQUAL(sizeof(CachedValue) + sizeof(uint64_t) + v.size(), + cv->size()); + BOOST_CHECK_EQUAL(k, *reinterpret_cast(cv->key())); + BOOST_CHECK_EQUAL(0, memcmp(v.data(), cv->value(), v.size())); + delete cv; + + // variable key, fixed value + cv = CachedValue::construct(v.data(), 
v.size(), &k, sizeof(uint64_t)); + BOOST_CHECK(nullptr != cv); + BOOST_CHECK_EQUAL(v.size(), cv->keySize); + BOOST_CHECK_EQUAL(sizeof(uint64_t), cv->valueSize); + BOOST_CHECK_EQUAL(sizeof(CachedValue) + sizeof(uint64_t) + v.size(), + cv->size()); + BOOST_CHECK_EQUAL(0, memcmp(v.data(), cv->key(), v.size())); + BOOST_CHECK_EQUAL(k, *reinterpret_cast(cv->value())); + delete cv; + + // fixed key, zero length value + cv = CachedValue::construct(&k, sizeof(uint64_t), nullptr, 0); + BOOST_CHECK(nullptr != cv); + BOOST_CHECK_EQUAL(sizeof(uint64_t), cv->keySize); + BOOST_CHECK_EQUAL(0ULL, cv->valueSize); + BOOST_CHECK_EQUAL(sizeof(CachedValue) + sizeof(uint64_t), cv->size()); + BOOST_CHECK_EQUAL(k, *reinterpret_cast(cv->key())); + BOOST_CHECK(nullptr == cv->value()); + delete cv; +} + +//////////////////////////////////////////////////////////////////////////////// +/// @brief test construct with invalid data +//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_CASE(tst_construct_invalid) { + uint64_t k = 1; + std::string v("test"); + CachedValue* cv; + + // zero size key + cv = CachedValue::construct(&k, 0, v.data(), v.size()); + BOOST_CHECK(nullptr == cv); + + // nullptr key, zero size + cv = CachedValue::construct(nullptr, 0, v.data(), v.size()); + BOOST_CHECK(nullptr == cv); + + // nullptr key, non-zero size + cv = CachedValue::construct(nullptr, sizeof(uint64_t), v.data(), v.size()); + BOOST_CHECK(nullptr == cv); + + // nullptr value, non-zero length + cv = CachedValue::construct(&k, sizeof(uint64_t), nullptr, v.size()); + BOOST_CHECK(nullptr == cv); +} + +//////////////////////////////////////////////////////////////////////////////// +/// @brief test copy +//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_CASE(tst_copy) { + uint64_t k = 1; + std::string v("test"); + + // fixed key, variable value + auto original = + CachedValue::construct(&k, sizeof(uint64_t), v.data(), 
v.size()); + auto copy = original->copy(); + BOOST_CHECK(nullptr != copy); + BOOST_CHECK_EQUAL(sizeof(uint64_t), copy->keySize); + BOOST_CHECK_EQUAL(v.size(), copy->valueSize); + BOOST_CHECK_EQUAL(sizeof(CachedValue) + sizeof(uint64_t) + v.size(), + copy->size()); + BOOST_CHECK_EQUAL(k, *reinterpret_cast(copy->key())); + BOOST_CHECK_EQUAL(0, memcmp(v.data(), copy->value(), v.size())); + delete original; + delete copy; +} + +//////////////////////////////////////////////////////////////////////////////// +/// @brief test key comparison +//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_CASE(tst_key_comparison) { + std::string k1("test"); + std::string k2("testing"); + std::string k3("TEST"); + uint64_t v = 1; + + auto cv = CachedValue::construct(k1.data(), k1.size(), &v, sizeof(uint64_t)); + + // same key + BOOST_CHECK(cv->sameKey(k1.data(), k1.size())); + + // different length, matching prefix + BOOST_CHECK(!cv->sameKey(k2.data(), k2.size())); + + // same length, different key + BOOST_CHECK(!cv->sameKey(k3.data(), k3.size())); + + delete cv; +} +//////////////////////////////////////////////////////////////////////////////// +/// @brief generate tests +//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_SUITE_END() + +// Local Variables: +// mode: outline-minor +// outline-regexp: "^\\(/// @brief\\|/// {@inheritDoc}\\|/// @addtogroup\\|// +// --SECTION--\\|/// @\\}\\)" +// End: diff --git a/UnitTests/Cache/FrequencyBuffer.cpp b/UnitTests/Cache/FrequencyBuffer.cpp new file mode 100644 index 0000000000..25808bd8e6 --- /dev/null +++ b/UnitTests/Cache/FrequencyBuffer.cpp @@ -0,0 +1,141 @@ +//////////////////////////////////////////////////////////////////////////////// +/// @brief test suite for arangodb::cache::FrequencyBuffer +/// +/// @file +/// +/// DISCLAIMER +/// +/// Copyright 2017 triagens GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 
2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is triAGENS GmbH, Cologne, Germany +/// +/// @author Daniel H. Larkin +/// @author Copyright 2017, triAGENS GmbH, Cologne, Germany +//////////////////////////////////////////////////////////////////////////////// + +#include "Basics/Common.h" + +#define BOOST_TEST_INCLUDED +#include + +#include "Cache/FrequencyBuffer.h" + +#include + +using namespace arangodb::cache; + +// ----------------------------------------------------------------------------- +// --SECTION-- setup / tear-down +// ----------------------------------------------------------------------------- + +struct CCacheFrequencyBufferSetup { + CCacheFrequencyBufferSetup() { BOOST_TEST_MESSAGE("setup FrequencyBuffer"); } + + ~CCacheFrequencyBufferSetup() { + BOOST_TEST_MESSAGE("tear-down FrequencyBuffer"); + } +}; +// ----------------------------------------------------------------------------- +// --SECTION-- test suite +// ----------------------------------------------------------------------------- + +//////////////////////////////////////////////////////////////////////////////// +/// @brief setup +//////////////////////////////////////////////////////////////////////////////// + +BOOST_FIXTURE_TEST_SUITE(CCacheFrequencyBufferTest, CCacheFrequencyBufferSetup) + +//////////////////////////////////////////////////////////////////////////////// +/// @brief test behavior with ints +//////////////////////////////////////////////////////////////////////////////// + 
+BOOST_AUTO_TEST_CASE(tst_uint8_t) { + uint8_t zero = 0; + uint8_t one = 1; + uint8_t two = 2; + + // check that default construction is as expected + BOOST_CHECK(uint8_t() == zero); + + FrequencyBuffer buffer(8); + BOOST_CHECK_EQUAL(buffer.memoryUsage(), sizeof(FrequencyBuffer) + 8); + + for (size_t i = 0; i < 4; i++) { + buffer.insertRecord(two); + } + for (size_t i = 0; i < 2; i++) { + buffer.insertRecord(one); + } + + auto frequencies = buffer.getFrequencies(); + BOOST_CHECK_EQUAL(2ULL, frequencies->size()); + BOOST_CHECK_EQUAL(one, (*frequencies)[0].first); + BOOST_CHECK_EQUAL(2ULL, (*frequencies)[0].second); + BOOST_CHECK_EQUAL(two, (*frequencies)[1].first); + BOOST_CHECK_EQUAL(4ULL, (*frequencies)[1].second); + + for (size_t i = 0; i < 8; i++) { + buffer.insertRecord(one); + } + + frequencies = buffer.getFrequencies(); + BOOST_CHECK_EQUAL(1ULL, frequencies->size()); + BOOST_CHECK_EQUAL(one, (*frequencies)[0].first); + BOOST_CHECK_EQUAL(8ULL, (*frequencies)[0].second); +} + +//////////////////////////////////////////////////////////////////////////////// +/// @brief test behavior with pointers +//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_CASE(tst_pointers) { + uint8_t* zero = nullptr; + uint8_t one = 1; + uint8_t two = 2; + + // check that default construction is as expected + typedef uint8_t* smallptr; + BOOST_CHECK(smallptr() == zero); + + FrequencyBuffer buffer(8); + BOOST_CHECK_EQUAL(buffer.memoryUsage(), + sizeof(FrequencyBuffer) + (8 * sizeof(uint8_t*))); + + for (size_t i = 0; i < 4; i++) { + buffer.insertRecord(&two); + } + for (size_t i = 0; i < 2; i++) { + buffer.insertRecord(&one); + } + + auto frequencies = buffer.getFrequencies(); + BOOST_CHECK_EQUAL(2ULL, frequencies->size()); + BOOST_CHECK_EQUAL(&one, (*frequencies)[0].first); + BOOST_CHECK_EQUAL(2ULL, (*frequencies)[0].second); + BOOST_CHECK_EQUAL(&two, (*frequencies)[1].first); + BOOST_CHECK_EQUAL(4ULL, (*frequencies)[1].second); +} + 
+//////////////////////////////////////////////////////////////////////////////// +/// @brief generate tests +//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_SUITE_END() + +// Local Variables: +// mode: outline-minor +// outline-regexp: "^\\(/// @brief\\|/// {@inheritDoc}\\|/// @addtogroup\\|// +// --SECTION--\\|/// @\\}\\)" +// End: diff --git a/UnitTests/Cache/Manager.cpp b/UnitTests/Cache/Manager.cpp new file mode 100644 index 0000000000..4d91af4025 --- /dev/null +++ b/UnitTests/Cache/Manager.cpp @@ -0,0 +1,257 @@ +//////////////////////////////////////////////////////////////////////////////// +/// @brief test suite for arangodb::cache::Manager +/// +/// @file +/// +/// DISCLAIMER +/// +/// Copyright 2017 triagens GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is triAGENS GmbH, Cologne, Germany +/// +/// @author Daniel H. 
Larkin +/// @author Copyright 2017, triAGENS GmbH, Cologne, Germany +//////////////////////////////////////////////////////////////////////////////// + +#include "Basics/Common.h" +#include "Random/RandomGenerator.h" + +#define BOOST_TEST_INCLUDED +#include + +#include "Cache/CacheManagerFeatureThreads.h" +#include "Cache/Manager.h" +#include "Cache/PlainCache.h" + +#include "MockScheduler.h" + +#include +#include +#include +#include +#include + +#include + +using namespace arangodb; +using namespace arangodb::cache; + +// ----------------------------------------------------------------------------- +// --SECTION-- setup / tear-down +// ----------------------------------------------------------------------------- + +struct CCacheManagerSetup { + CCacheManagerSetup() { BOOST_TEST_MESSAGE("setup Manager"); } + + ~CCacheManagerSetup() { BOOST_TEST_MESSAGE("tear-down Manager"); } +}; +// ----------------------------------------------------------------------------- +// --SECTION-- test suite +// ----------------------------------------------------------------------------- + +//////////////////////////////////////////////////////////////////////////////// +/// @brief setup +//////////////////////////////////////////////////////////////////////////////// + +BOOST_FIXTURE_TEST_SUITE(CCacheManagerTest, CCacheManagerSetup) + +//////////////////////////////////////////////////////////////////////////////// +/// @brief test constructor with valid data +//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_CASE(tst_constructor) { + uint64_t requestLimit = 1024 * 1024; + Manager manager(nullptr, requestLimit); + + BOOST_CHECK_EQUAL(requestLimit, manager.globalLimit()); + + BOOST_CHECK(0ULL < manager.globalAllocation()); + BOOST_CHECK(requestLimit > manager.globalAllocation()); + + uint64_t bigRequestLimit = 4ULL * 1024ULL * 1024ULL * 1024ULL; + Manager bigManager(nullptr, bigRequestLimit); + + BOOST_CHECK_EQUAL(bigRequestLimit, 
bigManager.globalLimit()); + + BOOST_CHECK((1024ULL * 1024ULL) < bigManager.globalAllocation()); + BOOST_CHECK(bigRequestLimit > bigManager.globalAllocation()); +} + +//////////////////////////////////////////////////////////////////////////////// +/// @brief test mixed load behavior (multi-threaded) +//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_CASE(tst_mixed_load) { + uint64_t initialSize = 16ULL * 1024ULL; + RandomGenerator::initialize(RandomGenerator::RandomType::MERSENNE); + MockScheduler scheduler(4); + Manager manager(scheduler.ioService(), 1024ULL * 1024ULL * 1024ULL); + size_t cacheCount = 4; + size_t threadCount = 4; + std::vector> caches; + for (size_t i = 0; i < cacheCount; i++) { + caches.emplace_back( + manager.createCache(Manager::CacheType::Plain, initialSize, true)); + } + + uint64_t chunkSize = 4 * 1024 * 1024; + uint64_t initialInserts = 1 * 1024 * 1024; + uint64_t operationCount = 4 * 1024 * 1024; + std::atomic hitCount(0); + std::atomic missCount(0); + auto worker = [&manager, &caches, cacheCount, initialInserts, operationCount, + &hitCount, + &missCount](uint64_t lower, uint64_t upper) -> void { + // fill with some initial data + for (uint64_t i = 0; i < initialInserts; i++) { + uint64_t item = lower + i; + size_t cacheIndex = item % cacheCount; + CachedValue* value = CachedValue::construct(&item, sizeof(uint64_t), + &item, sizeof(uint64_t)); + bool ok = caches[cacheIndex]->insert(value); + if (!ok) { + delete value; + } + } + + // initialize valid range for keys that *might* be in cache + uint64_t validLower = lower; + uint64_t validUpper = lower + initialInserts - 1; + + // commence mixed workload + for (uint64_t i = 0; i < operationCount; i++) { + uint32_t r = RandomGenerator::interval(static_cast(99UL)); + + if (r >= 99) { // remove something + if (validLower == validUpper) { + continue; // removed too much + } + + uint64_t item = validLower++; + size_t cacheIndex = item % cacheCount; 
+ + caches[cacheIndex]->remove(&item, sizeof(uint64_t)); + } else if (r >= 95) { // insert something + if (validUpper == upper) { + continue; // already maxed out range + } + + uint64_t item = ++validUpper; + size_t cacheIndex = item % cacheCount; + CachedValue* value = CachedValue::construct(&item, sizeof(uint64_t), + &item, sizeof(uint64_t)); + bool ok = caches[cacheIndex]->insert(value); + if (!ok) { + delete value; + } + } else { // lookup something + uint64_t item = RandomGenerator::interval( + static_cast(validLower), static_cast(validUpper)); + size_t cacheIndex = item % cacheCount; + + Cache::Finding f = caches[cacheIndex]->find(&item, sizeof(uint64_t)); + if (f.found()) { + hitCount++; + TRI_ASSERT(f.value() != nullptr); + TRI_ASSERT(f.value()->sameKey(&item, sizeof(uint64_t))); + } else { + missCount++; + TRI_ASSERT(f.value() == nullptr); + } + } + } + }; + + std::vector threads; + // dispatch threads + for (size_t i = 0; i < threadCount; i++) { + uint64_t lower = i * chunkSize; + uint64_t upper = ((i + 1) * chunkSize) - 1; + threads.push_back(new std::thread(worker, lower, upper)); + } + + // join threads + for (auto t : threads) { + t->join(); + delete t; + } + + for (auto cache : caches) { + manager.destroyCache(cache); + } + + RandomGenerator::shutdown(); +} + +//////////////////////////////////////////////////////////////////////////////// +/// @brief test creation/destruction chaos (multi-threaded) +//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_CASE(tst_lifecycle_chaos) { + uint64_t initialSize = 16ULL * 1024ULL; + RandomGenerator::initialize(RandomGenerator::RandomType::MERSENNE); + MockScheduler scheduler(4); + Manager manager(scheduler.ioService(), 1024ULL * 1024ULL * 1024ULL); + size_t threadCount = 4; + uint64_t operationCount = 4ULL * 1024ULL; + + auto worker = [&manager, initialSize, operationCount]() -> void { + std::queue> caches; + + for (uint64_t i = 0; i < operationCount; i++) { + 
uint32_t r = RandomGenerator::interval(static_cast(1UL)); + switch (r) { + case 0: { + caches.emplace(manager.createCache(Manager::CacheType::Plain, + initialSize, true)); + } + case 1: + default: { + if (!caches.empty()) { + auto cache = caches.front(); + caches.pop(); + manager.destroyCache(cache); + } + } + } + } + }; + + std::vector threads; + // dispatch threads + for (size_t i = 0; i < threadCount; i++) { + threads.push_back(new std::thread(worker)); + } + + // join threads + for (auto t : threads) { + t->join(); + delete t; + } + + RandomGenerator::shutdown(); +} + +//////////////////////////////////////////////////////////////////////////////// +/// @brief generate tests +//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_SUITE_END() + +// Local Variables: +// mode: outline-minor +// outline-regexp: "^\\(/// @brief\\|/// {@inheritDoc}\\|/// @addtogroup\\|// +// --SECTION--\\|/// @\\}\\)" +// End: diff --git a/UnitTests/Cache/Metadata.cpp b/UnitTests/Cache/Metadata.cpp new file mode 100644 index 0000000000..47a2337dc1 --- /dev/null +++ b/UnitTests/Cache/Metadata.cpp @@ -0,0 +1,207 @@ +//////////////////////////////////////////////////////////////////////////////// +/// @brief test suite for arangodb::cache::Metadata +/// +/// @file +/// +/// DISCLAIMER +/// +/// Copyright 2017 triagens GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. 
+/// +/// Copyright holder is triAGENS GmbH, Cologne, Germany +/// +/// @author Daniel H. Larkin +/// @author Copyright 2017, triAGENS GmbH, Cologne, Germany +//////////////////////////////////////////////////////////////////////////////// + +#include "Basics/Common.h" + +#define BOOST_TEST_INCLUDED +#include + +#include "Cache/Metadata.h" + +#include +#include + +using namespace arangodb::cache; + +// ----------------------------------------------------------------------------- +// --SECTION-- setup / tear-down +// ----------------------------------------------------------------------------- + +struct CCacheMetadataSetup { + CCacheMetadataSetup() { BOOST_TEST_MESSAGE("setup Metadata"); } + + ~CCacheMetadataSetup() { BOOST_TEST_MESSAGE("tear-down Metadata"); } +}; +// ----------------------------------------------------------------------------- +// --SECTION-- test suite +// ----------------------------------------------------------------------------- + +//////////////////////////////////////////////////////////////////////////////// +/// @brief setup +//////////////////////////////////////////////////////////////////////////////// + +BOOST_FIXTURE_TEST_SUITE(CCacheMetadataTest, CCacheMetadataSetup) + +//////////////////////////////////////////////////////////////////////////////// +/// @brief test constructor with valid data +//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_CASE(tst_constructor) { + uint64_t dummy; + std::shared_ptr dummyCache(reinterpret_cast(&dummy), + [](Cache* p) -> void {}); + uint8_t dummyTable; + uint32_t logSize = 1; + uint64_t limit = 1024; + + Metadata metadata(dummyCache, limit, &dummyTable, logSize); +} + +//////////////////////////////////////////////////////////////////////////////// +/// @brief test getters +//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_CASE(tst_getters) { + uint64_t dummy; + std::shared_ptr 
dummyCache(reinterpret_cast(&dummy), + [](Cache* p) -> void {}); + uint8_t dummyTable; + uint32_t logSize = 1; + uint64_t limit = 1024; + + Metadata metadata(dummyCache, limit, &dummyTable, logSize); + + metadata.lock(); + + BOOST_CHECK(dummyCache == metadata.cache()); + + BOOST_CHECK_EQUAL(logSize, metadata.logSize()); + BOOST_CHECK_EQUAL(0UL, metadata.auxiliaryLogSize()); + + BOOST_CHECK_EQUAL(limit, metadata.softLimit()); + BOOST_CHECK_EQUAL(limit, metadata.hardLimit()); + BOOST_CHECK_EQUAL(0UL, metadata.usage()); + + BOOST_CHECK(&dummyTable == metadata.table()); + BOOST_CHECK(nullptr == metadata.auxiliaryTable()); + + metadata.unlock(); +} + +//////////////////////////////////////////////////////////////////////////////// +/// @brief test usage limits +//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_CASE(tst_usage_limits) { + uint64_t dummy; + std::shared_ptr dummyCache(reinterpret_cast(&dummy), + [](Cache* p) -> void {}); + uint8_t dummyTable; + uint32_t logSize = 1; + bool success; + + Metadata metadata(dummyCache, 1024ULL, &dummyTable, logSize); + + metadata.lock(); + + success = metadata.adjustUsageIfAllowed(512LL); + BOOST_CHECK(success); + success = metadata.adjustUsageIfAllowed(512LL); + BOOST_CHECK(success); + success = metadata.adjustUsageIfAllowed(512LL); + BOOST_CHECK(!success); + + success = metadata.adjustLimits(2048ULL, 2048ULL); + BOOST_CHECK(success); + + success = metadata.adjustUsageIfAllowed(1024LL); + BOOST_CHECK(success); + + success = metadata.adjustLimits(1024ULL, 2048ULL); + BOOST_CHECK(success); + + success = metadata.adjustUsageIfAllowed(512LL); + BOOST_CHECK(!success); + success = metadata.adjustUsageIfAllowed(-512LL); + BOOST_CHECK(success); + success = metadata.adjustUsageIfAllowed(512LL); + BOOST_CHECK(success); + success = metadata.adjustUsageIfAllowed(-1024LL); + BOOST_CHECK(success); + success = metadata.adjustUsageIfAllowed(512LL); + BOOST_CHECK(!success); + + success = 
metadata.adjustLimits(1024ULL, 1024ULL); + BOOST_CHECK(success); + success = metadata.adjustLimits(512ULL, 512ULL); + BOOST_CHECK(!success); + + metadata.unlock(); +} + +//////////////////////////////////////////////////////////////////////////////// +/// @brief test migration methods +//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_CASE(tst_migration) { + uint64_t dummy; + std::shared_ptr dummyCache(reinterpret_cast(&dummy), + [](Cache* p) -> void {}); + uint8_t dummyTable; + uint8_t dummyAuxiliaryTable; + uint32_t logSize = 1; + uint32_t auxiliaryLogSize = 2; + uint64_t limit = 1024; + + Metadata metadata(dummyCache, limit, &dummyTable, logSize); + + metadata.lock(); + + metadata.grantAuxiliaryTable(&dummyAuxiliaryTable, auxiliaryLogSize); + BOOST_CHECK_EQUAL(auxiliaryLogSize, metadata.auxiliaryLogSize()); + BOOST_CHECK(&dummyAuxiliaryTable == metadata.auxiliaryTable()); + + metadata.swapTables(); + BOOST_CHECK_EQUAL(logSize, metadata.auxiliaryLogSize()); + BOOST_CHECK_EQUAL(auxiliaryLogSize, metadata.logSize()); + BOOST_CHECK(&dummyTable == metadata.auxiliaryTable()); + BOOST_CHECK(&dummyAuxiliaryTable == metadata.table()); + + uint8_t* result = metadata.releaseAuxiliaryTable(); + BOOST_CHECK_EQUAL(0UL, metadata.auxiliaryLogSize()); + BOOST_CHECK(nullptr == metadata.auxiliaryTable()); + BOOST_CHECK(result == &dummyTable); + + result = metadata.releaseTable(); + BOOST_CHECK_EQUAL(0UL, metadata.logSize()); + BOOST_CHECK(nullptr == metadata.table()); + BOOST_CHECK(result == &dummyAuxiliaryTable); + + metadata.unlock(); +} + +//////////////////////////////////////////////////////////////////////////////// +/// @brief generate tests +//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_SUITE_END() + +// Local Variables: +// mode: outline-minor +// outline-regexp: "^\\(/// @brief\\|/// {@inheritDoc}\\|/// @addtogroup\\|// +// --SECTION--\\|/// @\\}\\)" +// End: diff 
--git a/UnitTests/Cache/MockScheduler.cpp b/UnitTests/Cache/MockScheduler.cpp new file mode 100644 index 0000000000..04877b0e45 --- /dev/null +++ b/UnitTests/Cache/MockScheduler.cpp @@ -0,0 +1,59 @@ +//////////////////////////////////////////////////////////////////////////////// +/// @brief helper for cache suite +/// +/// @file +/// +/// DISCLAIMER +/// +/// Copyright 2017 triagens GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is triAGENS GmbH, Cologne, Germany +/// +/// @author Daniel H. 
Larkin +/// @author Copyright 2017, triAGENS GmbH, Cologne, Germany +//////////////////////////////////////////////////////////////////////////////// + +#include "MockScheduler.h" +#include "Basics/Common.h" + +#include +#include +#include + +#include + +using namespace arangodb::cache; + +MockScheduler::MockScheduler(size_t threads) + : _ioService(new boost::asio::io_service()), + _serviceGuard(new boost::asio::io_service::work(*_ioService)) { + for (size_t i = 0; i < threads; i++) { + auto worker = std::bind(static_cast( + &boost::asio::io_service::run), + _ioService.get()); + _group.emplace_back(new std::thread(worker)); + } +} + +MockScheduler::~MockScheduler() { + _serviceGuard.reset(); + for (auto g : _group) { + g->join(); + delete g; + } + _ioService->stop(); +} + +boost::asio::io_service* MockScheduler::ioService() { return _ioService.get(); } diff --git a/UnitTests/Cache/MockScheduler.h b/UnitTests/Cache/MockScheduler.h new file mode 100644 index 0000000000..f47a790033 --- /dev/null +++ b/UnitTests/Cache/MockScheduler.h @@ -0,0 +1,57 @@ +//////////////////////////////////////////////////////////////////////////////// +/// @brief helper for cache suite +/// +/// @file +/// +/// DISCLAIMER +/// +/// Copyright 2017 triagens GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is triAGENS GmbH, Cologne, Germany +/// +/// @author Daniel H. 
Larkin +/// @author Copyright 2017, triAGENS GmbH, Cologne, Germany +//////////////////////////////////////////////////////////////////////////////// + +#ifndef UNITTESTS_CACHE_MOCK_SCHEDULER_H +#define UNITTESTS_CACHE_MOCK_SCHEDULER_H + +#include "Basics/Common.h" + +#include "Basics/asio-helper.h" + +#include +#include +#include + +namespace arangodb { +namespace cache { + +class MockScheduler { + typedef std::unique_ptr asio_worker; + std::unique_ptr _ioService; + std::unique_ptr _serviceGuard; + std::vector _group; + + public: + MockScheduler(size_t threads); + ~MockScheduler(); + boost::asio::io_service* ioService(); +}; + +}; // end namespace cache +}; // end namespace arangodb + +#endif diff --git a/UnitTests/Cache/PlainBucket.cpp b/UnitTests/Cache/PlainBucket.cpp new file mode 100644 index 0000000000..5432c1ec76 --- /dev/null +++ b/UnitTests/Cache/PlainBucket.cpp @@ -0,0 +1,236 @@ +//////////////////////////////////////////////////////////////////////////////// +/// @brief test suite for arangodb::cache::PlainBucket +/// +/// @file +/// +/// DISCLAIMER +/// +/// Copyright 2017 triagens GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is triAGENS GmbH, Cologne, Germany +/// +/// @author Daniel H. 
Larkin +/// @author Copyright 2017, triAGENS GmbH, Cologne, Germany +//////////////////////////////////////////////////////////////////////////////// + +#include "Basics/Common.h" + +#define BOOST_TEST_INCLUDED +#include + +#include "Cache/PlainBucket.h" + +#include +#include + +using namespace arangodb::cache; + +// ----------------------------------------------------------------------------- +// --SECTION-- setup / tear-down +// ----------------------------------------------------------------------------- + +struct CCachePlainBucketSetup { + CCachePlainBucketSetup() { BOOST_TEST_MESSAGE("setup PlainBucket"); } + + ~CCachePlainBucketSetup() { BOOST_TEST_MESSAGE("tear-down PlainBucket"); } +}; +// ----------------------------------------------------------------------------- +// --SECTION-- test suite +// ----------------------------------------------------------------------------- + +//////////////////////////////////////////////////////////////////////////////// +/// @brief setup +//////////////////////////////////////////////////////////////////////////////// + +BOOST_FIXTURE_TEST_SUITE(CCachePlainBucketTest, CCachePlainBucketSetup) + +//////////////////////////////////////////////////////////////////////////////// +/// @brief test insertion to full and fail beyond +//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_CASE(tst_insertion) { + PlainBucket bucket; + bool success; + + uint32_t hashes[6] = { + 1, 2, 3, + 4, 5, 6}; // don't have to be real, but should be unique and non-zero + uint64_t keys[6] = {0, 1, 2, 3, 4, 5}; + uint64_t values[6] = {0, 1, 2, 3, 4, 5}; + CachedValue* ptrs[6]; + for (size_t i = 0; i < 6; i++) { + ptrs[i] = CachedValue::construct(&(keys[i]), sizeof(uint64_t), &(values[i]), + sizeof(uint64_t)); + } + + success = bucket.lock(-1LL); + BOOST_CHECK(success); + + // insert five to fill + BOOST_CHECK(!bucket.isFull()); + for (size_t i = 0; i < 5; i++) { + bucket.insert(hashes[i], ptrs[i]); + if 
(i < 4) { + BOOST_CHECK(!bucket.isFull()); + } else { + BOOST_CHECK(bucket.isFull()); + } + } + for (size_t i = 0; i < 5; i++) { + CachedValue* res = bucket.find(hashes[i], ptrs[i]->key(), ptrs[i]->keySize); + BOOST_CHECK(res == ptrs[i]); + } + + // check that insert is ignored if full + bucket.insert(hashes[5], ptrs[5]); + CachedValue* res = bucket.find(hashes[5], ptrs[5]->key(), ptrs[5]->keySize); + BOOST_CHECK(nullptr == res); + + bucket.unlock(); + + // cleanup + for (size_t i = 0; i < 6; i++) { + delete ptrs[i]; + } +} + +//////////////////////////////////////////////////////////////////////////////// +/// @brief test removal +//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_CASE(tst_removal) { + PlainBucket bucket; + bool success; + + uint32_t hashes[3] = { + 1, 2, 3}; // don't have to be real, but should be unique and non-zero + uint64_t keys[3] = {0, 1, 2}; + uint64_t values[3] = {0, 1, 2}; + CachedValue* ptrs[3]; + for (size_t i = 0; i < 3; i++) { + ptrs[i] = CachedValue::construct(&(keys[i]), sizeof(uint64_t), &(values[i]), + sizeof(uint64_t)); + } + + success = bucket.lock(-1LL); + BOOST_CHECK(success); + + for (size_t i = 0; i < 3; i++) { + bucket.insert(hashes[i], ptrs[i]); + } + for (size_t i = 0; i < 3; i++) { + CachedValue* res = bucket.find(hashes[i], ptrs[i]->key(), ptrs[i]->keySize); + BOOST_CHECK(res == ptrs[i]); + } + + CachedValue* res; + res = bucket.remove(hashes[1], ptrs[1]->key(), ptrs[1]->keySize); + BOOST_CHECK(res == ptrs[1]); + res = bucket.find(hashes[1], ptrs[1]->key(), ptrs[1]->keySize); + BOOST_CHECK(nullptr == res); + res = bucket.remove(hashes[0], ptrs[0]->key(), ptrs[0]->keySize); + BOOST_CHECK(res == ptrs[0]); + res = bucket.find(hashes[0], ptrs[0]->key(), ptrs[0]->keySize); + BOOST_CHECK(nullptr == res); + res = bucket.remove(hashes[2], ptrs[2]->key(), ptrs[2]->keySize); + BOOST_CHECK(res == ptrs[2]); + res = bucket.find(hashes[2], ptrs[2]->key(), ptrs[2]->keySize); + 
BOOST_CHECK(nullptr == res); + + bucket.unlock(); + + // cleanup + for (size_t i = 0; i < 3; i++) { + delete ptrs[i]; + } +} + +//////////////////////////////////////////////////////////////////////////////// +/// @brief test eviction with subsequent insertion +//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_CASE(tst_eviction) { + PlainBucket bucket; + bool success; + + uint32_t hashes[6] = { + 1, 2, 3, + 4, 5, 6}; // don't have to be real, but should be unique and non-zero + uint64_t keys[6] = {0, 1, 2, 3, 4, 5}; + uint64_t values[6] = {0, 1, 2, 3, 4, 5}; + CachedValue* ptrs[6]; + for (size_t i = 0; i < 6; i++) { + ptrs[i] = CachedValue::construct(&(keys[i]), sizeof(uint64_t), &(values[i]), + sizeof(uint64_t)); + } + + success = bucket.lock(-1LL); + BOOST_CHECK(success); + + // insert five to fill + BOOST_CHECK(!bucket.isFull()); + for (size_t i = 0; i < 5; i++) { + bucket.insert(hashes[i], ptrs[i]); + if (i < 4) { + BOOST_CHECK(!bucket.isFull()); + } else { + BOOST_CHECK(bucket.isFull()); + } + } + for (size_t i = 0; i < 5; i++) { + CachedValue* res = bucket.find(hashes[i], ptrs[i]->key(), ptrs[i]->keySize); + BOOST_CHECK(res == ptrs[i]); + } + + // check that we get proper eviction candidate + CachedValue* candidate = bucket.evictionCandidate(); + BOOST_CHECK(candidate == ptrs[0]); + bucket.evict(candidate, false); + CachedValue* res = bucket.find(hashes[0], ptrs[0]->key(), ptrs[0]->keySize); + BOOST_CHECK(nullptr == res); + BOOST_CHECK(!bucket.isFull()); + + // check that we still find the right candidate if not full + candidate = bucket.evictionCandidate(); + BOOST_CHECK(candidate == ptrs[1]); + bucket.evict(candidate, true); + res = bucket.find(hashes[1], ptrs[1]->key(), ptrs[1]->keySize); + BOOST_CHECK(nullptr == res); + BOOST_CHECK(!bucket.isFull()); + + // check that we can insert now after eviction optimized for insertion + bucket.insert(hashes[5], ptrs[5]); + res = bucket.find(hashes[5], ptrs[5]->key(), 
ptrs[5]->keySize); + BOOST_CHECK(res == ptrs[5]); + + bucket.unlock(); + + // cleanup + for (size_t i = 0; i < 6; i++) { + delete ptrs[i]; + } +} + +//////////////////////////////////////////////////////////////////////////////// +/// @brief generate tests +//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_SUITE_END() + +// Local Variables: +// mode: outline-minor +// outline-regexp: "^\\(/// @brief\\|/// {@inheritDoc}\\|/// @addtogroup\\|// +// --SECTION--\\|/// @\\}\\)" +// End: diff --git a/UnitTests/Cache/PlainCache.cpp b/UnitTests/Cache/PlainCache.cpp new file mode 100644 index 0000000000..3a6222f46a --- /dev/null +++ b/UnitTests/Cache/PlainCache.cpp @@ -0,0 +1,360 @@ +//////////////////////////////////////////////////////////////////////////////// +/// @brief test suite for arangodb::cache::PlainBucket +/// +/// @file +/// +/// DISCLAIMER +/// +/// Copyright 2017 triagens GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is triAGENS GmbH, Cologne, Germany +/// +/// @author Daniel H. 
Larkin +/// @author Copyright 2017, triAGENS GmbH, Cologne, Germany +//////////////////////////////////////////////////////////////////////////////// + +#include "Basics/Common.h" +#include "Random/RandomGenerator.h" + +#define BOOST_TEST_INCLUDED +#include + +#include "Cache/Manager.h" +#include "Cache/PlainCache.h" + +#include "MockScheduler.h" + +#include +#include +#include +#include + +#include + +using namespace arangodb; +using namespace arangodb::cache; + +// ----------------------------------------------------------------------------- +// --SECTION-- setup / tear-down +// ----------------------------------------------------------------------------- + +struct CCachePlainCacheSetup { + CCachePlainCacheSetup() { BOOST_TEST_MESSAGE("setup PlainCache"); } + + ~CCachePlainCacheSetup() { BOOST_TEST_MESSAGE("tear-down PlainCache"); } +}; +// ----------------------------------------------------------------------------- +// --SECTION-- test suite +// ----------------------------------------------------------------------------- + +//////////////////////////////////////////////////////////////////////////////// +/// @brief setup +//////////////////////////////////////////////////////////////////////////////// + +BOOST_FIXTURE_TEST_SUITE(CCachePlainCacheTest, CCachePlainCacheSetup) + +//////////////////////////////////////////////////////////////////////////////// +/// @brief test construction (single-threaded) +//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_CASE(tst_st_construction) { + Manager manager(nullptr, 1024ULL * 1024ULL); + auto cache1 = + manager.createCache(Manager::CacheType::Plain, 256ULL * 1024ULL, false); + auto cache2 = + manager.createCache(Manager::CacheType::Plain, 512ULL * 1024ULL, false); + + BOOST_CHECK_EQUAL(0ULL, cache1->usage()); + BOOST_CHECK_EQUAL(256ULL * 1024ULL, cache1->limit()); + BOOST_CHECK_EQUAL(0ULL, cache2->usage()); + BOOST_CHECK(512ULL * 1024ULL > cache2->limit()); + + 
manager.destroyCache(cache1); + manager.destroyCache(cache2); +} + +//////////////////////////////////////////////////////////////////////////////// +/// @brief test insertion (single-threaded) +//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_CASE(tst_st_insertion) { + uint64_t cacheLimit = 256ULL * 1024ULL; + Manager manager(nullptr, 4ULL * cacheLimit); + auto cache = + manager.createCache(Manager::CacheType::Plain, cacheLimit, false); + + for (uint64_t i = 0; i < 1024; i++) { + CachedValue* value = + CachedValue::construct(&i, sizeof(uint64_t), &i, sizeof(uint64_t)); + bool success = cache->insert(value); + BOOST_CHECK(success); + auto f = cache->find(&i, sizeof(uint64_t)); + BOOST_CHECK(f.found()); + } + + for (uint64_t i = 0; i < 1024; i++) { + uint64_t j = 2 * i; + CachedValue* value = + CachedValue::construct(&i, sizeof(uint64_t), &j, sizeof(uint64_t)); + bool success = cache->insert(value); + BOOST_CHECK(success); + auto f = cache->find(&i, sizeof(uint64_t)); + BOOST_CHECK(f.found()); + BOOST_CHECK(0 == memcmp(f.value()->value(), &j, sizeof(uint64_t))); + } + + uint64_t notInserted = 0; + for (uint64_t i = 1024; i < 128 * 1024; i++) { + CachedValue* value = + CachedValue::construct(&i, sizeof(uint64_t), &i, sizeof(uint64_t)); + bool success = cache->insert(value); + if (success) { + auto f = cache->find(&i, sizeof(uint64_t)); + BOOST_CHECK(f.found()); + } else { + delete value; + notInserted++; + } + } + BOOST_CHECK(notInserted > 0); + + manager.destroyCache(cache); +} + +//////////////////////////////////////////////////////////////////////////////// +/// @brief test removal (single-threaded) +//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_CASE(tst_st_removal) { + uint64_t cacheLimit = 256ULL * 1024ULL; + Manager manager(nullptr, 4ULL * cacheLimit); + auto cache = + manager.createCache(Manager::CacheType::Plain, cacheLimit, false); + + for (uint64_t i = 
0; i < 1024; i++) { + CachedValue* value = + CachedValue::construct(&i, sizeof(uint64_t), &i, sizeof(uint64_t)); + bool success = cache->insert(value); + BOOST_CHECK(success); + auto f = cache->find(&i, sizeof(uint64_t)); + BOOST_CHECK(f.found()); + BOOST_CHECK(f.value() != nullptr); + BOOST_CHECK(f.value()->sameKey(&i, sizeof(uint64_t))); + } + + // test removal of bogus keys + for (uint64_t i = 1024; i < 2048; i++) { + bool removed = cache->remove(&i, sizeof(uint64_t)); + BOOST_ASSERT(removed); + // ensure existing keys not removed + for (uint64_t j = 0; j < 1024; j++) { + auto f = cache->find(&j, sizeof(uint64_t)); + BOOST_CHECK(f.found()); + BOOST_CHECK(f.value() != nullptr); + BOOST_CHECK(f.value()->sameKey(&j, sizeof(uint64_t))); + } + } + + // remove actual keys + for (uint64_t i = 0; i < 1024; i++) { + bool removed = cache->remove(&i, sizeof(uint64_t)); + BOOST_CHECK(removed); + auto f = cache->find(&i, sizeof(uint64_t)); + BOOST_CHECK(!f.found()); + } + + manager.destroyCache(cache); +} + +//////////////////////////////////////////////////////////////////////////////// +/// @brief test growth behavior (single-threaded) +//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_CASE(tst_st_growth) { + uint64_t initialSize = 16ULL * 1024ULL; + uint64_t minimumSize = 64ULL * initialSize; + MockScheduler scheduler(4); + Manager manager(scheduler.ioService(), 1024ULL * 1024ULL * 1024ULL); + auto cache = + manager.createCache(Manager::CacheType::Plain, initialSize, true); + + for (uint64_t i = 0; i < 4ULL * 1024ULL * 1024ULL; i++) { + CachedValue* value = + CachedValue::construct(&i, sizeof(uint64_t), &i, sizeof(uint64_t)); + bool success = cache->insert(value); + if (!success) { + delete value; + } + } + + BOOST_CHECK(cache->usage() > minimumSize); + + manager.destroyCache(cache); +} + +//////////////////////////////////////////////////////////////////////////////// +/// @brief test shrink behavior (single-threaded) 
+//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_CASE(tst_st_shrink) { + uint64_t initialSize = 16ULL * 1024ULL; + RandomGenerator::initialize(RandomGenerator::RandomType::MERSENNE); + MockScheduler scheduler(4); + Manager manager(scheduler.ioService(), 1024ULL * 1024ULL * 1024ULL); + auto cache = + manager.createCache(Manager::CacheType::Plain, initialSize, true); + + for (uint64_t i = 0; i < 16ULL * 1024ULL * 1024ULL; i++) { + CachedValue* value = + CachedValue::construct(&i, sizeof(uint64_t), &i, sizeof(uint64_t)); + bool success = cache->insert(value); + if (!success) { + delete value; + } + } + + uint64_t target = cache->usage() / 2; + while (!cache->resize(target)) { + }; + + for (uint64_t i = 0; i < 16ULL * 1024ULL * 1024ULL; i++) { + CachedValue* value = + CachedValue::construct(&i, sizeof(uint64_t), &i, sizeof(uint64_t)); + bool success = cache->insert(value); + if (!success) { + delete value; + } + } + + uint64_t lastUsage = cache->usage(); + while (true) { + usleep(10000); + if (cache->usage() == lastUsage) { + break; + } + lastUsage = cache->usage(); + } + BOOST_CHECK_MESSAGE(cache->usage() <= target, + cache->usage() << " !<= " << target); + + manager.destroyCache(cache); + RandomGenerator::shutdown(); +} + +//////////////////////////////////////////////////////////////////////////////// +/// @brief test mixed load behavior (multi-threaded) +//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_CASE(tst_mt_mixed_load) { + uint64_t initialSize = 16ULL * 1024ULL; + RandomGenerator::initialize(RandomGenerator::RandomType::MERSENNE); + MockScheduler scheduler(4); + Manager manager(scheduler.ioService(), 1024ULL * 1024ULL * 1024ULL); + size_t threadCount = 4; + std::shared_ptr cache = + manager.createCache(Manager::CacheType::Plain, initialSize, true); + + uint64_t chunkSize = 16 * 1024 * 1024; + uint64_t initialInserts = 4 * 1024 * 1024; + uint64_t 
operationCount = 16 * 1024 * 1024; + std::atomic hitCount(0); + std::atomic missCount(0); + auto worker = [&manager, &cache, initialInserts, operationCount, &hitCount, + &missCount](uint64_t lower, uint64_t upper) -> void { + // fill with some initial data + for (uint64_t i = 0; i < initialInserts; i++) { + uint64_t item = lower + i; + CachedValue* value = CachedValue::construct(&item, sizeof(uint64_t), + &item, sizeof(uint64_t)); + bool ok = cache->insert(value); + if (!ok) { + delete value; + } + } + + // initialize valid range for keys that *might* be in cache + uint64_t validLower = lower; + uint64_t validUpper = lower + initialInserts - 1; + + // commence mixed workload + for (uint64_t i = 0; i < operationCount; i++) { + uint32_t r = RandomGenerator::interval(static_cast(99UL)); + + if (r >= 99) { // remove something + if (validLower == validUpper) { + continue; // removed too much + } + + uint64_t item = validLower++; + + cache->remove(&item, sizeof(uint64_t)); + } else if (r >= 95) { // insert something + if (validUpper == upper) { + continue; // already maxed out range + } + + uint64_t item = ++validUpper; + CachedValue* value = CachedValue::construct(&item, sizeof(uint64_t), + &item, sizeof(uint64_t)); + bool ok = cache->insert(value); + if (!ok) { + delete value; + } + } else { // lookup something + uint64_t item = RandomGenerator::interval( + static_cast(validLower), static_cast(validUpper)); + + Cache::Finding f = cache->find(&item, sizeof(uint64_t)); + if (f.found()) { + hitCount++; + TRI_ASSERT(f.value() != nullptr); + TRI_ASSERT(f.value()->sameKey(&item, sizeof(uint64_t))); + } else { + missCount++; + TRI_ASSERT(f.value() == nullptr); + } + } + } + }; + + std::vector threads; + // dispatch threads + for (size_t i = 0; i < threadCount; i++) { + uint64_t lower = i * chunkSize; + uint64_t upper = ((i + 1) * chunkSize) - 1; + threads.push_back(new std::thread(worker, lower, upper)); + } + + // join threads + for (auto t : threads) { + t->join(); + delete 
t; + } + + manager.destroyCache(cache); + RandomGenerator::shutdown(); +} + +//////////////////////////////////////////////////////////////////////////////// +/// @brief generate tests +//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_SUITE_END() + +// Local Variables: +// mode: outline-minor +// outline-regexp: "^\\(/// @brief\\|/// {@inheritDoc}\\|/// @addtogroup\\|// +// --SECTION--\\|/// @\\}\\)" +// End: diff --git a/UnitTests/Cache/Rebalancer.cpp b/UnitTests/Cache/Rebalancer.cpp new file mode 100644 index 0000000000..65eff5478b --- /dev/null +++ b/UnitTests/Cache/Rebalancer.cpp @@ -0,0 +1,203 @@ +//////////////////////////////////////////////////////////////////////////////// +/// @brief test suite for arangodb::cache::Manager +/// +/// @file +/// +/// DISCLAIMER +/// +/// Copyright 2017 triagens GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is triAGENS GmbH, Cologne, Germany +/// +/// @author Daniel H. 
Larkin +/// @author Copyright 2017, triAGENS GmbH, Cologne, Germany +//////////////////////////////////////////////////////////////////////////////// + +#include "Basics/Common.h" +#include "Random/RandomGenerator.h" + +#define BOOST_TEST_INCLUDED +#include + +#include "Cache/Manager.h" +#include "Cache/PlainCache.h" +#include "Cache/Rebalancer.h" + +#include "MockScheduler.h" + +#include +#include +#include +#include +#include + +using namespace arangodb; +using namespace arangodb::cache; + +// ----------------------------------------------------------------------------- +// --SECTION-- setup / tear-down +// ----------------------------------------------------------------------------- + +struct CCacheRebalancerSetup { + CCacheRebalancerSetup() { BOOST_TEST_MESSAGE("setup Rebalancer"); } + + ~CCacheRebalancerSetup() { BOOST_TEST_MESSAGE("tear-down Rebalancer"); } +}; +// ----------------------------------------------------------------------------- +// --SECTION-- test suite +// ----------------------------------------------------------------------------- + +//////////////////////////////////////////////////////////////////////////////// +/// @brief setup +//////////////////////////////////////////////////////////////////////////////// + +BOOST_FIXTURE_TEST_SUITE(CCacheRebalancerTest, CCacheRebalancerSetup) + +//////////////////////////////////////////////////////////////////////////////// +/// @brief test rebalancing (multi-threaded) +//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_CASE(tst_rebalancing) { + uint64_t initialSize = 16ULL * 1024ULL; + RandomGenerator::initialize(RandomGenerator::RandomType::MERSENNE); + MockScheduler scheduler(4); + Manager manager(scheduler.ioService(), 128ULL * 1024ULL * 1024ULL); + Rebalancer rebalancer(&manager); + + size_t cacheCount = 4; + size_t threadCount = 4; + std::vector> caches; + for (size_t i = 0; i < cacheCount; i++) { + caches.emplace_back( + 
manager.createCache(Manager::CacheType::Plain, initialSize, true)); + } + + bool doneRebalancing = false; + auto rebalanceWorker = [&rebalancer, &doneRebalancing]() -> void { + while (!doneRebalancing) { + bool rebalanced = rebalancer.rebalance(); + if (rebalanced) { + usleep(500 * 1000); + } else { + usleep(100); + } + } + }; + auto rebalancerThread = new std::thread(rebalanceWorker); + + uint64_t chunkSize = 4 * 1024 * 1024; + uint64_t initialInserts = 1 * 1024 * 1024; + uint64_t operationCount = 4 * 1024 * 1024; + std::atomic hitCount(0); + std::atomic missCount(0); + auto worker = [&manager, &caches, cacheCount, initialInserts, operationCount, + &hitCount, + &missCount](uint64_t lower, uint64_t upper) -> void { + // fill with some initial data + for (uint64_t i = 0; i < initialInserts; i++) { + uint64_t item = lower + i; + size_t cacheIndex = item % cacheCount; + CachedValue* value = CachedValue::construct(&item, sizeof(uint64_t), + &item, sizeof(uint64_t)); + bool ok = caches[cacheIndex]->insert(value); + if (!ok) { + delete value; + } + } + + // initialize valid range for keys that *might* be in cache + uint64_t validLower = lower; + uint64_t validUpper = lower + initialInserts - 1; + + // commence mixed workload + for (uint64_t i = 0; i < operationCount; i++) { + uint32_t r = RandomGenerator::interval(static_cast(99UL)); + + if (r >= 99) { // remove something + if (validLower == validUpper) { + continue; // removed too much + } + + uint64_t item = validLower++; + size_t cacheIndex = item % cacheCount; + + caches[cacheIndex]->remove(&item, sizeof(uint64_t)); + } else if (r >= 95) { // insert something + if (validUpper == upper) { + continue; // already maxed out range + } + + uint64_t item = ++validUpper; + size_t cacheIndex = item % cacheCount; + CachedValue* value = CachedValue::construct(&item, sizeof(uint64_t), + &item, sizeof(uint64_t)); + bool ok = caches[cacheIndex]->insert(value); + if (!ok) { + delete value; + } + } else { // lookup something + 
uint64_t item = RandomGenerator::interval( + static_cast(validLower), static_cast(validUpper)); + size_t cacheIndex = item % cacheCount; + + Cache::Finding f = caches[cacheIndex]->find(&item, sizeof(uint64_t)); + if (f.found()) { + hitCount++; + TRI_ASSERT(f.value() != nullptr); + TRI_ASSERT(f.value()->sameKey(&item, sizeof(uint64_t))); + } else { + missCount++; + TRI_ASSERT(f.value() == nullptr); + } + } + } + }; + + std::vector threads; + // dispatch threads + for (size_t i = 0; i < threadCount; i++) { + uint64_t lower = i * chunkSize; + uint64_t upper = ((i + 1) * chunkSize) - 1; + threads.push_back(new std::thread(worker, lower, upper)); + } + + // join threads + for (auto t : threads) { + t->join(); + delete t; + } + + doneRebalancing = true; + rebalancerThread->join(); + delete rebalancerThread; + + for (auto cache : caches) { + manager.destroyCache(cache); + } + + RandomGenerator::shutdown(); +} + +//////////////////////////////////////////////////////////////////////////////// +/// @brief generate tests +//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_SUITE_END() + +// Local Variables: +// mode: outline-minor +// outline-regexp: "^\\(/// @brief\\|/// {@inheritDoc}\\|/// @addtogroup\\|// +// --SECTION--\\|/// @\\}\\)" +// End: diff --git a/UnitTests/Cache/Runner.cpp b/UnitTests/Cache/Runner.cpp new file mode 100644 index 0000000000..b742ba3884 --- /dev/null +++ b/UnitTests/Cache/Runner.cpp @@ -0,0 +1,2 @@ +#define BOOST_TEST_MODULE "C/C++ Unit Tests for ArangoDB Cache" +#include diff --git a/UnitTests/Cache/State.cpp b/UnitTests/Cache/State.cpp new file mode 100644 index 0000000000..43aac0a13e --- /dev/null +++ b/UnitTests/Cache/State.cpp @@ -0,0 +1,138 @@ +//////////////////////////////////////////////////////////////////////////////// +/// @brief test suite for arangodb::cache::State +/// +/// @file +/// +/// DISCLAIMER +/// +/// Copyright 2017 triagens GmbH, Cologne, Germany +/// +/// Licensed under the 
Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is triAGENS GmbH, Cologne, Germany +/// +/// @author Daniel H. Larkin +/// @author Copyright 2017, triAGENS GmbH, Cologne, Germany +//////////////////////////////////////////////////////////////////////////////// + +#include "Basics/Common.h" + +#define BOOST_TEST_INCLUDED +#include + +#include "Cache/State.h" + +#include + +using namespace arangodb::cache; + +// ----------------------------------------------------------------------------- +// --SECTION-- setup / tear-down +// ----------------------------------------------------------------------------- + +struct CCacheStateSetup { + CCacheStateSetup() { BOOST_TEST_MESSAGE("setup State"); } + + ~CCacheStateSetup() { BOOST_TEST_MESSAGE("tear-down State"); } +}; +// ----------------------------------------------------------------------------- +// --SECTION-- test suite +// ----------------------------------------------------------------------------- + +//////////////////////////////////////////////////////////////////////////////// +/// @brief setup +//////////////////////////////////////////////////////////////////////////////// + +BOOST_FIXTURE_TEST_SUITE(CCacheStateTest, CCacheStateSetup) + +//////////////////////////////////////////////////////////////////////////////// +/// @brief test lock methods +//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_CASE(tst_lock) { + State state; + bool success; + 
+ uint32_t outsideState = 0; + + auto cb1 = [&outsideState]() -> void { outsideState = 1; }; + + auto cb2 = [&outsideState]() -> void { outsideState = 2; }; + + // check lock without contention + BOOST_CHECK(!state.isLocked()); + success = state.lock(-1, cb1); + BOOST_CHECK(success); + BOOST_CHECK(state.isLocked()); + BOOST_CHECK_EQUAL(1UL, outsideState); + + // check lock with contention + success = state.lock(10LL, cb2); + BOOST_CHECK(!success); + BOOST_CHECK(state.isLocked()); + BOOST_CHECK_EQUAL(1UL, outsideState); + + // check unlock + state.unlock(); + BOOST_CHECK(!state.isLocked()); +} + +//////////////////////////////////////////////////////////////////////////////// +/// @brief test methods for non-lock flags +//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_CASE(tst_flags) { + State state; + bool success; + + success = state.lock(); + BOOST_CHECK(success); + BOOST_CHECK(!state.isSet(State::Flag::migrated)); + state.unlock(); + + success = state.lock(); + BOOST_CHECK(success); + BOOST_CHECK(!state.isSet(State::Flag::migrated)); + state.toggleFlag(State::Flag::migrated); + BOOST_CHECK(state.isSet(State::Flag::migrated)); + state.unlock(); + + success = state.lock(); + BOOST_CHECK(success); + BOOST_CHECK(state.isSet(State::Flag::migrated)); + state.unlock(); + + success = state.lock(); + BOOST_CHECK(success); + BOOST_CHECK(state.isSet(State::Flag::migrated)); + state.toggleFlag(State::Flag::migrated); + BOOST_CHECK(!state.isSet(State::Flag::migrated)); + state.unlock(); + + success = state.lock(); + BOOST_CHECK(success); + BOOST_CHECK(!state.isSet(State::Flag::migrated)); + state.unlock(); +} + +//////////////////////////////////////////////////////////////////////////////// +/// @brief generate tests +//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_SUITE_END() + +// Local Variables: +// mode: outline-minor +// outline-regexp: "^\\(/// @brief\\|/// 
{@inheritDoc}\\|/// @addtogroup\\|// +// --SECTION--\\|/// @\\}\\)" +// End: diff --git a/UnitTests/Cache/TransactionWindow.cpp b/UnitTests/Cache/TransactionWindow.cpp new file mode 100644 index 0000000000..94f85acab1 --- /dev/null +++ b/UnitTests/Cache/TransactionWindow.cpp @@ -0,0 +1,98 @@ +//////////////////////////////////////////////////////////////////////////////// +/// @brief test suite for arangodb::cache::TransactionWindow +/// +/// @file +/// +/// DISCLAIMER +/// +/// Copyright 2017 triagens GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is triAGENS GmbH, Cologne, Germany +/// +/// @author Daniel H. 
Larkin +/// @author Copyright 2017, triAGENS GmbH, Cologne, Germany +//////////////////////////////////////////////////////////////////////////////// + +#include "Basics/Common.h" + +#define BOOST_TEST_INCLUDED +#include + +#include "Cache/TransactionWindow.h" + +#include +#include + +using namespace arangodb::cache; + +// ----------------------------------------------------------------------------- +// --SECTION-- setup / tear-down +// ----------------------------------------------------------------------------- + +struct CCacheTransactionWindowSetup { + CCacheTransactionWindowSetup() { + BOOST_TEST_MESSAGE("setup TransactionWindow"); + } + + ~CCacheTransactionWindowSetup() { + BOOST_TEST_MESSAGE("tear-down TransactionWindow"); + } +}; +// ----------------------------------------------------------------------------- +// --SECTION-- test suite +// ----------------------------------------------------------------------------- + +//////////////////////////////////////////////////////////////////////////////// +/// @brief setup +//////////////////////////////////////////////////////////////////////////////// + +BOOST_FIXTURE_TEST_SUITE(CCacheTransactionWindowTest, + CCacheTransactionWindowSetup) + +//////////////////////////////////////////////////////////////////////////////// +/// @brief test transaction term management +//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_CASE(tst_transaction_term) { + TransactionWindow transactions; + + BOOST_CHECK_EQUAL(0ULL, transactions.term()); + + transactions.start(); + BOOST_CHECK_EQUAL(1ULL, transactions.term()); + transactions.end(); + BOOST_CHECK_EQUAL(2ULL, transactions.term()); + + transactions.start(); + BOOST_CHECK_EQUAL(3ULL, transactions.term()); + transactions.start(); + BOOST_CHECK_EQUAL(3ULL, transactions.term()); + transactions.end(); + BOOST_CHECK_EQUAL(3ULL, transactions.term()); + transactions.end(); + BOOST_CHECK_EQUAL(4ULL, transactions.term()); +} + 
+//////////////////////////////////////////////////////////////////////////////// +/// @brief generate tests +//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_SUITE_END() + +// Local Variables: +// mode: outline-minor +// outline-regexp: "^\\(/// @brief\\|/// {@inheritDoc}\\|/// @addtogroup\\|// +// --SECTION--\\|/// @\\}\\)" +// End: diff --git a/UnitTests/Cache/TransactionalBucket.cpp b/UnitTests/Cache/TransactionalBucket.cpp new file mode 100644 index 0000000000..590a4b1e85 --- /dev/null +++ b/UnitTests/Cache/TransactionalBucket.cpp @@ -0,0 +1,356 @@ +//////////////////////////////////////////////////////////////////////////////// +/// @brief test suite for arangodb::cache::TransactionalBucket +/// +/// @file +/// +/// DISCLAIMER +/// +/// Copyright 2017 triagens GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is triAGENS GmbH, Cologne, Germany +/// +/// @author Daniel H. 
Larkin +/// @author Copyright 2017, triAGENS GmbH, Cologne, Germany +//////////////////////////////////////////////////////////////////////////////// + +#include "Basics/Common.h" + +#define BOOST_TEST_INCLUDED +#include + +#include "Cache/TransactionalBucket.h" + +#include +#include + +using namespace arangodb::cache; + +// ----------------------------------------------------------------------------- +// --SECTION-- setup / tear-down +// ----------------------------------------------------------------------------- + +struct CCacheTransactionalBucketSetup { + CCacheTransactionalBucketSetup() { + BOOST_TEST_MESSAGE("setup TransactionalBucket"); + } + + ~CCacheTransactionalBucketSetup() { + BOOST_TEST_MESSAGE("tear-down TransactionalBucket"); + } +}; +// ----------------------------------------------------------------------------- +// --SECTION-- test suite +// ----------------------------------------------------------------------------- + +//////////////////////////////////////////////////////////////////////////////// +/// @brief setup +//////////////////////////////////////////////////////////////////////////////// + +BOOST_FIXTURE_TEST_SUITE(CCacheTransactionalBucketTest, + CCacheTransactionalBucketSetup) + +//////////////////////////////////////////////////////////////////////////////// +/// @brief test lock methods +//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_CASE(tst_locks) { + TransactionalBucket bucket; + bool success; + + // check lock without contention + BOOST_CHECK(!bucket.isLocked()); + success = bucket.lock(0ULL, -1LL); + BOOST_CHECK(success); + BOOST_CHECK(bucket.isLocked()); + + // check lock with contention + success = bucket.lock(0ULL, 10LL); + BOOST_CHECK(!success); + BOOST_CHECK(bucket.isLocked()); + + // check unlock + bucket.unlock(); + BOOST_CHECK(!bucket.isLocked()); + + // check that blacklist term is updated appropriately + BOOST_CHECK_EQUAL(0ULL, bucket._blacklistTerm); + 
bucket.lock(1ULL, -1LL); + BOOST_CHECK_EQUAL(1ULL, bucket._blacklistTerm); + bucket.unlock(); + BOOST_CHECK_EQUAL(1ULL, bucket._blacklistTerm); +} + +//////////////////////////////////////////////////////////////////////////////// +/// @brief test insertion to full and fail beyond +//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_CASE(tst_insertion) { + TransactionalBucket bucket; + bool success; + + uint32_t hashes[4] = { + 1, 2, 3, 4}; // don't have to be real, but should be unique and non-zero + uint64_t keys[4] = {0, 1, 2, 3}; + uint64_t values[4] = {0, 1, 2, 3}; + CachedValue* ptrs[4]; + for (size_t i = 0; i < 4; i++) { + ptrs[i] = CachedValue::construct(&(keys[i]), sizeof(uint64_t), &(values[i]), + sizeof(uint64_t)); + } + + success = bucket.lock(0, -1LL); + BOOST_CHECK(success); + + // insert three to fill + BOOST_CHECK(!bucket.isFull()); + for (size_t i = 0; i < 3; i++) { + bucket.insert(hashes[i], ptrs[i]); + if (i < 2) { + BOOST_CHECK(!bucket.isFull()); + } else { + BOOST_CHECK(bucket.isFull()); + } + } + for (size_t i = 0; i < 3; i++) { + CachedValue* res = bucket.find(hashes[i], ptrs[i]->key(), ptrs[i]->keySize); + BOOST_CHECK(res == ptrs[i]); + } + + // check that insert is ignored if full + bucket.insert(hashes[3], ptrs[3]); + CachedValue* res = bucket.find(hashes[3], ptrs[3]->key(), ptrs[3]->keySize); + BOOST_CHECK(nullptr == res); + + bucket.unlock(); + + // cleanup + for (size_t i = 0; i < 4; i++) { + delete ptrs[i]; + } +} + +//////////////////////////////////////////////////////////////////////////////// +/// @brief test removal +//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_CASE(tst_removal) { + TransactionalBucket bucket; + bool success; + + uint32_t hashes[3] = { + 1, 2, 3}; // don't have to be real, but should be unique and non-zero + uint64_t keys[3] = {0, 1, 2}; + uint64_t values[3] = {0, 1, 2}; + CachedValue* ptrs[3]; + for (size_t i = 
0; i < 3; i++) { + ptrs[i] = CachedValue::construct(&(keys[i]), sizeof(uint64_t), &(values[i]), + sizeof(uint64_t)); + } + + success = bucket.lock(0, -1LL); + BOOST_CHECK(success); + + for (size_t i = 0; i < 3; i++) { + bucket.insert(hashes[i], ptrs[i]); + } + for (size_t i = 0; i < 3; i++) { + CachedValue* res = bucket.find(hashes[i], ptrs[i]->key(), ptrs[i]->keySize); + BOOST_CHECK(res == ptrs[i]); + } + + CachedValue* res; + res = bucket.remove(hashes[1], ptrs[1]->key(), ptrs[1]->keySize); + BOOST_CHECK(res == ptrs[1]); + res = bucket.find(hashes[1], ptrs[1]->key(), ptrs[1]->keySize); + BOOST_CHECK(nullptr == res); + res = bucket.remove(hashes[0], ptrs[0]->key(), ptrs[0]->keySize); + BOOST_CHECK(res == ptrs[0]); + res = bucket.find(hashes[0], ptrs[0]->key(), ptrs[0]->keySize); + BOOST_CHECK(nullptr == res); + res = bucket.remove(hashes[2], ptrs[2]->key(), ptrs[2]->keySize); + BOOST_CHECK(res == ptrs[2]); + res = bucket.find(hashes[2], ptrs[2]->key(), ptrs[2]->keySize); + BOOST_CHECK(nullptr == res); + + bucket.unlock(); + + // cleanup + for (size_t i = 0; i < 3; i++) { + delete ptrs[i]; + } +} + +//////////////////////////////////////////////////////////////////////////////// +/// @brief test eviction with subsequent insertion +//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_CASE(tst_eviction) { + TransactionalBucket bucket; + bool success; + + uint32_t hashes[4] = { + 1, 2, 3, 4}; // don't have to be real, but should be unique and non-zero + uint64_t keys[4] = {0, 1, 2, 3}; + uint64_t values[4] = {0, 1, 2, 3}; + CachedValue* ptrs[4]; + for (size_t i = 0; i < 4; i++) { + ptrs[i] = CachedValue::construct(&(keys[i]), sizeof(uint64_t), &(values[i]), + sizeof(uint64_t)); + } + + success = bucket.lock(0, -1LL); + BOOST_CHECK(success); + + // insert three to fill + BOOST_CHECK(!bucket.isFull()); + for (size_t i = 0; i < 3; i++) { + bucket.insert(hashes[i], ptrs[i]); + if (i < 2) { + BOOST_CHECK(!bucket.isFull()); + } 
else { + BOOST_CHECK(bucket.isFull()); + } + } + for (size_t i = 0; i < 3; i++) { + CachedValue* res = bucket.find(hashes[i], ptrs[i]->key(), ptrs[i]->keySize); + BOOST_CHECK(res == ptrs[i]); + } + + // check that we get proper eviction candidate + CachedValue* candidate = bucket.evictionCandidate(); + BOOST_CHECK(candidate == ptrs[0]); + bucket.evict(candidate, false); + CachedValue* res = bucket.find(hashes[0], ptrs[0]->key(), ptrs[0]->keySize); + BOOST_CHECK(nullptr == res); + BOOST_CHECK(!bucket.isFull()); + + // check that we still find the right candidate if not full + candidate = bucket.evictionCandidate(); + BOOST_CHECK(candidate == ptrs[1]); + bucket.evict(candidate, true); + res = bucket.find(hashes[1], ptrs[1]->key(), ptrs[1]->keySize); + BOOST_CHECK(nullptr == res); + BOOST_CHECK(!bucket.isFull()); + + // check that we can insert now after eviction optimized for insertion + bucket.insert(hashes[3], ptrs[3]); + res = bucket.find(hashes[3], ptrs[3]->key(), ptrs[3]->keySize); + BOOST_CHECK(res == ptrs[3]); + + bucket.unlock(); + + // cleanup + for (size_t i = 0; i < 4; i++) { + delete ptrs[i]; + } +} + +//////////////////////////////////////////////////////////////////////////////// +/// @brief test blacklist methods +//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_CASE(tst_blacklist) { + TransactionalBucket bucket; + bool success; + CachedValue* res; + + uint32_t hashes[7] = {1, 1, 2, 3, + 4, 5, 6}; // don't have to be real, want some overlap + uint64_t keys[6] = {0, 1, 2, 3, 4, 5}; + uint64_t values[6] = {0, 1, 2, 3, 4, 5}; + CachedValue* ptrs[6]; + for (size_t i = 0; i < 6; i++) { + ptrs[i] = CachedValue::construct(&(keys[i]), sizeof(uint64_t), &(values[i]), + sizeof(uint64_t)); + } + + success = bucket.lock(0, -1LL); + BOOST_CHECK(success); + + // insert three to fill + BOOST_CHECK(!bucket.isFull()); + for (size_t i = 0; i < 3; i++) { + bucket.insert(hashes[i], ptrs[i]); + if (i < 2) { + 
BOOST_CHECK(!bucket.isFull()); + } else { + BOOST_CHECK(bucket.isFull()); + } + } + for (size_t i = 0; i < 3; i++) { + res = bucket.find(hashes[i], ptrs[i]->key(), ptrs[i]->keySize); + BOOST_CHECK(res == ptrs[i]); + } + + // blacklist 1-4 to fill blacklist + for (size_t i = 1; i < 5; i++) { + bucket.blacklist(hashes[i], ptrs[i]->key(), ptrs[i]->keySize); + } + for (size_t i = 1; i < 5; i++) { + BOOST_CHECK(bucket.isBlacklisted(hashes[i])); + res = bucket.find(hashes[i], ptrs[i]->key(), ptrs[i]->keySize); + BOOST_CHECK(nullptr == res); + } + // verify actually not fully blacklisted + BOOST_CHECK(!bucket.isFullyBlacklisted()); + BOOST_CHECK(!bucket.isBlacklisted(hashes[6])); + // verify it didn't remove matching hash with non-matching key + res = bucket.find(hashes[0], ptrs[0]->key(), ptrs[0]->keySize); + BOOST_CHECK(res == ptrs[0]); + + // verify we can't insert a key with a blacklisted hash + bucket.insert(hashes[1], ptrs[1]); + res = bucket.find(hashes[1], ptrs[1]->key(), ptrs[1]->keySize); + BOOST_CHECK(nullptr == res); + + // proceed to fully blacklist + bucket.blacklist(hashes[5], ptrs[5]->key(), ptrs[5]->keySize); + BOOST_CHECK(bucket.isBlacklisted(hashes[5])); + res = bucket.find(hashes[5], ptrs[5]->key(), ptrs[5]->keySize); + BOOST_CHECK(nullptr == res); + // make sure it still didn't remove non-matching key + res = bucket.find(hashes[0], ptrs[0]->key(), ptrs[0]->keySize); + BOOST_CHECK(ptrs[0] == res); + // make sure it's fully blacklisted + BOOST_CHECK(bucket.isFullyBlacklisted()); + BOOST_CHECK(bucket.isBlacklisted(hashes[6])); + + bucket.unlock(); + + // check that updating blacklist term clears blacklist + bucket.lock(2ULL, -1LL); + BOOST_CHECK(!bucket.isFullyBlacklisted()); + for (size_t i = 0; i < 7; i++) { + BOOST_CHECK(!bucket.isBlacklisted(hashes[i])); + } + bucket.unlock(); + + // cleanup + for (size_t i = 0; i < 6; i++) { + delete ptrs[i]; + } +} + +//////////////////////////////////////////////////////////////////////////////// +/// @brief 
generate tests +//////////////////////////////////////////////////////////////////////////////// + +BOOST_AUTO_TEST_SUITE_END() + +// Local Variables: +// mode: outline-minor +// outline-regexp: "^\\(/// @brief\\|/// {@inheritDoc}\\|/// @addtogroup\\|// +// --SECTION--\\|/// @\\}\\)" +// End: diff --git a/arangod/Agency/AddFollower.cpp b/arangod/Agency/AddFollower.cpp index 843ef3caaa..e421591bb5 100644 --- a/arangod/Agency/AddFollower.cpp +++ b/arangod/Agency/AddFollower.cpp @@ -25,13 +25,7 @@ #include "Agency/Agent.h" #include "Agency/Job.h" -#include -#include -#include -#include - using namespace arangodb::consensus; -using namespace arangodb::velocypack; AddFollower::AddFollower(Node const& snapshot, Agent* agent, std::string const& jobId, std::string const& creator, diff --git a/arangod/Agency/AgencyComm.cpp b/arangod/Agency/AgencyComm.cpp index 99190c6c7e..80fb7b90b0 100644 --- a/arangod/Agency/AgencyComm.cpp +++ b/arangod/Agency/AgencyComm.cpp @@ -1,4 +1,4 @@ -//////////////////////////////////////////////////////////////////////////////// +/////////////////////////////////////////////////////////////////////////////// /// DISCLAIMER /// /// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany @@ -811,7 +811,7 @@ bool AgencyComm::exists(std::string const& key) { return false; } - auto parts = arangodb::basics::StringUtils::split(key, "/"); + auto parts = basics::StringUtils::split(key, "/"); std::vector allParts; allParts.reserve(parts.size() + 1); allParts.push_back(AgencyCommManager::path()); @@ -1130,7 +1130,7 @@ bool AgencyComm::ensureStructureInitialized() { std::vector({AgencyCommManager::path(), "Secret"})); if (!secretValue.isString()) { - LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "Couldn't find secret in agency!"; + LOG_TOPIC(ERR, Logger::CLUSTER) << "Couldn't find secret in agency!"; return false; } std::string const secret = secretValue.copyString(); @@ -1489,16 +1489,7 @@ AgencyCommResult AgencyComm::send( << "': " << body; 
arangodb::httpclient::SimpleHttpClient client(connection, timeout, false); - auto cc = ClusterComm::instance(); - if (cc == nullptr) { - // nullptr only happens during controlled shutdown - result._message = "could not send request to agency because of shutdown"; - LOG_TOPIC(TRACE, Logger::AGENCYCOMM) - << "could not send request to agency because of shutdown"; - - return result; - } - client.setJwt(cc->jwt()); + client.setJwt(ClusterComm::instance()->jwt()); client.keepConnectionOnDestruction(true); // set up headers @@ -1699,10 +1690,10 @@ bool AgencyComm::tryInitializeStructure(std::string const& jwtSecret) { return result.successful(); } catch (std::exception const& e) { - LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "Fatal error initializing agency " << e.what(); + LOG_TOPIC(FATAL, Logger::CLUSTER) << "Fatal error initializing agency " << e.what(); FATAL_ERROR_EXIT(); } catch (...) { - LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "Fatal error initializing agency"; + LOG_TOPIC(FATAL, Logger::CLUSTER) << "Fatal error initializing agency"; FATAL_ERROR_EXIT(); } } diff --git a/arangod/Agency/AgencyComm.h b/arangod/Agency/AgencyComm.h index fcbf54b281..45edd12a84 100644 --- a/arangod/Agency/AgencyComm.h +++ b/arangod/Agency/AgencyComm.h @@ -629,6 +629,14 @@ class AgencyComm { void updateEndpoints(arangodb::velocypack::Slice const&); + bool lockRead(std::string const&, double, double); + + bool lockWrite(std::string const&, double, double); + + bool unlockRead(std::string const&, double); + + bool unlockWrite(std::string const&, double); + AgencyCommResult sendTransactionWithFailover(AgencyTransaction const&, double timeout = 0.0); diff --git a/arangod/Agency/Agent.cpp b/arangod/Agency/Agent.cpp index ffec34056f..36f85fd039 100644 --- a/arangod/Agency/Agent.cpp +++ b/arangod/Agency/Agent.cpp @@ -1,3 +1,4 @@ + //////////////////////////////////////////////////////////////////////////////// /// DISCLAIMER /// @@ -47,15 +48,18 @@ Agent::Agent(config_t const& config) 
_config(config), _lastCommitIndex(0), _lastAppliedIndex(0), + _lastCompactionIndex(0), _leaderCommitIndex(0), _spearhead(this), _readDB(this), _transient(this), + _compacted(this), _nextCompationAfter(_config.compactionStepSize()), _inception(std::make_unique(this)), _activator(nullptr), _compactor(this), - _ready(false) { + _ready(false), + _preparing(false) { _state.configure(this); _constituent.configure(this); } @@ -153,7 +157,7 @@ std::string Agent::leaderID() const { /// Are we leading? bool Agent::leading() const { - return _constituent.leading(); + return _preparing || _constituent.leading(); } /// Start constituent personality @@ -272,14 +276,14 @@ bool Agent::recvAppendEntriesRPC( // Update commit index if (queries->slice().type() != VPackValueType::Array) { - LOG_TOPIC(WARN, Logger::AGENCY) + LOG_TOPIC(DEBUG, Logger::AGENCY) << "Received malformed entries for appending. Discarding!"; return false; } if (!_constituent.checkLeader(term, leaderId, prevIndex, prevTerm)) { - LOG_TOPIC(WARN, Logger::AGENCY) << "Not accepting appendEntries from " - << leaderId; + LOG_TOPIC(DEBUG, Logger::AGENCY) + << "Not accepting appendEntries from " << leaderId; return false; } @@ -324,8 +328,8 @@ bool Agent::recvAppendEntriesRPC( /// Leader's append entries void Agent::sendAppendEntriesRPC() { - std::chrono::duration> const dt ( - (_config.waitForSync() ? 40000 : 2000)); + std::chrono::duration> const dt ( + (_config.waitForSync() ? 
40 : 2)); auto cc = ClusterComm::instance(); if (cc == nullptr) { // nullptr only happens during controlled shutdown @@ -351,12 +355,6 @@ void Agent::sendAppendEntriesRPC() { std::vector unconfirmed = _state.get(last_confirmed); - if (unconfirmed.empty()) { - // this can only happen if the log is totally empty (I think, Max) - // and so it is OK, to skip the time check here - continue; - } - index_t highest = unconfirmed.back().index; // _lastSent, _lastHighest: local and single threaded access @@ -378,7 +376,8 @@ void Agent::sendAppendEntriesRPC() { // Body Builder builder; builder.add(VPackValue(VPackValueType::Array)); - if ((system_clock::now() - _earliestPackage[followerId]).count() > 0) { + if (!_preparing && + ((system_clock::now() - _earliestPackage[followerId]).count() > 0)) { for (size_t i = 1; i < unconfirmed.size(); ++i) { auto const& entry = unconfirmed.at(i); builder.add(VPackValue(VPackValueType::Object)); @@ -413,8 +412,9 @@ void Agent::sendAppendEntriesRPC() { "1", 1, _config.poolAt(followerId), arangodb::rest::RequestType::POST, path.str(), std::make_shared(builder.toJson()), headerFields, - std::make_shared(this, followerId, highest, toLog), - 5.0 * _config.maxPing(), true); + std::make_shared( + this, followerId, (toLog) ? highest : 0, toLog), + std::max(1.0e-3 * toLog * dt.count(), 0.25 * _config.minPing()), true); // _lastSent, _lastHighest: local and single threaded access _lastSent[followerId] = system_clock::now(); @@ -422,7 +422,7 @@ void Agent::sendAppendEntriesRPC() { if (toLog > 0) { _earliestPackage[followerId] = system_clock::now() + toLog * dt; - LOG_TOPIC(TRACE, Logger::AGENCY) + LOG_TOPIC(DEBUG, Logger::AGENCY) << "Appending " << unconfirmed.size() - 1 << " entries up to index " << highest << " to follower " << followerId << ". 
Message: " << builder.toJson() @@ -430,7 +430,7 @@ void Agent::sendAppendEntriesRPC() { << std::chrono::duration( _earliestPackage[followerId]-system_clock::now()).count() << "ms"; } else { - LOG_TOPIC(TRACE, Logger::AGENCY) + LOG_TOPIC(DEBUG, Logger::AGENCY) << "Just keeping follower " << followerId << " devout with " << builder.toJson(); } @@ -837,7 +837,7 @@ void Agent::run() { sendAppendEntriesRPC(); // Don't panic - _appendCV.wait(1000); + _appendCV.wait(100); // Detect faulty agent and replace // if possible and only if not already activating @@ -1000,6 +1000,7 @@ void Agent::beginShutdown() { void Agent::prepareLead() { + _preparing = true; // Key value stores rebuildDBs(); @@ -1020,9 +1021,11 @@ void Agent::lead() { // Wake up run { CONDITION_LOCKER(guard, _appendCV); + _preparing = false; guard.broadcast(); } + // Agency configuration term_t myterm; { @@ -1169,17 +1172,23 @@ arangodb::consensus::index_t Agent::rebuildDBs() { // Apply logs from last applied index to leader's commit index LOG_TOPIC(DEBUG, Logger::AGENCY) - << "Rebuilding kvstores from index " + << "Rebuilding key-value stores from index " << _lastAppliedIndex << " to " << _leaderCommitIndex; _spearhead.apply( - _state.slices(_lastAppliedIndex+1, _leaderCommitIndex), + _state.slices(_lastAppliedIndex+1, _leaderCommitIndex+1), _leaderCommitIndex, _constituent.term()); _readDB.apply( - _state.slices(_lastAppliedIndex+1, _leaderCommitIndex), + _state.slices(_lastAppliedIndex+1, _leaderCommitIndex+1), _leaderCommitIndex, _constituent.term()); + + _compacted.apply( + _state.slices(_lastCompactionIndex+1, _leaderCommitIndex+1), + _leaderCommitIndex, _constituent.term()); + _lastAppliedIndex = _leaderCommitIndex; + _lastCompactionIndex = _leaderCommitIndex; return _lastAppliedIndex; @@ -1195,9 +1204,11 @@ void Agent::compact() { /// Last commit index -arangodb::consensus::index_t Agent::lastCommitted() const { +std::pair + Agent::lastCommitted() const { MUTEX_LOCKER(ioLocker, _ioLock); - return 
_lastCommitIndex; + return std::pair( + _lastCommitIndex,_leaderCommitIndex); } /// Last commit index @@ -1382,8 +1393,42 @@ bool Agent::ready() const { return true; } - return _ready.load(); + return _ready; + } +query_t Agent::buildDB(arangodb::consensus::index_t index) { + + auto builder = std::make_shared(); + arangodb::consensus::index_t start = 0, end = 0; + + Store store(this); + { + + MUTEX_LOCKER(ioLocker, _ioLock); + store = _compacted; + + MUTEX_LOCKER(liLocker, _liLock); + end = _leaderCommitIndex; + start = _lastCompactionIndex+1; + + } + + if (index > end) { + LOG_TOPIC(INFO, Logger::AGENCY) + << "Cannot snapshot beyond leaderCommitIndex: " << end; + index = end; + } else if (index < start) { + LOG_TOPIC(INFO, Logger::AGENCY) + << "Cannot snapshot before last compaction index: " << start; + index = start+1; + } + + store.apply(_state.slices(start+1, index), index, _constituent.term()); + store.toBuilder(*builder); + + return builder; + +} }} // namespace diff --git a/arangod/Agency/Agent.h b/arangod/Agency/Agent.h index 1d2ab55936..fbc8ae2403 100644 --- a/arangod/Agency/Agent.h +++ b/arangod/Agency/Agent.h @@ -77,7 +77,7 @@ class Agent : public arangodb::Thread { bool fitness() const; /// @brief Leader ID - index_t lastCommitted() const; + std::pair lastCommitted() const; /// @brief Leader ID std::string leaderID() const; @@ -222,6 +222,9 @@ class Agent : public arangodb::Thread { /// @brief Update a peers endpoint in my configuration void updatePeerEndpoint(std::string const& id, std::string const& ep); + /// @brief Assemble an agency to commitId + query_t buildDB(index_t); + /// @brief State reads persisted state and prepares the agent friend class State; friend class Compactor; @@ -270,6 +273,9 @@ class Agent : public arangodb::Thread { /// @brief Last compaction index index_t _lastAppliedIndex; + /// @brief Last compaction index + index_t _lastCompactionIndex; + /// @brief Last compaction index index_t _leaderCommitIndex; @@ -282,6 +288,9 @@ class 
Agent : public arangodb::Thread { /// @brief Committed (read) kv-store Store _transient; + /// @brief Last compacted store + Store _compacted; + /// @brief Condition variable for appendEntries arangodb::basics::ConditionVariable _appendCV; @@ -326,6 +335,7 @@ class Agent : public arangodb::Thread { /// @brief Agent is ready for RAFT std::atomic _ready; + std::atomic _preparing; /// @brief Keep track of when I last took on leadership TimePoint _leaderSince; diff --git a/arangod/Agency/AgentCallback.cpp b/arangod/Agency/AgentCallback.cpp index 691a6c543b..2208457ea5 100644 --- a/arangod/Agency/AgentCallback.cpp +++ b/arangod/Agency/AgentCallback.cpp @@ -39,23 +39,43 @@ AgentCallback::AgentCallback(Agent* agent, std::string const& slaveID, void AgentCallback::shutdown() { _agent = 0; } bool AgentCallback::operator()(arangodb::ClusterCommResult* res) { + if (res->status == CL_COMM_SENT) { + if (_agent) { - _agent->reportIn(_slaveID, _last, _toLog); + + try { // Check success + if (res->result->getBodyVelocyPack()->slice().get("success").getBool()) { + _agent->reportIn(_slaveID, _last, _toLog); + } + LOG_TOPIC(DEBUG, Logger::CLUSTER) + << "success: true " << res->result->getBodyVelocyPack()->toJson(); + } catch (...) 
{ + LOG_TOPIC(INFO, Logger::CLUSTER) + << "success: false" << res->result->getBodyVelocyPack()->toJson(); + } + + } + LOG_TOPIC(DEBUG, Logger::AGENCY) << "Got good callback from AppendEntriesRPC: " << "comm_status(" << res->status << "), last(" << _last << "), follower(" << _slaveID << "), time(" << TRI_microtime() - _startTime << ")"; + } else { + LOG_TOPIC(DEBUG, Logger::AGENCY) << "Got bad callback from AppendEntriesRPC: " << "comm_status(" << res->status << "), last(" << _last << "), follower(" << _slaveID << "), time(" << TRI_microtime() - _startTime << ")"; + } + return true; + } diff --git a/arangod/Agency/CleanOutServer.cpp b/arangod/Agency/CleanOutServer.cpp index 9fa87616b0..a49ce97f8c 100644 --- a/arangod/Agency/CleanOutServer.cpp +++ b/arangod/Agency/CleanOutServer.cpp @@ -28,7 +28,6 @@ #include "Agency/MoveShard.h" using namespace arangodb::consensus; -using namespace arangodb::velocypack; CleanOutServer::CleanOutServer(Node const& snapshot, Agent* agent, std::string const& jobId, diff --git a/arangod/Agency/Constituent.cpp b/arangod/Agency/Constituent.cpp index 31a5869c03..7effa6d082 100644 --- a/arangod/Agency/Constituent.cpp +++ b/arangod/Agency/Constituent.cpp @@ -146,6 +146,21 @@ void Constituent::termNoLock(term_t t) { } } +bool Constituent::logUpToDate( + arangodb::consensus::index_t prevLogIndex, term_t prevLogTerm) const { + log_t myLastLogEntry = _agent->state().lastLog(); + return (prevLogTerm > myLastLogEntry.term || + (prevLogTerm == myLastLogEntry.term && + prevLogIndex >= myLastLogEntry.index)); +} + + +bool Constituent::logMatches( + arangodb::consensus::index_t prevLogIndex, term_t prevLogTerm) const { + return _agent->state().has(prevLogIndex, prevLogTerm); +} + + /// My role role_t Constituent::role() const { MUTEX_LOCKER(guard, _castLock); @@ -257,8 +272,8 @@ std::string Constituent::endpoint(std::string id) const { } /// @brief Check leader -bool Constituent::checkLeader(term_t term, std::string id, index_t prevLogIndex, - term_t 
prevLogTerm) { +bool Constituent::checkLeader( + term_t term, std::string id, index_t prevLogIndex, term_t prevLogTerm) { TRI_ASSERT(_vocbase != nullptr); @@ -277,6 +292,11 @@ bool Constituent::checkLeader(term_t term, std::string id, index_t prevLogIndex, if (term > _term) { termNoLock(term); } + + if (!logMatches(prevLogIndex,prevLogTerm)) { + return false; + } + if (_leaderID != id) { LOG_TOPIC(DEBUG, Logger::AGENCY) << "Set _leaderID to " << id << " in term " << _term; @@ -421,7 +441,7 @@ void Constituent::callElection() { auto res = ClusterComm::instance()->wait( "", coordinatorTransactionID, 0, "", - duration(steady_clock::now()-timeout).count()); + duration(timeout - steady_clock::now()).count()); if (res.status == CL_COMM_SENT) { auto body = res.result->getBodyVelocyPack(); @@ -571,6 +591,11 @@ void Constituent::run() { if (_lastHeartbeatSeen > 0.0) { double now = TRI_microtime(); randWait -= static_cast(M * (now-_lastHeartbeatSeen)); + if (randWait < a) { + randWait = a; + } else if (randWait > b) { + randWait = b; + } } } diff --git a/arangod/Agency/Constituent.h b/arangod/Agency/Constituent.h index 2ef8a268f7..b8bb71db7a 100644 --- a/arangod/Agency/Constituent.h +++ b/arangod/Agency/Constituent.h @@ -126,6 +126,12 @@ class Constituent : public Thread { // Wait for sync bool waitForSync() const; + // Check if log up to date with ours + bool logUpToDate(index_t, term_t) const; + + // Check if log start matches entry in my log + bool logMatches(index_t, term_t) const; + // Sleep for how long duration_t sleepFor(double, double); diff --git a/arangod/Agency/FailedFollower.cpp b/arangod/Agency/FailedFollower.cpp index ab5e9eae09..6aa7e7539e 100644 --- a/arangod/Agency/FailedFollower.cpp +++ b/arangod/Agency/FailedFollower.cpp @@ -27,7 +27,6 @@ #include "Agency/Job.h" using namespace arangodb::consensus; -using namespace arangodb::velocypack; FailedFollower::FailedFollower(Node const& snapshot, Agent* agent, std::string const& jobId, @@ -125,6 +124,7 @@ bool 
FailedFollower::start() { Node const& planned = _snapshot(planPath); + // Copy todo to pending Builder todo, pending; diff --git a/arangod/Agency/FailedLeader.cpp b/arangod/Agency/FailedLeader.cpp index 4c5203cdd4..c314edbff0 100644 --- a/arangod/Agency/FailedLeader.cpp +++ b/arangod/Agency/FailedLeader.cpp @@ -30,7 +30,6 @@ #include using namespace arangodb::consensus; -using namespace arangodb::velocypack; FailedLeader::FailedLeader(Node const& snapshot, Agent* agent, std::string const& jobId, std::string const& creator, @@ -173,17 +172,23 @@ bool FailedLeader::start() { // Distribute shards like to come! std::vector planv; for (auto const& i : VPackArrayIterator(planned)) { - planv.push_back(i.copyString()); + auto s = i.copyString(); + if (s != _from && s != _to) { + planv.push_back(i.copyString()); + } } pending.add(_agencyPrefix + planPath, VPackValue(VPackValueType::Array)); + + pending.add(VPackValue(_to)); for (auto const& i : VPackArrayIterator(current)) { std::string s = i.copyString(); - if (s != _from) { + if (s != _from && s != _to) { pending.add(i); planv.erase(std::remove(planv.begin(), planv.end(), s), planv.end()); } } + pending.add(VPackValue(_from)); for (auto const& i : planv) { pending.add(VPackValue(i)); diff --git a/arangod/Agency/FailedServer.cpp b/arangod/Agency/FailedServer.cpp index 06079f9698..491b61c38c 100644 --- a/arangod/Agency/FailedServer.cpp +++ b/arangod/Agency/FailedServer.cpp @@ -30,7 +30,6 @@ #include "Agency/UnassumedLeadership.h" using namespace arangodb::consensus; -using namespace arangodb::velocypack; FailedServer::FailedServer(Node const& snapshot, Agent* agent, std::string const& jobId, std::string const& creator, @@ -286,7 +285,9 @@ JOB_STATUS FailedServer::status() { deleteTodos->openArray(); deleteTodos->openObject(); } - deleteTodos->add(_agencyPrefix + toDoPrefix + subJob.first, VPackValue(VPackValueType::Object)); + deleteTodos->add( + _agencyPrefix + toDoPrefix + subJob.first, + 
VPackValue(VPackValueType::Object)); deleteTodos->add("op", VPackValue("delete")); deleteTodos->close(); } else { @@ -302,7 +303,9 @@ JOB_STATUS FailedServer::status() { } if (deleteTodos) { - LOG_TOPIC(INFO, Logger::AGENCY) << "Server " << _server << " is healthy again. Will try to delete any jobs which have not yet started!"; + LOG_TOPIC(INFO, Logger::AGENCY) + << "Server " << _server << " is healthy again. Will try to delete" + "any jobs which have not yet started!"; deleteTodos->close(); deleteTodos->close(); // Transact to agency diff --git a/arangod/Agency/Inception.cpp b/arangod/Agency/Inception.cpp index 148532545a..0e62eccca2 100644 --- a/arangod/Agency/Inception.cpp +++ b/arangod/Agency/Inception.cpp @@ -36,7 +36,6 @@ #include using namespace arangodb::consensus; -using namespace arangodb::velocypack; Inception::Inception() : Thread("Inception"), _agent(nullptr) {} diff --git a/arangod/Agency/Job.cpp b/arangod/Agency/Job.cpp index e121fe43d6..24e56d14f6 100644 --- a/arangod/Agency/Job.cpp +++ b/arangod/Agency/Job.cpp @@ -24,7 +24,6 @@ #include "Job.h" using namespace arangodb::consensus; -using namespace arangodb::velocypack; bool arangodb::consensus::compareServerLists(Slice plan, Slice current) { if (!plan.isArray() || !current.isArray()) { diff --git a/arangod/Agency/Job.h b/arangod/Agency/Job.h index d42a315ebe..8826f90de1 100644 --- a/arangod/Agency/Job.h +++ b/arangod/Agency/Job.h @@ -28,7 +28,6 @@ #include "Node.h" #include "Supervision.h" -#include #include #include #include @@ -42,7 +41,7 @@ namespace consensus { // and all others followers. Both arguments must be arrays. Returns true, // if the first items in both slice are equal and if both arrays contain // the same set of strings. 
-bool compareServerLists(arangodb::velocypack::Slice plan, arangodb::velocypack::Slice current); +bool compareServerLists(Slice plan, Slice current); enum JOB_STATUS { TODO, PENDING, FINISHED, FAILED, NOTFOUND }; const std::vector pos({"/Target/ToDo/", "/Target/Pending/", @@ -64,9 +63,9 @@ static std::string const plannedServers = "/Plan/DBServers"; static std::string const healthPrefix = "/Supervision/Health/"; inline arangodb::consensus::write_ret_t transact(Agent* _agent, - arangodb::velocypack::Builder const& transaction, + Builder const& transaction, bool waitForCommit = true) { - query_t envelope = std::make_shared(); + query_t envelope = std::make_shared(); try { envelope->openArray(); @@ -138,7 +137,7 @@ struct Job { std::string _creator; std::string _agencyPrefix; - std::shared_ptr _jb; + std::shared_ptr _jb; }; diff --git a/arangod/Agency/MoveShard.cpp b/arangod/Agency/MoveShard.cpp index 7a10d2740b..c38a08686d 100644 --- a/arangod/Agency/MoveShard.cpp +++ b/arangod/Agency/MoveShard.cpp @@ -29,7 +29,6 @@ static std::string const DBServer = "DBServer"; using namespace arangodb::consensus; -using namespace arangodb::velocypack; MoveShard::MoveShard(Node const& snapshot, Agent* agent, std::string const& jobId, std::string const& creator, diff --git a/arangod/Agency/Node.cpp b/arangod/Agency/Node.cpp index 8e94d6bb37..1dab13d96b 100644 --- a/arangod/Agency/Node.cpp +++ b/arangod/Agency/Node.cpp @@ -33,9 +33,8 @@ #include #include -using namespace arangodb::basics; using namespace arangodb::consensus; -using namespace arangodb::velocypack; +using namespace arangodb::basics; struct NotEmpty { bool operator()(const std::string& s) { return !s.empty(); } @@ -700,28 +699,6 @@ void Node::toBuilder(Builder& builder, bool showHidden) const { } } -void Node::toObject(Builder& builder, bool showHidden) const { - try { - if (type() == NODE) { - VPackObjectBuilder guard(&builder); - for (auto const& child : _children) { - if (child.first[0] == '.' 
&& !showHidden) { - continue; - } - builder.add(VPackValue(child.first)); - child.second->toBuilder(builder); - } - } else { - if (!slice().isNone()) { - builder.add(slice()); - } - } - - } catch (std::exception const& e) { - LOG_TOPIC(ERR, Logger::AGENCY) << e.what() << " " << __FILE__ << __LINE__; - } -} - // Print internals to ostream std::ostream& Node::print(std::ostream& o) const { Node const* par = _parent; diff --git a/arangod/Agency/Node.h b/arangod/Agency/Node.h index c5c60cc460..4cf67d5e31 100644 --- a/arangod/Agency/Node.h +++ b/arangod/Agency/Node.h @@ -27,9 +27,6 @@ #include "AgencyCommon.h" #include -#include -#include -#include #include #include @@ -53,6 +50,8 @@ enum Operation { REPLACE }; +using namespace arangodb::velocypack; + class StoreException : public std::exception { public: explicit StoreException(std::string const& message) : _message(message) {} @@ -162,10 +161,7 @@ class Node { bool handle(arangodb::velocypack::Slice const&); /// @brief Create Builder representing this store - void toBuilder(arangodb::velocypack::Builder&, bool showHidden = false) const; - - /// @brief Create Builder representing this store - void toObject(arangodb::velocypack::Builder&, bool showHidden = false) const; + void toBuilder(Builder&, bool showHidden = false) const; /// @brief Access children Children& children(); @@ -174,10 +170,10 @@ class Node { Children const& children() const; /// @brief Create slice from value - arangodb::velocypack::Slice slice() const; + Slice slice() const; /// @brief Get value type - arangodb::velocypack::ValueType valueType() const; + ValueType valueType() const; /// @brief Add observer for this node bool addObserver(std::string const&); @@ -222,7 +218,7 @@ class Node { std::string getString() const; /// @brief Get array value - arangodb::velocypack::Slice getArray() const; + Slice getArray() const; protected: /// @brief Add time to live entry @@ -238,8 +234,8 @@ class Node { Store* _store; ///< @brief Store Children _children; 
///< @brief child nodes TimePoint _ttl; ///< @brief my expiry - std::vector> _value; ///< @brief my value - mutable arangodb::velocypack::Buffer _vecBuf; + std::vector> _value; ///< @brief my value + mutable Buffer _vecBuf; mutable bool _vecBufDirty; bool _isArray; }; diff --git a/arangod/Agency/RemoveServer.cpp b/arangod/Agency/RemoveServer.cpp index 8357a4aaf0..d2180a97da 100644 --- a/arangod/Agency/RemoveServer.cpp +++ b/arangod/Agency/RemoveServer.cpp @@ -27,7 +27,6 @@ #include "Agency/Job.h" using namespace arangodb::consensus; -using namespace arangodb::velocypack; RemoveServer::RemoveServer(Node const& snapshot, Agent* agent, std::string const& jobId, std::string const& creator, diff --git a/arangod/Agency/RestAgencyHandler.cpp b/arangod/Agency/RestAgencyHandler.cpp index abe8c767d9..975e662db8 100644 --- a/arangod/Agency/RestAgencyHandler.cpp +++ b/arangod/Agency/RestAgencyHandler.cpp @@ -32,12 +32,13 @@ #include "Basics/StaticStrings.h" #include "Logger/Logger.h" #include "Rest/HttpRequest.h" +#include "Rest/Version.h" using namespace arangodb; + using namespace arangodb::basics; -using namespace arangodb::consensus; using namespace arangodb::rest; -using namespace arangodb::velocypack; +using namespace arangodb::consensus; //////////////////////////////////////////////////////////////////////////////// /// @brief ArangoDB server @@ -218,6 +219,31 @@ RestStatus RestAgencyHandler::handleStores() { return RestStatus::DONE; } +RestStatus RestAgencyHandler::handleStore() { + + if (_request->requestType() == rest::RequestType::POST) { + + arangodb::velocypack::Options options; + auto query = _request->toVelocyPackBuilderPtr(&options); + arangodb::consensus::index_t index = 0; + + try { + index = query->slice().getUInt(); + } catch (...) 
{ + index = _agent->lastCommitted().second; + } + + query_t builder = _agent->buildDB(index); + generateResult(rest::ResponseCode::OK, builder->slice()); + + } else { + generateError(rest::ResponseCode::BAD, 400); + } + + return RestStatus::DONE; + +} + RestStatus RestAgencyHandler::handleWrite() { if (_request->requestType() != rest::RequestType::POST) { @@ -624,12 +650,14 @@ RestStatus RestAgencyHandler::handleConfig() { } // Respond with configuration + auto last = _agent->lastCommitted(); Builder body; { VPackObjectBuilder b(&body); body.add("term", Value(_agent->term())); body.add("leaderId", Value(_agent->leaderID())); - body.add("lastCommitted", Value(_agent->lastCommitted())); + body.add("lastCommitted", Value(last.first)); + body.add("leaderCommitted", Value(last.second)); body.add("lastAcked", _agent->lastAckedAgo()->slice()); body.add("configuration", _agent->config().toBuilder()->slice()); } @@ -691,6 +719,8 @@ RestStatus RestAgencyHandler::execute() { return handleState(); } else if (suffixes[0] == "stores") { return handleStores(); + } else if (suffixes[0] == "store") { + return handleStore(); } else { return reportUnknownMethod(); } diff --git a/arangod/Agency/RestAgencyHandler.h b/arangod/Agency/RestAgencyHandler.h index d62d3cb756..cd4eef43fd 100644 --- a/arangod/Agency/RestAgencyHandler.h +++ b/arangod/Agency/RestAgencyHandler.h @@ -47,6 +47,7 @@ class RestAgencyHandler : public RestBaseHandler { RestStatus reportTooManySuffices(); RestStatus reportUnknownMethod(); RestStatus handleStores(); + RestStatus handleStore(); RestStatus handleRead(); RestStatus handleWrite(); RestStatus handleTransact(); diff --git a/arangod/Agency/RestAgencyPrivHandler.cpp b/arangod/Agency/RestAgencyPrivHandler.cpp index 5b17ff1fcd..34a73c0986 100644 --- a/arangod/Agency/RestAgencyPrivHandler.cpp +++ b/arangod/Agency/RestAgencyPrivHandler.cpp @@ -32,6 +32,7 @@ #include "Logger/Logger.h" #include "Rest/HttpRequest.h" +#include "Rest/Version.h" using namespace arangodb; 
diff --git a/arangod/Agency/State.cpp b/arangod/Agency/State.cpp index 87fcbbc257..c3cf1453b8 100644 --- a/arangod/Agency/State.cpp +++ b/arangod/Agency/State.cpp @@ -315,21 +315,66 @@ std::vector State::get(arangodb::consensus::index_t start, return entries; } - if (end == (std::numeric_limits::max)() || end > _log.size() - 1) { - end = _log.size() - 1; + if (end == (std::numeric_limits::max)() || end > _log.back().index) { + end = _log.back().index; } if (start < _log[0].index) { start = _log[0].index; } - for (size_t i = start - _cur; i <= end; ++i) { + for (size_t i = start - _cur; i <= end - _cur; ++i) { entries.push_back(_log[i]); } return entries; } +/// Get log entries from indices "start" to "end" +/// Throws std::out_of_range exception +log_t State::at(arangodb::consensus::index_t index) const { + + MUTEX_LOCKER(mutexLocker, _logLock); // Cannot be read lock (Compaction) + + + if (_cur > index) { + std::string excMessage = + std::string( + "Access before the start of the log deque: (first, requested): (") + + std::to_string(_cur) + ", " + std::to_string(index); + LOG_TOPIC(DEBUG, Logger::AGENCY) << excMessage; + throw std::out_of_range(excMessage); + } + + auto pos = index - _cur; + if (pos > _log.size()) { + std::string excMessage = + std::string( + "Access beyond the end of the log deque: (last, requested): (") + + std::to_string(_cur+_log.size()) + ", " + std::to_string(index); + LOG_TOPIC(DEBUG, Logger::AGENCY) << excMessage; + throw std::out_of_range(excMessage); + } + + return _log[pos]; + +} + + +/// Have log with specified index and term +bool State::has(arangodb::consensus::index_t index, term_t term) const { + + MUTEX_LOCKER(mutexLocker, _logLock); // Cannot be read lock (Compaction) + + try { + return _log.at(index-_cur).term == term; + } catch (...) 
{} + + return false; + +} + + /// Get vector of past transaction from 'start' to 'end' std::vector State::slices(arangodb::consensus::index_t start, arangodb::consensus::index_t end) const { @@ -906,3 +951,9 @@ std::vector> State::inquire(query_t const& query) const { } +// Index of last log entry +arangodb::consensus::index_t State::lastIndex() const { + MUTEX_LOCKER(mutexLocker, _logLock); + return (!_log.empty()) ? _log.back().index : 0; +} + diff --git a/arangod/Agency/State.h b/arangod/Agency/State.h index 95224de2cc..2631132af9 100644 --- a/arangod/Agency/State.h +++ b/arangod/Agency/State.h @@ -66,21 +66,27 @@ class State { std::vector const& indices, term_t term); /// @brief Single log entry (leader) - arangodb::consensus::index_t log( - velocypack::Slice const& slice, term_t term, - std::string const& clientId = std::string()); + index_t log(velocypack::Slice const& slice, term_t term, + std::string const& clientId = std::string()); /// @brief Log entries (followers) arangodb::consensus::index_t log(query_t const& queries, size_t ndups = 0); - + /// @brief Find entry at index with term bool find(index_t index, term_t term); /// @brief Get complete log entries bound by lower and upper bounds. /// Default: [first, last] std::vector get( - index_t = 0, index_t = (std::numeric_limits::max)()) const; - + index_t = 0, index_t = (std::numeric_limits::max)()) const; + + /// @brief Get complete log entries bound by lower and upper bounds. 
+ /// Default: [first, last] + log_t at(index_t) const; + + /// @brief Has entry with index und term + bool has(index_t, term_t) const; + /// @brief Get log entries by client Id std::vector> inquire(query_t const&) const; @@ -96,6 +102,10 @@ class State { /// after the return log_t lastLog() const; + /// @brief last log entry, copy entry because we do no longer have the lock + /// after the return + index_t lastIndex() const; + /// @brief Set endpoint bool configure(Agent* agent); diff --git a/arangod/Agency/Store.cpp b/arangod/Agency/Store.cpp index b30da1eba7..fcd606999e 100644 --- a/arangod/Agency/Store.cpp +++ b/arangod/Agency/Store.cpp @@ -40,9 +40,8 @@ #include #include -using namespace arangodb::basics; using namespace arangodb::consensus; -using namespace arangodb::velocypack; +using namespace arangodb::basics; /// Non-Emptyness of string struct NotEmpty { @@ -353,15 +352,11 @@ std::vector Store::apply( auto headerFields = std::make_unique>(); - auto cc = ClusterComm::instance(); - if (cc != nullptr) { - // nullptr only happens on controlled shutdown - cc->asyncRequest( - "1", 1, endpoint, rest::RequestType::POST, path, - std::make_shared(body.toString()), headerFields, - std::make_shared(path, body.toJson()), 1.0, true, - 0.01); - } + arangodb::ClusterComm::instance()->asyncRequest( + "1", 1, endpoint, rest::RequestType::POST, path, + std::make_shared(body.toString()), headerFields, + std::make_shared(path, body.toJson()), 1.0, true, 0.01); + } else { LOG_TOPIC(WARN, Logger::AGENCY) << "Malformed URL " << url; } diff --git a/arangod/Agency/Store.h b/arangod/Agency/Store.h index bd2a8edb40..bfde6b673b 100644 --- a/arangod/Agency/Store.h +++ b/arangod/Agency/Store.h @@ -60,10 +60,10 @@ class Store : public arangodb::Thread { std::vector apply(query_t const& query, bool verbose = false); /// @brief Apply single entry in query - bool apply(arangodb::velocypack::Slice const& query, bool verbose = false); + bool apply(Slice const& query, bool verbose = false); 
/// @brief Apply entry in query - std::vector apply(std::vector const& query, + std::vector apply(std::vector const& query, index_t lastCommitIndex, term_t term, bool inform = true); @@ -81,7 +81,7 @@ class Store : public arangodb::Thread { bool start(); /// @brief Dump everything to builder - void dumpToBuilder(arangodb::velocypack::Builder&) const; + void dumpToBuilder(Builder&) const; /// @brief Notify observers void notifyObservers() const; @@ -92,7 +92,7 @@ class Store : public arangodb::Thread { Store& operator=(VPackSlice const& slice); /// @brief Create Builder representing this store - void toBuilder(arangodb::velocypack::Builder&, bool showHidden = false) const; + void toBuilder(Builder&, bool showHidden = false) const; /// @brief Copy out a node Node get(std::string const& path = std::string("/")) const; diff --git a/arangod/Agency/Supervision.cpp b/arangod/Agency/Supervision.cpp index a4d72b550e..4711ddca32 100644 --- a/arangod/Agency/Supervision.cpp +++ b/arangod/Agency/Supervision.cpp @@ -41,9 +41,9 @@ #include "Basics/MutexLocker.h" using namespace arangodb; -using namespace arangodb::application_features; + using namespace arangodb::consensus; -using namespace arangodb::velocypack; +using namespace arangodb::application_features; std::string Supervision::_agencyPrefix = "/arango"; @@ -552,11 +552,13 @@ void Supervision::handleShutdown() { del->close(); auto result = _agent->write(del); if (result.indices.size() != 1) { - LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "Invalid resultsize of " << result.indices.size() - << " found during shutdown"; + LOG_TOPIC(ERR, Logger::AGENCY) + << "Invalid resultsize of " << result.indices.size() + << " found during shutdown"; } else { if (_agent->waitFor(result.indices.at(0)) != Agent::raft_commit_t::OK) { - LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "Result was not written to followers during shutdown"; + LOG_TOPIC(ERR, Logger::AGENCY) + << "Result was not written to followers during shutdown"; } } } diff --git 
a/arangod/Agency/UnassumedLeadership.cpp b/arangod/Agency/UnassumedLeadership.cpp index 569a30f99e..1aa7768685 100644 --- a/arangod/Agency/UnassumedLeadership.cpp +++ b/arangod/Agency/UnassumedLeadership.cpp @@ -27,7 +27,6 @@ #include "Agency/Job.h" using namespace arangodb::consensus; -using namespace arangodb::velocypack; UnassumedLeadership::UnassumedLeadership( Node const& snapshot, Agent* agent, std::string const& jobId, diff --git a/arangod/Agency/v8-agency.cpp b/arangod/Agency/v8-agency.cpp index 46264ba53b..3e14fc7a8b 100644 --- a/arangod/Agency/v8-agency.cpp +++ b/arangod/Agency/v8-agency.cpp @@ -39,7 +39,6 @@ using namespace arangodb; using namespace arangodb::application_features; using namespace arangodb::basics; using namespace arangodb::consensus; -using namespace arangodb::velocypack; static void JS_EnabledAgent(v8::FunctionCallbackInfo const& args) { TRI_V8_TRY_CATCH_BEGIN(isolate); diff --git a/arangod/Aql/Aggregator.cpp b/arangod/Aql/Aggregator.cpp index 9be6236774..89a47247ec 100644 --- a/arangod/Aql/Aggregator.cpp +++ b/arangod/Aql/Aggregator.cpp @@ -33,44 +33,43 @@ using namespace arangodb; using namespace arangodb::aql; using namespace arangodb::basics; -Aggregator* Aggregator::fromTypeString(transaction::Methods* trx, - std::string const& type) { +std::unique_ptr Aggregator::fromTypeString(transaction::Methods* trx, + std::string const& type) { if (type == "LENGTH" || type == "COUNT") { - return new AggregatorLength(trx); + return std::make_unique(trx); } if (type == "MIN") { - return new AggregatorMin(trx); + return std::make_unique(trx); } if (type == "MAX") { - return new AggregatorMax(trx); + return std::make_unique(trx); } if (type == "SUM") { - return new AggregatorSum(trx); + return std::make_unique(trx); } if (type == "AVERAGE" || type == "AVG") { - return new AggregatorAverage(trx); + return std::make_unique(trx); } if (type == "VARIANCE_POPULATION" || type == "VARIANCE") { - return new AggregatorVariance(trx, true); + return 
std::make_unique(trx, true); } if (type == "VARIANCE_SAMPLE") { - return new AggregatorVariance(trx, false); + return std::make_unique(trx, false); } if (type == "STDDEV_POPULATION" || type == "STDDEV") { - return new AggregatorStddev(trx, true); + return std::make_unique(trx, true); } if (type == "STDDEV_SAMPLE") { - return new AggregatorStddev(trx, false); + return std::make_unique(trx, false); } // aggregator function name should have been validated before - TRI_ASSERT(false); - return nullptr; + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid aggregator type"); } -Aggregator* Aggregator::fromVPack(transaction::Methods* trx, - arangodb::velocypack::Slice const& slice, - char const* variableName) { +std::unique_ptr Aggregator::fromVPack(transaction::Methods* trx, + arangodb::velocypack::Slice const& slice, + char const* variableName) { VPackSlice variable = slice.get(variableName); if (variable.isString()) { diff --git a/arangod/Aql/Aggregator.h b/arangod/Aql/Aggregator.h index b78f16f92d..395cbb7a26 100644 --- a/arangod/Aql/Aggregator.h +++ b/arangod/Aql/Aggregator.h @@ -49,10 +49,10 @@ struct Aggregator { virtual void reduce(AqlValue const&) = 0; virtual AqlValue stealValue() = 0; - static Aggregator* fromTypeString(transaction::Methods*, - std::string const&); - static Aggregator* fromVPack(transaction::Methods*, - arangodb::velocypack::Slice const&, char const*); + static std::unique_ptr fromTypeString(transaction::Methods*, + std::string const&); + static std::unique_ptr fromVPack(transaction::Methods*, + arangodb::velocypack::Slice const&, char const*); static bool isSupported(std::string const&); static bool requiresInput(std::string const&); diff --git a/arangod/Aql/AqlTransaction.cpp b/arangod/Aql/AqlTransaction.cpp index 4a076bbeb8..b63fe06cfc 100644 --- a/arangod/Aql/AqlTransaction.cpp +++ b/arangod/Aql/AqlTransaction.cpp @@ -42,11 +42,10 @@ int AqlTransaction::processCollection(aql::Collection* collection) { /// @brief add a coordinator 
collection to the transaction int AqlTransaction::processCollectionCoordinator(aql::Collection* collection) { - TRI_voc_cid_t cid = - this->resolver()->getCollectionId(collection->getName()); + TRI_voc_cid_t cid = resolver()->getCollectionId(collection->getName()); - return this->addCollection(cid, collection->getName().c_str(), - collection->accessType); + return addCollection(cid, collection->getName().c_str(), + collection->accessType); } /// @brief add a regular collection to the transaction @@ -55,7 +54,7 @@ int AqlTransaction::processCollectionNormal(aql::Collection* collection) { TRI_voc_cid_t cid = 0; arangodb::LogicalCollection const* col = - this->resolver()->getCollectionStruct(collection->getName()); + resolver()->getCollectionStruct(collection->getName()); /*if (col == nullptr) { auto startTime = TRI_microtime(); auto endTime = startTime + 60.0; @@ -72,8 +71,7 @@ int AqlTransaction::processCollectionNormal(aql::Collection* collection) { cid = col->cid(); } - int res = - this->addCollection(cid, collection->getName(), collection->accessType); + int res = addCollection(cid, collection->getName(), collection->accessType); if (res == TRI_ERROR_NO_ERROR && col != nullptr) { collection->setCollection(const_cast(col)); diff --git a/arangod/Aql/AqlTransaction.h b/arangod/Aql/AqlTransaction.h index 54867c552f..51f65e882e 100644 --- a/arangod/Aql/AqlTransaction.h +++ b/arangod/Aql/AqlTransaction.h @@ -62,14 +62,14 @@ class AqlTransaction final : public transaction::Methods { /// @brief add a list of collections to the transaction int addCollections( std::map const& collections) { - int ret = TRI_ERROR_NO_ERROR; for (auto const& it : collections) { - ret = processCollection(it.second); - if (ret != TRI_ERROR_NO_ERROR) { - break; + int res = processCollection(it.second); + + if (res != TRI_ERROR_NO_ERROR) { + return res; } } - return ret; + return TRI_ERROR_NO_ERROR; } /// @brief add a collection to the transaction diff --git a/arangod/Aql/AqlValue.cpp 
b/arangod/Aql/AqlValue.cpp index 3ccd62f84b..db776ca6ad 100644 --- a/arangod/Aql/AqlValue.cpp +++ b/arangod/Aql/AqlValue.cpp @@ -853,7 +853,9 @@ void AqlValue::toVelocyPack(transaction::Methods* trx, case VPACK_INLINE: case VPACK_MANAGED: { if (resolveExternals) { - arangodb::basics::VelocyPackHelper::SanitizeExternals(slice(), builder); + bool const sanitizeExternals = true; + bool const sanitizeCustom = true; + arangodb::basics::VelocyPackHelper::sanitizeNonClientTypes(slice(), VPackSlice::noneSlice(), builder, trx->transactionContextPtr()->getVPackOptions(), sanitizeExternals, sanitizeCustom); } else { builder.add(slice()); } diff --git a/arangod/Aql/Ast.cpp b/arangod/Aql/Ast.cpp index 72243d34fb..d4927a1960 100644 --- a/arangod/Aql/Ast.cpp +++ b/arangod/Aql/Ast.cpp @@ -95,12 +95,7 @@ std::unordered_map const Ast::ReversedOperators{ /// @brief create the AST Ast::Ast(Query* query) : _query(query), - _scopes(), - _variables(), - _bindParameters(), _root(nullptr), - _queries(), - _writeCollections(), _functionsMayAccessDocuments(false), _containsTraversal(false) { TRI_ASSERT(_query != nullptr); diff --git a/arangod/Aql/Ast.h b/arangod/Aql/Ast.h index 56f15d182f..a5e81fb078 100644 --- a/arangod/Aql/Ast.h +++ b/arangod/Aql/Ast.h @@ -40,14 +40,10 @@ namespace velocypack { class Slice; } -namespace transaction { -class Methods; -} -; - namespace aql { class Query; +class VariableGenerator; typedef std::unordered_map> TopLevelAttributes; @@ -58,7 +54,7 @@ class Ast { public: /// @brief create the AST - Ast(Query*); + explicit Ast(Query*); /// @brief destroy the AST ~Ast(); @@ -69,12 +65,7 @@ class Ast { /// @brief return the variable generator inline VariableGenerator* variables() { return &_variables; } - - /// @brief return the variable generator - inline VariableGenerator* variables() const { - return const_cast(&_variables); - } - + /// @brief return the root of the AST inline AstNode const* root() const { return _root; } diff --git a/arangod/Aql/BindParameters.h 
b/arangod/Aql/BindParameters.h index 0423550c39..bdd0c6dde0 100644 --- a/arangod/Aql/BindParameters.h +++ b/arangod/Aql/BindParameters.h @@ -39,7 +39,9 @@ class BindParameters { public: BindParameters(BindParameters const&) = delete; BindParameters& operator=(BindParameters const&) = delete; - BindParameters() = delete; + + BindParameters() + : _builder(nullptr), _parameters(), _processed(false) {} /// @brief create the parameters explicit BindParameters(std::shared_ptr builder) diff --git a/arangod/Aql/CalculationBlock.cpp b/arangod/Aql/CalculationBlock.cpp index b944ecf0d2..866edbf3e9 100644 --- a/arangod/Aql/CalculationBlock.cpp +++ b/arangod/Aql/CalculationBlock.cpp @@ -25,6 +25,7 @@ #include "Aql/AqlItemBlock.h" #include "Aql/ExecutionEngine.h" #include "Aql/Functions.h" +#include "Aql/Query.h" #include "Basics/Exceptions.h" #include "Basics/ScopeGuard.h" #include "Basics/VelocyPackHelper.h" diff --git a/arangod/Aql/ClusterBlocks.cpp b/arangod/Aql/ClusterBlocks.cpp index 792c9f0993..fdfe9baec5 100644 --- a/arangod/Aql/ClusterBlocks.cpp +++ b/arangod/Aql/ClusterBlocks.cpp @@ -29,6 +29,7 @@ #include "Aql/Collection.h" #include "Aql/ExecutionEngine.h" #include "Aql/ExecutionStats.h" +#include "Aql/Query.h" #include "Basics/Exceptions.h" #include "Basics/StaticStrings.h" #include "Basics/StringBuffer.h" diff --git a/arangod/Aql/ClusterNodes.cpp b/arangod/Aql/ClusterNodes.cpp index 45ea9f32bf..c04fbbffed 100644 --- a/arangod/Aql/ClusterNodes.cpp +++ b/arangod/Aql/ClusterNodes.cpp @@ -25,6 +25,7 @@ #include "Aql/Ast.h" #include "Aql/Collection.h" #include "Aql/ExecutionPlan.h" +#include "Aql/Query.h" using namespace arangodb::basics; using namespace arangodb::aql; diff --git a/arangod/Aql/CollectBlock.cpp b/arangod/Aql/CollectBlock.cpp index 6e2de92888..370cbb1213 100644 --- a/arangod/Aql/CollectBlock.cpp +++ b/arangod/Aql/CollectBlock.cpp @@ -59,9 +59,6 @@ SortedCollectBlock::CollectGroup::~CollectGroup() { for (auto& it : groupBlocks) { delete it; } - for (auto& 
it : aggregators) { - delete it; - } } void SortedCollectBlock::CollectGroup::initialize(size_t capacity) { @@ -79,7 +76,6 @@ void SortedCollectBlock::CollectGroup::initialize(size_t capacity) { // reset aggregators for (auto& it : aggregators) { - TRI_ASSERT(it != nullptr); it->reset(); } } @@ -102,7 +98,6 @@ void SortedCollectBlock::CollectGroup::reset() { // reset all aggregators for (auto& it : aggregators) { - TRI_ASSERT(it != nullptr); it->reset(); } @@ -185,7 +180,7 @@ SortedCollectBlock::SortedCollectBlock(ExecutionEngine* engine, _aggregateRegisters.emplace_back( std::make_pair((*itOut).second.registerId, reg)); _currentGroup.aggregators.emplace_back( - Aggregator::fromTypeString(_trx, p.second.second)); + std::move(Aggregator::fromTypeString(_trx, p.second.second))); } TRI_ASSERT(_aggregateRegisters.size() == en->_aggregateVariables.size()); TRI_ASSERT(_aggregateRegisters.size() == _currentGroup.aggregators.size()); @@ -617,14 +612,8 @@ int HashedCollectBlock::getOrSkipSome(size_t atLeast, size_t atMost, // cleanup function for group values auto cleanup = [&allGroups]() -> void { for (auto& it : allGroups) { - if (it.second != nullptr) { - for (auto& it2 : *(it.second)) { - delete it2; - } - delete it.second; - } + delete it.second; } - allGroups.clear(); }; // prevent memory leaks by always cleaning up the groups @@ -643,8 +632,8 @@ int HashedCollectBlock::getOrSkipSome(size_t atLeast, size_t atMost, size_t row = 0; for (auto& it : allGroups) { - auto& keys = it.first; + TRI_ASSERT(it.second != nullptr); TRI_ASSERT(keys.size() == _groupRegisters.size()); size_t i = 0; @@ -653,7 +642,7 @@ int HashedCollectBlock::getOrSkipSome(size_t atLeast, size_t atMost, const_cast(&key)->erase(); // to prevent double-freeing later } - if (it.second != nullptr && !en->_count) { + if (!en->_count) { TRI_ASSERT(it.second->size() == _aggregateRegisters.size()); size_t j = 0; for (auto const& r : *(it.second)) { @@ -662,7 +651,7 @@ int 
HashedCollectBlock::getOrSkipSome(size_t atLeast, size_t atMost, } } else if (en->_count) { // set group count in result register - TRI_ASSERT(it.second != nullptr); + TRI_ASSERT(!it.second->empty()); result->setValue(row, _collectRegister, it.second->back()->stealValue()); } @@ -722,7 +711,7 @@ int HashedCollectBlock::getOrSkipSome(size_t atLeast, size_t atMost, // no aggregate registers. this means we'll only count the number of // items if (en->_count) { - aggregateValues->emplace_back(new AggregatorLength(_trx, 1)); + aggregateValues->emplace_back(std::move(std::make_unique(_trx, 1))); } } else { // we do have aggregate registers. create them as empty AqlValues @@ -732,7 +721,7 @@ int HashedCollectBlock::getOrSkipSome(size_t atLeast, size_t atMost, size_t j = 0; for (auto const& r : en->_aggregateVariables) { aggregateValues->emplace_back( - Aggregator::fromTypeString(_trx, r.second.second)); + std::move(Aggregator::fromTypeString(_trx, r.second.second))); aggregateValues->back()->reduce( GetValueForRegister(cur, _pos, _aggregateRegisters[j].second)); ++j; @@ -749,10 +738,12 @@ int HashedCollectBlock::getOrSkipSome(size_t atLeast, size_t atMost, if (en->_aggregateVariables.empty()) { // no aggregate registers. 
simply increase the counter if (en->_count) { + TRI_ASSERT(!aggregateValues->empty()); aggregateValues->back()->reduce(AqlValue()); } } else { // apply the aggregators for the group + TRI_ASSERT(aggregateValues->size() == _aggregateRegisters.size()); size_t j = 0; for (auto const& r : _aggregateRegisters) { (*aggregateValues)[j]->reduce( diff --git a/arangod/Aql/CollectBlock.h b/arangod/Aql/CollectBlock.h index 38550c1e35..69e472044d 100644 --- a/arangod/Aql/CollectBlock.h +++ b/arangod/Aql/CollectBlock.h @@ -37,19 +37,16 @@ namespace arangodb { namespace transaction { class Methods; } -; namespace aql { struct Aggregator; class AqlItemBlock; class ExecutionEngine; - -typedef std::vector AggregateValuesType; + +typedef std::vector> AggregateValuesType; class SortedCollectBlock final : public ExecutionBlock { private: - typedef std::vector AggregateValuesType; - struct CollectGroup { std::vector groupValues; diff --git a/arangod/Aql/CollectNode.cpp b/arangod/Aql/CollectNode.cpp index 8e2068b713..dda4ca0228 100644 --- a/arangod/Aql/CollectNode.cpp +++ b/arangod/Aql/CollectNode.cpp @@ -24,6 +24,7 @@ #include "CollectNode.h" #include "Aql/Ast.h" #include "Aql/ExecutionPlan.h" +#include "Aql/VariableGenerator.h" #include "Aql/WalkerWorker.h" using namespace arangodb::aql; diff --git a/arangod/Aql/Collections.h b/arangod/Aql/Collections.h index 84e0d7a70f..59381d7810 100644 --- a/arangod/Aql/Collections.h +++ b/arangod/Aql/Collections.h @@ -52,6 +52,8 @@ class Collections { std::map const* collections() const; + bool empty() const { return _collections.empty(); } + private: TRI_vocbase_t* _vocbase; diff --git a/arangod/Aql/Condition.cpp b/arangod/Aql/Condition.cpp index f8a86ef518..1f602f6aa3 100644 --- a/arangod/Aql/Condition.cpp +++ b/arangod/Aql/Condition.cpp @@ -26,6 +26,7 @@ #include "Aql/AstNode.h" #include "Aql/Collection.h" #include "Aql/ExecutionPlan.h" +#include "Aql/Query.h" #include "Aql/SortCondition.h" #include "Aql/Variable.h" #include 
"Basics/Exceptions.h" diff --git a/arangod/Aql/EnumerateCollectionBlock.cpp b/arangod/Aql/EnumerateCollectionBlock.cpp index 8bb7d39711..73f0868147 100644 --- a/arangod/Aql/EnumerateCollectionBlock.cpp +++ b/arangod/Aql/EnumerateCollectionBlock.cpp @@ -26,6 +26,7 @@ #include "Aql/AqlItemBlock.h" #include "Aql/Collection.h" #include "Aql/ExecutionEngine.h" +#include "Aql/Query.h" #include "Basics/Exceptions.h" #include "Cluster/FollowerInfo.h" #include "StorageEngine/DocumentIdentifierToken.h" diff --git a/arangod/Aql/ExecutionBlock.cpp b/arangod/Aql/ExecutionBlock.cpp index 6f3c1e49ec..f2c5499e3e 100644 --- a/arangod/Aql/ExecutionBlock.cpp +++ b/arangod/Aql/ExecutionBlock.cpp @@ -28,6 +28,7 @@ #include "Aql/Ast.h" #include "Aql/BlockCollector.h" #include "Aql/ExecutionEngine.h" +#include "Aql/Query.h" using namespace arangodb::aql; diff --git a/arangod/Aql/ExecutionEngine.cpp b/arangod/Aql/ExecutionEngine.cpp index c8045ce10a..51bc2a008b 100644 --- a/arangod/Aql/ExecutionEngine.cpp +++ b/arangod/Aql/ExecutionEngine.cpp @@ -34,6 +34,7 @@ #include "Aql/ExecutionNode.h" #include "Aql/IndexBlock.h" #include "Aql/ModificationBlocks.h" +#include "Aql/Query.h" #include "Aql/SortBlock.h" #include "Aql/SubqueryBlock.h" #include "Aql/TraversalBlock.h" diff --git a/arangod/Aql/ExecutionNode.cpp b/arangod/Aql/ExecutionNode.cpp index 0dffd59166..e131b1074d 100644 --- a/arangod/Aql/ExecutionNode.cpp +++ b/arangod/Aql/ExecutionNode.cpp @@ -31,6 +31,7 @@ #include "Aql/ExecutionPlan.h" #include "Aql/IndexNode.h" #include "Aql/ModificationNodes.h" +#include "Aql/Query.h" #include "Aql/SortNode.h" #include "Aql/TraversalNode.h" #include "Aql/ShortestPathNode.h" @@ -1375,7 +1376,7 @@ ExecutionNode* CalculationNode::clone(ExecutionPlan* plan, outVariable = plan->getAst()->variables()->createVariable(outVariable); } - auto c = new CalculationNode(plan, _id, _expression->clone(), + auto c = new CalculationNode(plan, _id, _expression->clone(plan->getAst()), conditionVariable, 
outVariable); c->_canRemoveIfThrows = _canRemoveIfThrows; diff --git a/arangod/Aql/ExecutionPlan.cpp b/arangod/Aql/ExecutionPlan.cpp index f90f3f7a0e..29637b9550 100644 --- a/arangod/Aql/ExecutionPlan.cpp +++ b/arangod/Aql/ExecutionPlan.cpp @@ -228,8 +228,6 @@ void ExecutionPlan::getCollectionsFromVelocyPack(Ast* ast, } for (auto const& collection : VPackArrayIterator(collectionsSlice)) { - auto typeStr = arangodb::basics::VelocyPackHelper::checkAndGetStringValue( - collection, "type"); ast->query()->collections()->add( arangodb::basics::VelocyPackHelper::checkAndGetStringValue(collection, "name"), @@ -276,8 +274,8 @@ class CloneNodeAdder final : public WalkerWorker { }; /// @brief clone an existing execution plan -ExecutionPlan* ExecutionPlan::clone() { - auto plan = std::make_unique(_ast); +ExecutionPlan* ExecutionPlan::clone(Ast* ast) { + auto plan = std::make_unique(ast); plan->_root = _root->clone(plan.get(), true, false); plan->_nextId = _nextId; @@ -297,13 +295,19 @@ ExecutionPlan* ExecutionPlan::clone() { return plan.release(); } +/// @brief clone an existing execution plan +ExecutionPlan* ExecutionPlan::clone() { + return clone(_ast); +} + /// @brief create an execution plan identical to this one /// keep the memory of the plan on the query object specified. 
ExecutionPlan* ExecutionPlan::clone(Query const& query) { auto otherPlan = std::make_unique(query.ast()); for (auto const& it : _ids) { - otherPlan->registerNode(it.second->clone(otherPlan.get(), false, true)); + auto clonedNode = it.second->clone(otherPlan.get(), false, true); + otherPlan->registerNode(clonedNode); } return otherPlan.release(); diff --git a/arangod/Aql/ExecutionPlan.h b/arangod/Aql/ExecutionPlan.h index 39de6a2c35..ba9243f3fa 100644 --- a/arangod/Aql/ExecutionPlan.h +++ b/arangod/Aql/ExecutionPlan.h @@ -28,7 +28,6 @@ #include "Aql/CollectOptions.h" #include "Aql/ExecutionNode.h" #include "Aql/ModificationOptions.h" -#include "Aql/Query.h" #include "Aql/types.h" #include "Basics/SmallVector.h" @@ -40,6 +39,7 @@ struct AstNode; class CalculationNode; class CollectNode; class ExecutionNode; +class Query; class ExecutionPlan { public: @@ -60,6 +60,8 @@ class ExecutionPlan { /// @brief create an execution plan from VelocyPack static ExecutionPlan* instantiateFromVelocyPack( Ast* ast, arangodb::velocypack::Slice const); + + ExecutionPlan* clone(Ast*); /// @brief clone the plan by recursively cloning starting from the root ExecutionPlan* clone(); @@ -69,9 +71,9 @@ class ExecutionPlan { ExecutionPlan* clone(Query const&); /// @brief export to VelocyPack - std::shared_ptr toVelocyPack(Ast*, bool) const; + std::shared_ptr toVelocyPack(Ast*, bool verbose) const; - void toVelocyPack(arangodb::velocypack::Builder&, Ast*, bool) const; + void toVelocyPack(arangodb::velocypack::Builder&, Ast*, bool verbose) const; /// @brief check if the plan is empty inline bool empty() const { return (_root == nullptr); } diff --git a/arangod/Aql/Expression.h b/arangod/Aql/Expression.h index c8813c2818..a4a3bb40f0 100644 --- a/arangod/Aql/Expression.h +++ b/arangod/Aql/Expression.h @@ -107,10 +107,10 @@ class Expression { } /// @brief clone the expression, needed to clone execution plans - Expression* clone() { + Expression* clone(Ast* ast) { // We do not need to copy the _ast, 
since it is managed by the // query object and the memory management of the ASTs - return new Expression(_ast, _node); + return new Expression(ast != nullptr ? ast : _ast, _node); } /// @brief return all variables used in the expression diff --git a/arangod/Aql/Functions.cpp b/arangod/Aql/Functions.cpp index 489390ed56..fb0c9e1592 100644 --- a/arangod/Aql/Functions.cpp +++ b/arangod/Aql/Functions.cpp @@ -411,13 +411,11 @@ void Functions::Stringify(transaction::Methods* trx, return; } - if (slice.isObject() || slice.isArray()) { - VPackDumper dumper(&buffer, trx->transactionContextPtr()->getVPackOptions()); - dumper.dump(slice); - return; - } - - VPackDumper dumper(&buffer); + VPackOptions* options = trx->transactionContextPtr()->getVPackOptionsForDump(); + VPackOptions adjustedOptions = *options; + adjustedOptions.escapeUnicode = false; + adjustedOptions.escapeForwardSlashes = false; + VPackDumper dumper(&buffer, &adjustedOptions); dumper.dump(slice); } @@ -2270,7 +2268,7 @@ AqlValue Functions::Zip(arangodb::aql::Query* query, for (VPackValueLength i = 0; i < n; ++i) { buffer->reset(); Stringify(trx, adapter, keysSlice.at(i)); - builder->add(std::string(buffer->c_str(), buffer->length()), valuesSlice.at(i)); + builder->add(buffer->c_str(), buffer->length(), valuesSlice.at(i)); } builder->close(); return AqlValue(builder.get()); diff --git a/arangod/Aql/IndexBlock.cpp b/arangod/Aql/IndexBlock.cpp index f2743d25c8..5812dd843c 100644 --- a/arangod/Aql/IndexBlock.cpp +++ b/arangod/Aql/IndexBlock.cpp @@ -28,6 +28,7 @@ #include "Aql/Condition.h" #include "Aql/ExecutionEngine.h" #include "Aql/Functions.h" +#include "Aql/Query.h" #include "Basics/ScopeGuard.h" #include "Basics/Exceptions.h" #include "Basics/StaticStrings.h" diff --git a/arangod/Aql/IndexNode.cpp b/arangod/Aql/IndexNode.cpp index 658e460749..1f1aa704e6 100644 --- a/arangod/Aql/IndexNode.cpp +++ b/arangod/Aql/IndexNode.cpp @@ -26,6 +26,7 @@ #include "Aql/Collection.h" #include "Aql/Condition.h" #include 
"Aql/ExecutionPlan.h" +#include "Aql/Query.h" #include "Transaction/Methods.h" #include diff --git a/arangod/Aql/ModificationBlocks.cpp b/arangod/Aql/ModificationBlocks.cpp index bd8042dd73..f5cce6067c 100644 --- a/arangod/Aql/ModificationBlocks.cpp +++ b/arangod/Aql/ModificationBlocks.cpp @@ -48,7 +48,7 @@ ModificationBlock::ModificationBlock(ExecutionEngine* engine, _isDBServer(false), _usesDefaultSharding(true) { - _trx->orderDitch(_collection->cid()); + _trx->pinData(_collection->cid()); auto const& registerPlan = ep->getRegisterPlan()->varInfo; diff --git a/arangod/Aql/ModificationNodes.cpp b/arangod/Aql/ModificationNodes.cpp index f77456f60c..f4ff2a7be0 100644 --- a/arangod/Aql/ModificationNodes.cpp +++ b/arangod/Aql/ModificationNodes.cpp @@ -25,6 +25,8 @@ #include "Aql/Ast.h" #include "Aql/Collection.h" #include "Aql/ExecutionPlan.h" +#include "Aql/Query.h" +#include "Aql/VariableGenerator.h" using namespace arangodb::aql; diff --git a/arangod/Aql/OptimizerRules.cpp b/arangod/Aql/OptimizerRules.cpp index 40bd985a57..ed41b13955 100644 --- a/arangod/Aql/OptimizerRules.cpp +++ b/arangod/Aql/OptimizerRules.cpp @@ -32,6 +32,7 @@ #include "Aql/Function.h" #include "Aql/IndexNode.h" #include "Aql/ModificationNodes.h" +#include "Aql/Query.h" #include "Aql/ShortestPathNode.h" #include "Aql/SortCondition.h" #include "Aql/SortNode.h" diff --git a/arangod/Aql/Parser.h b/arangod/Aql/Parser.h index 074df0b88c..b7dd9b1f5a 100644 --- a/arangod/Aql/Parser.h +++ b/arangod/Aql/Parser.h @@ -57,7 +57,7 @@ namespace aql { class Parser { public: /// @brief create the parser - Parser(Query*); + explicit Parser(Query*); /// @brief destroy the parser ~Parser(); diff --git a/arangod/Aql/PlanCache.cpp b/arangod/Aql/PlanCache.cpp new file mode 100644 index 0000000000..690e6b878e --- /dev/null +++ b/arangod/Aql/PlanCache.cpp @@ -0,0 +1,98 @@ +//////////////////////////////////////////////////////////////////////////////// +/// DISCLAIMER +/// +/// Copyright 2014-2016 ArangoDB GmbH, 
Cologne, Germany +/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is ArangoDB GmbH, Cologne, Germany +/// +/// @author Jan Steemann +//////////////////////////////////////////////////////////////////////////////// + +#include "PlanCache.h" +#include "Aql/Ast.h" +#include "Aql/ExecutionPlan.h" +#include "Aql/Query.h" +#include "Basics/ReadLocker.h" +#include "Basics/WriteLocker.h" +#include "VocBase/vocbase.h" + +#include + +using namespace arangodb::aql; + +/// @brief singleton instance of the plan cache +static arangodb::aql::PlanCache Instance; + +/// @brief create the plan cache +PlanCache::PlanCache() : _lock(), _plans() {} + +/// @brief destroy the plan cache +PlanCache::~PlanCache() {} + +/// @brief lookup a plan in the cache +std::shared_ptr PlanCache::lookup(TRI_vocbase_t* vocbase, uint64_t hash, + char const* queryString, + size_t queryStringLength) { + READ_LOCKER(readLocker, _lock); + + auto it = _plans.find(vocbase); + + if (it == _plans.end()) { + // no entry found for the requested database + return std::shared_ptr(); + } + + auto it2 = (*it).second.find(hash); + + if (it2 == (*it).second.end()) { + // plan not found in cache + return std::shared_ptr(); + } + + // plan found in cache + return (*it2).second; +} + +/// @brief store a plan in the cache +void PlanCache::store( + TRI_vocbase_t* vocbase, uint64_t hash, char const* queryString, + size_t 
queryStringLength, ExecutionPlan const* plan) { + + auto entry = std::make_unique(std::string(queryString, queryStringLength), plan->toVelocyPack(plan->getAst(), true)); + + WRITE_LOCKER(writeLocker, _lock); + + auto it = _plans.find(vocbase); + + if (it == _plans.end()) { + // create entry for the current database + it = _plans.emplace(vocbase, std::unordered_map>()).first; + } + + // store cache entry + (*it).second.emplace(hash, std::move(entry)); +} + +/// @brief invalidate all queries for a particular database +void PlanCache::invalidate(TRI_vocbase_t* vocbase) { + WRITE_LOCKER(writeLocker, _lock); + + _plans.erase(vocbase); +} + +/// @brief get the plan cache instance +PlanCache* PlanCache::instance() { return &Instance; } + diff --git a/arangod/Aql/PlanCache.h b/arangod/Aql/PlanCache.h new file mode 100644 index 0000000000..d9c702e56c --- /dev/null +++ b/arangod/Aql/PlanCache.h @@ -0,0 +1,84 @@ +//////////////////////////////////////////////////////////////////////////////// +/// DISCLAIMER +/// +/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany +/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. 
+/// +/// Copyright holder is ArangoDB GmbH, Cologne, Germany +/// +/// @author Jan Steemann +//////////////////////////////////////////////////////////////////////////////// + +#ifndef ARANGOD_AQL_PLAN_CACHE_H +#define ARANGOD_AQL_PLAN_CACHE_H 1 + +#include "Basics/Common.h" +#include "Basics/ReadWriteLock.h" + +struct TRI_vocbase_t; + +namespace arangodb { +namespace velocypack { +class Builder; +} + +namespace aql { +class ExecutionPlan; +class VariableGenerator; + +struct PlanCacheEntry { + PlanCacheEntry(std::string&& queryString, + std::shared_ptr builder) + : queryString(std::move(queryString)), builder(builder) {} + + std::string queryString; + std::shared_ptr builder; +}; + +class PlanCache { + public: + PlanCache(PlanCache const&) = delete; + PlanCache& operator=(PlanCache const&) = delete; + + /// @brief create cache + PlanCache(); + + /// @brief destroy the cache + ~PlanCache(); + + public: + /// @brief lookup a plan in the cache + std::shared_ptr lookup(TRI_vocbase_t*, uint64_t, char const*, size_t); + + /// @brief store a plan in the cache + void store(TRI_vocbase_t*, uint64_t, char const*, size_t, ExecutionPlan const*); + + /// @brief invalidate all plans for a particular database + void invalidate(TRI_vocbase_t*); + + /// @brief get the pointer to the global plan cache + static PlanCache* instance(); + + private: + /// @brief read-write lock for the cache + arangodb::basics::ReadWriteLock _lock; + + /// @brief cached query plans, organized per database + std::unordered_map>> _plans; +}; +} +} + +#endif diff --git a/arangod/Aql/Query.cpp b/arangod/Aql/Query.cpp index cc80f69e5b..2b29ea2ff7 100644 --- a/arangod/Aql/Query.cpp +++ b/arangod/Aql/Query.cpp @@ -31,6 +31,7 @@ #include "Aql/Executor.h" #include "Aql/Optimizer.h" #include "Aql/Parser.h" +#include "Aql/PlanCache.h" #include "Aql/QueryCache.h" #include "Aql/QueryList.h" #include "Basics/Exceptions.h" @@ -54,11 +55,17 @@ #include #include +#ifndef USE_PLAN_CACHE +#undef USE_PLAN_CACHE +#endif + 
using namespace arangodb; using namespace arangodb::aql; namespace { static std::atomic NextQueryId(1); + +constexpr uint64_t DontCache = 0; } /// @brief names of query phases / states @@ -148,20 +155,15 @@ Query::Query(bool contextOwnedByExterior, TRI_vocbase_t* vocbase, _resourceMonitor(), _resources(&_resourceMonitor), _vocbase(vocbase), - _executor(nullptr), _context(nullptr), _queryString(queryString), - _queryLength(queryLength), + _queryStringLength(queryLength), _queryBuilder(), _bindParameters(bindParameters), _options(options), _collections(vocbase), - _ast(nullptr), - _profile(nullptr), _state(INVALID_STATE), - _parser(nullptr), _trx(nullptr), - _engine(nullptr), _maxWarningCount(10), _warnings(), _startTime(TRI_microtime()), @@ -221,20 +223,14 @@ Query::Query(bool contextOwnedByExterior, TRI_vocbase_t* vocbase, _resourceMonitor(), _resources(&_resourceMonitor), _vocbase(vocbase), - _executor(nullptr), _context(nullptr), _queryString(nullptr), - _queryLength(0), + _queryStringLength(0), _queryBuilder(queryStruct), - _bindParameters(nullptr), _options(options), _collections(vocbase), - _ast(nullptr), - _profile(nullptr), _state(INVALID_STATE), - _parser(nullptr), _trx(nullptr), - _engine(nullptr), _maxWarningCount(10), _warnings(), _startTime(TRI_microtime()), @@ -278,11 +274,9 @@ Query::~Query() { } cleanupPlanAndEngine(TRI_ERROR_INTERNAL); // abort the transaction - delete _profile; - _profile = nullptr; + _profile.reset(); - delete _executor; - _executor = nullptr; + _executor.reset(); if (_context != nullptr) { TRI_ASSERT(!_contextOwnedByExterior); @@ -300,8 +294,7 @@ Query::~Query() { _context = nullptr; } - delete _ast; - _ast = nullptr; + _ast.reset(); for (auto& it : _graphs) { delete it.second; @@ -317,7 +310,7 @@ Query::~Query() { /// the query Query* Query::clone(QueryPart part, bool withPlan) { auto clone = - std::make_unique(false, _vocbase, _queryString, _queryLength, + std::make_unique(false, _vocbase, _queryString, _queryStringLength, 
std::shared_ptr(), _options, part); clone->_resourceMonitor = _resourceMonitor; @@ -373,7 +366,7 @@ std::string Query::extractRegion(int line, int column) const { char c; char const* p = _queryString; - while ((static_cast(p - _queryString) < _queryLength) && (c = *p)) { + while ((static_cast(p - _queryString) < _queryStringLength) && (c = *p)) { if (currentLine > line || (currentLine >= line && currentColumn >= column)) { break; @@ -406,9 +399,9 @@ std::string Query::extractRegion(int line, int column) const { static int const SNIPPET_LENGTH = 32; static char const* SNIPPET_SUFFIX = "..."; - if (_queryLength < offset + SNIPPET_LENGTH) { + if (_queryStringLength < offset + SNIPPET_LENGTH) { // return a copy of the region - return std::string(_queryString + offset, _queryLength - offset); + return std::string(_queryString + offset, _queryStringLength - offset); } // copy query part @@ -461,157 +454,195 @@ void Query::registerWarning(int code, char const* details) { } } +void Query::prepare(QueryRegistry* registry, uint64_t queryStringHash) { + TRI_ASSERT(registry != nullptr); + + init(); + enterState(PARSING); + + std::unique_ptr plan; + +#if USE_PLAN_CACHE + if (_queryString != nullptr && + queryStringHash != DontCache && + _part == PART_MAIN) { + // LOG_TOPIC(INFO, Logger::FIXME) << "trying to find query in execution plan cache: '" << std::string(_queryString, _queryStringLength) << "', hash: " << queryStringHash; + + // store & lookup velocypack plans!! 
+ std::shared_ptr planCacheEntry = PlanCache::instance()->lookup(_vocbase, queryStringHash, _queryString, _queryStringLength); + if (planCacheEntry != nullptr) { + // LOG_TOPIC(INFO, Logger::FIXME) << "query found in execution plan cache: '" << std::string(_queryString, _queryStringLength) << "'"; + + TRI_ASSERT(_trx == nullptr); + TRI_ASSERT(_collections.empty()); + + // create the transaction object, but do not start it yet + AqlTransaction* trx = new AqlTransaction( + createTransactionContext(), _collections.collections(), + _part == PART_MAIN); + _trx = trx; + + VPackBuilder* builder = planCacheEntry->builder.get(); + VPackSlice slice = builder->slice(); + ExecutionPlan::getCollectionsFromVelocyPack(_ast.get(), slice); + _ast->variables()->fromVelocyPack(slice); + + enterState(LOADING_COLLECTIONS); + + int res = trx->addCollections(*_collections.collections()); + + if (res == TRI_ERROR_NO_ERROR) { + res = _trx->begin(); + } + + if (res != TRI_ERROR_NO_ERROR) { + THROW_ARANGO_EXCEPTION_MESSAGE(res, buildErrorMessage(res)); + } + + enterState(PLAN_INSTANTIATION); + + plan.reset(ExecutionPlan::instantiateFromVelocyPack(_ast.get(), slice)); + + TRI_ASSERT(plan != nullptr); + } + } +#endif + + if (plan == nullptr) { + plan.reset(prepare()); + + TRI_ASSERT(plan != nullptr); + +#if USE_PLAN_CACHE + if (_queryString != nullptr && + queryStringHash != DontCache && + _part == PART_MAIN && + _warnings.empty() && + _ast->root()->isCacheable()) { + // LOG_TOPIC(INFO, Logger::FIXME) << "storing query in execution plan cache '" << std::string(_queryString, _queryStringLength) << "', hash: " << queryStringHash; + PlanCache::instance()->store(_vocbase, queryStringHash, _queryString, _queryStringLength, plan.get()); + } +#endif + } + + enterState(EXECUTION); + + TRI_ASSERT(_engine == nullptr); + // note that the engine returned here may already be present in our + // own _engine attribute (the instanciation procedure may modify us + // by calling our engine(ExecutionEngine*) 
function + // this is confusing and should be fixed! + std::unique_ptr engine(ExecutionEngine::instantiateFromPlan(registry, this, plan.get(), _queryString != nullptr)); + + if (_engine == nullptr) { + _engine = std::move(engine); + } else { + engine.release(); + } + + _plan = std::move(plan); +} + /// @brief prepare an AQL query, this is a preparation for execute, but /// execute calls it internally. The purpose of this separate method is /// to be able to only prepare a query from VelocyPack and then store it in the /// QueryRegistry. -QueryResult Query::prepare(QueryRegistry* registry) { +ExecutionPlan* Query::prepare() { LOG_TOPIC(DEBUG, Logger::QUERIES) << TRI_microtime() - _startTime << " " << "Query::prepare" << " this: " << (uintptr_t) this; - TRI_ASSERT(registry != nullptr); - - try { - init(); - enterState(PARSING); + std::unique_ptr plan; + if (_queryString != nullptr) { auto parser = std::make_unique(this); - std::unique_ptr plan; - - if (_queryString != nullptr) { - parser->parse(false); - // put in bind parameters - parser->ast()->injectBindParameters(_bindParameters); - } - + + parser->parse(false); + // put in bind parameters + parser->ast()->injectBindParameters(_bindParameters); _isModificationQuery = parser->isModificationQuery(); - - // create the transaction object, but do not start it yet - AqlTransaction* trx = new AqlTransaction( - createTransactionContext(), _collections.collections(), - _part == PART_MAIN); - _trx = trx; - - try { - bool planRegisters; - // As soon as we start du instantiate the plan we have to clean it - // up before killing the unique_ptr - if (_queryString != nullptr) { - // we have an AST - // optimize the ast - enterState(AST_OPTIMIZATION); - - parser->ast()->validateAndOptimize(); - - enterState(LOADING_COLLECTIONS); - - int res = trx->begin(); - - if (res != TRI_ERROR_NO_ERROR) { - return transactionError(res); - } - - enterState(PLAN_INSTANTIATION); - plan.reset(ExecutionPlan::instantiateFromAst(parser->ast())); - 
- if (plan.get() == nullptr) { - // oops - return QueryResult(TRI_ERROR_INTERNAL, - "failed to create query execution engine"); - } - - // Run the query optimizer: - enterState(PLAN_OPTIMIZATION); - arangodb::aql::Optimizer opt(maxNumberOfPlans()); - // get enabled/disabled rules - opt.createPlans(plan.release(), getRulesFromOptions(), - inspectSimplePlans()); - // Now plan and all derived plans belong to the optimizer - plan.reset(opt.stealBest()); // Now we own the best one again - planRegisters = true; - } else { // no queryString, we are instantiating from _queryBuilder - enterState(PARSING); - - VPackSlice const querySlice = _queryBuilder->slice(); - ExecutionPlan::getCollectionsFromVelocyPack(parser->ast(), querySlice); - - parser->ast()->variables()->fromVelocyPack(querySlice); - // creating the plan may have produced some collections - // we need to add them to the transaction now (otherwise the query will - // fail) - - enterState(LOADING_COLLECTIONS); - - int res = trx->addCollections(*_collections.collections()); - - if (res == TRI_ERROR_NO_ERROR) { - res = trx->begin(); - } - - if (res != TRI_ERROR_NO_ERROR) { - return transactionError(res); - } - - enterState(PLAN_INSTANTIATION); - - // we have an execution plan in VelocyPack format - plan.reset(ExecutionPlan::instantiateFromVelocyPack( - parser->ast(), _queryBuilder->slice())); - if (plan.get() == nullptr) { - // oops - return QueryResult(TRI_ERROR_INTERNAL); - } - - planRegisters = false; - } - - TRI_ASSERT(plan.get() != nullptr); - - // varsUsedLater and varsValid are unordered_sets and so their orders - // are not the same in the serialized and deserialized plans - - // return the V8 context - exitContext(); - - enterState(EXECUTION); - ExecutionEngine* engine(ExecutionEngine::instantiateFromPlan( - registry, this, plan.get(), planRegisters)); - - // If all went well so far, then we keep _plan, _parser and _trx and - // return: - _plan = std::move(plan); - _parser = parser.release(); - _engine = 
engine; - return QueryResult(); - } catch (arangodb::basics::Exception const& ex) { - cleanupPlanAndEngine(ex.code()); - return QueryResult(ex.code(), ex.message() + getStateString()); - } catch (std::bad_alloc const&) { - cleanupPlanAndEngine(TRI_ERROR_OUT_OF_MEMORY); - return QueryResult( - TRI_ERROR_OUT_OF_MEMORY, - TRI_errno_string(TRI_ERROR_OUT_OF_MEMORY) + getStateString()); - } catch (std::exception const& ex) { - cleanupPlanAndEngine(TRI_ERROR_INTERNAL); - return QueryResult(TRI_ERROR_INTERNAL, ex.what() + getStateString()); - } catch (...) { - cleanupPlanAndEngine(TRI_ERROR_INTERNAL); - return QueryResult(TRI_ERROR_INTERNAL, - TRI_errno_string(TRI_ERROR_INTERNAL) + getStateString()); - } - } catch (arangodb::basics::Exception const& ex) { - return QueryResult(ex.code(), ex.message() + getStateString()); - } catch (std::bad_alloc const&) { - return QueryResult( - TRI_ERROR_OUT_OF_MEMORY, - TRI_errno_string(TRI_ERROR_OUT_OF_MEMORY) + getStateString()); - } catch (std::exception const& ex) { - return QueryResult(TRI_ERROR_INTERNAL, ex.what() + getStateString()); - } catch (...) 
{ - return QueryResult(TRI_ERROR_INTERNAL, - TRI_errno_string(TRI_ERROR_INTERNAL) + getStateString()); } + TRI_ASSERT(_trx == nullptr); + + // create the transaction object, but do not start it yet + AqlTransaction* trx = new AqlTransaction( + createTransactionContext(), _collections.collections(), + _part == PART_MAIN); + _trx = trx; + + // As soon as we start du instantiate the plan we have to clean it + // up before killing the unique_ptr + if (_queryString != nullptr) { + // we have an AST + // optimize the ast + enterState(AST_OPTIMIZATION); + + _ast->validateAndOptimize(); + + enterState(LOADING_COLLECTIONS); + + int res = _trx->begin(); + + if (res != TRI_ERROR_NO_ERROR) { + THROW_ARANGO_EXCEPTION_MESSAGE(res, buildErrorMessage(res)); + } + + enterState(PLAN_INSTANTIATION); + plan.reset(ExecutionPlan::instantiateFromAst(_ast.get())); + + if (plan.get() == nullptr) { + // oops + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "failed to create query execution engine"); + } + + // Run the query optimizer: + enterState(PLAN_OPTIMIZATION); + arangodb::aql::Optimizer opt(maxNumberOfPlans()); + // get enabled/disabled rules + opt.createPlans(plan.release(), getRulesFromOptions(), + inspectSimplePlans()); + // Now plan and all derived plans belong to the optimizer + plan.reset(opt.stealBest()); // Now we own the best one again + } else { // no queryString, we are instantiating from _queryBuilder + VPackSlice const querySlice = _queryBuilder->slice(); + ExecutionPlan::getCollectionsFromVelocyPack(_ast.get(), querySlice); + + _ast->variables()->fromVelocyPack(querySlice); + // creating the plan may have produced some collections + // we need to add them to the transaction now (otherwise the query will + // fail) + + enterState(LOADING_COLLECTIONS); + + int res = trx->addCollections(*_collections.collections()); + + if (res == TRI_ERROR_NO_ERROR) { + res = _trx->begin(); + } + + if (res != TRI_ERROR_NO_ERROR) { + THROW_ARANGO_EXCEPTION_MESSAGE(res, 
buildErrorMessage(res)); + } + + enterState(PLAN_INSTANTIATION); + + // we have an execution plan in VelocyPack format + plan.reset(ExecutionPlan::instantiateFromVelocyPack(_ast.get(), _queryBuilder->slice())); + if (plan.get() == nullptr) { + // oops + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "could not create plan from vpack"); + } + } + + TRI_ASSERT(plan != nullptr); + + // varsUsedLater and varsValid are unordered_sets and so their orders + // are not the same in the serialized and deserialized plans + + // return the V8 context if we are in one + exitContext(); + + return plan.release(); } /// @brief execute an AQL query @@ -625,20 +656,17 @@ QueryResult Query::execute(QueryRegistry* registry) { try { bool useQueryCache = canUseQueryCache(); - uint64_t queryStringHash = 0; + uint64_t queryStringHash = hash(); if (useQueryCache) { - // hash the query - queryStringHash = hash(); - // check the query cache for an existing result auto cacheEntry = arangodb::aql::QueryCache::instance()->lookup( - _vocbase, queryStringHash, _queryString, _queryLength); + _vocbase, queryStringHash, _queryString, _queryStringLength); arangodb::aql::QueryCacheResultEntryGuard guard(cacheEntry); if (cacheEntry != nullptr) { // got a result from the query cache - QueryResult res(TRI_ERROR_NO_ERROR); + QueryResult res; // we don't have yet a transaction when we're here, so let's create // a mimimal context to build the result res.context = std::make_shared(_vocbase); @@ -651,18 +679,15 @@ QueryResult Query::execute(QueryRegistry* registry) { } } - QueryResult result = prepare(registry); - - if (result.code != TRI_ERROR_NO_ERROR) { - return result; - } + // will throw if it fails + prepare(registry, queryStringHash); if (_queryString == nullptr) { // we don't have query string... now pass query id to WorkMonitor work.reset(new AqlWorkStack(_vocbase, _id)); } else { // we do have a query string... 
pass query to WorkMonitor - work.reset(new AqlWorkStack(_vocbase, _id, _queryString, _queryLength)); + work.reset(new AqlWorkStack(_vocbase, _id, _queryString, _queryStringLength)); } log(); @@ -672,20 +697,22 @@ QueryResult Query::execute(QueryRegistry* registry) { useQueryCache = false; } - AqlItemBlock* value = nullptr; VPackOptions options = VPackOptions::Defaults; options.buildUnindexedArrays = true; options.buildUnindexedObjects = true; - TRI_ASSERT(_engine != nullptr); auto resultBuilder = std::make_shared(&options); resultBuilder->buffer()->reserve( 16 * 1024); // reserve some space in Builder to avoid frequent reallocs + + TRI_ASSERT(_engine != nullptr); + + // this is the RegisterId our results can be found in + auto const resultRegister = _engine->resultRegister(); + AqlItemBlock* value = nullptr; try { resultBuilder->openArray(); - // this is the RegisterId our results can be found in - auto const resultRegister = _engine->resultRegister(); if (useQueryCache) { // iterate over result, return it and store it in query cache @@ -712,7 +739,7 @@ QueryResult Query::execute(QueryRegistry* registry) { if (_warnings.empty()) { // finally store the generated result in the query cache auto result = QueryCache::instance()->store( - _vocbase, queryStringHash, _queryString, _queryLength, + _vocbase, queryStringHash, _queryString, _queryStringLength, resultBuilder, _trx->state()->collectionNames()); if (result == nullptr) { @@ -742,9 +769,19 @@ QueryResult Query::execute(QueryRegistry* registry) { delete value; throw; } + + LOG_TOPIC(DEBUG, Logger::QUERIES) << TRI_microtime() - _startTime << " " + << "Query::execute: before _trx->commit" + << " this: " << (uintptr_t) this; _trx->commit(); + LOG_TOPIC(DEBUG, Logger::QUERIES) + << TRI_microtime() - _startTime << " " + << "Query::execute: before cleanupPlanAndEngine" + << " this: " << (uintptr_t) this; + + QueryResult result; result.context = _trx->transactionContext(); _engine->_stats.setExecutionTime(TRI_microtime() - 
_startTime); @@ -752,7 +789,7 @@ QueryResult Query::execute(QueryRegistry* registry) { cleanupPlanAndEngine(TRI_ERROR_NO_ERROR, stats.get()); enterState(FINALIZATION); - + result.warnings = warningsToVelocyPack(); result.result = resultBuilder; result.stats = stats; @@ -797,46 +834,48 @@ QueryResultV8 Query::executeV8(v8::Isolate* isolate, QueryRegistry* registry) { LOG_TOPIC(DEBUG, Logger::QUERIES) << TRI_microtime() - _startTime << " " << "Query::executeV8" << " this: " << (uintptr_t) this; + TRI_ASSERT(registry != nullptr); + std::unique_ptr work; try { bool useQueryCache = canUseQueryCache(); - uint64_t queryStringHash = 0; + uint64_t queryStringHash = hash(); if (useQueryCache) { - // hash the query - queryStringHash = hash(); - // check the query cache for an existing result auto cacheEntry = arangodb::aql::QueryCache::instance()->lookup( - _vocbase, queryStringHash, _queryString, _queryLength); + _vocbase, queryStringHash, _queryString, _queryStringLength); arangodb::aql::QueryCacheResultEntryGuard guard(cacheEntry); if (cacheEntry != nullptr) { // got a result from the query cache - QueryResultV8 res(TRI_ERROR_NO_ERROR); + QueryResultV8 result; // we don't have yet a transaction when we're here, so let's create // a mimimal context to build the result - res.context = std::make_shared(_vocbase); + result.context = std::make_shared(_vocbase); v8::Handle values = TRI_VPackToV8(isolate, cacheEntry->_queryResult->slice(), - res.context->getVPackOptions()); + result.context->getVPackOptions()); TRI_ASSERT(values->IsArray()); - res.result = v8::Handle::Cast(values); - res.cached = true; - return res; + result.result = v8::Handle::Cast(values); + result.cached = true; + return result; } } - QueryResultV8 result = prepare(registry); - - if (result.code != TRI_ERROR_NO_ERROR) { - return result; + // will throw if it fails + prepare(registry, queryStringHash); + + if (_queryString == nullptr) { + // we don't have query string... 
now pass query id to WorkMonitor + work.reset(new AqlWorkStack(_vocbase, _id)); + } else { + // we do have a query string... pass query to WorkMonitor + work.reset(new AqlWorkStack(_vocbase, _id, _queryString, _queryStringLength)); } - work.reset(new AqlWorkStack(_vocbase, _id, _queryString, _queryLength)); - log(); if (useQueryCache && (_isModificationQuery || !_warnings.empty() || @@ -844,6 +883,7 @@ QueryResultV8 Query::executeV8(v8::Isolate* isolate, QueryRegistry* registry) { useQueryCache = false; } + QueryResultV8 result; result.result = v8::Array::New(isolate); TRI_ASSERT(_engine != nullptr); @@ -884,7 +924,7 @@ QueryResultV8 Query::executeV8(v8::Isolate* isolate, QueryRegistry* registry) { if (_warnings.empty()) { // finally store the generated result in the query cache QueryCache::instance()->store(_vocbase, queryStringHash, _queryString, - _queryLength, builder, + _queryStringLength, builder, _trx->state()->collectionNames()); } } else { @@ -930,6 +970,7 @@ QueryResultV8 Query::executeV8(v8::Isolate* isolate, QueryRegistry* registry) { << " this: " << (uintptr_t) this; result.context = _trx->transactionContext(); + _engine->_stats.setExecutionTime(TRI_microtime() - _startTime); auto stats = std::make_shared(); cleanupPlanAndEngine(TRI_ERROR_NO_ERROR, stats.get()); @@ -946,6 +987,10 @@ QueryResultV8 Query::executeV8(v8::Isolate* isolate, QueryRegistry* registry) { // patch executionTime stats value in place // we do this because "executionTime" should include the whole span of the execution and we have to set it at the very end basics::VelocyPackHelper::patchDouble(result.stats->slice().get("executionTime"), TRI_microtime() - _startTime); + + LOG_TOPIC(DEBUG, Logger::QUERIES) << TRI_microtime() - _startTime << " " + << "Query::executeV8:returning" + << " this: " << (uintptr_t) this; return result; } catch (arangodb::basics::Exception const& ex) { @@ -1017,7 +1062,7 @@ QueryResult Query::explain() { int res = _trx->begin(); if (res != TRI_ERROR_NO_ERROR) { 
- return transactionError(res); + THROW_ARANGO_EXCEPTION_MESSAGE(res, buildErrorMessage(res)); } enterState(PLAN_INSTANTIATION); @@ -1066,7 +1111,7 @@ QueryResult Query::explain() { result.result = bestPlan->toVelocyPack(parser.ast(), verbosePlans()); // cacheability - result.cached = (_queryString != nullptr && _queryLength > 0 && + result.cached = (_queryString != nullptr && _queryStringLength > 0 && !_isModificationQuery && _warnings.empty() && _ast->root()->isCacheable()); } @@ -1091,16 +1136,20 @@ QueryResult Query::explain() { TRI_errno_string(TRI_ERROR_INTERNAL) + getStateString()); } } + +void Query::engine(ExecutionEngine* engine) { + _engine.reset(engine); +} /// @brief get v8 executor Executor* Query::executor() { if (_executor == nullptr) { // the executor is a singleton per query - _executor = new Executor(literalSizeThreshold()); + _executor.reset(new Executor(literalSizeThreshold())); } TRI_ASSERT(_executor != nullptr); - return _executor; + return _executor.get(); } /// @brief enter a V8 context @@ -1226,16 +1275,15 @@ void Query::init() { } TRI_ASSERT(_id == 0); - TRI_ASSERT(_ast == nullptr); - _id = Query::NextId(); + TRI_ASSERT(_id != 0); TRI_ASSERT(_profile == nullptr); - _profile = new Profile(this); + _profile.reset(new Profile(this)); enterState(INITIALIZATION); TRI_ASSERT(_ast == nullptr); - _ast = new Ast(this); + _ast.reset(new Ast(this)); } /// @brief log a query @@ -1245,16 +1293,20 @@ void Query::log() { LOG_TOPIC(TRACE, Logger::QUERIES) << "executing query " << _id << ": '" - << std::string(_queryString, (std::min)(_queryLength, MaxLength)) - .append(_queryLength > MaxLength ? "..." : "") << "'"; + << std::string(_queryString, (std::min)(_queryStringLength, MaxLength)) + .append(_queryStringLength > MaxLength ? "..." 
: "") << "'"; } } /// @brief calculate a hash value for the query and bind parameters uint64_t Query::hash() const { + if (_queryString == nullptr) { + return DontCache; + } + // hash the query string first uint64_t hash = arangodb::aql::QueryCache::instance()->hashQueryString( - _queryString, _queryLength); + _queryString, _queryStringLength); // handle "fullCount" option. if this option is set, the query result will // be different to when it is not set! @@ -1270,6 +1322,14 @@ uint64_t Query::hash() const { } else { hash = fasthash64(TRI_CHAR_LENGTH_PAIR("count:false"), hash); } + + // also hash "optimizer" options + VPackSlice options = basics::VelocyPackHelper::EmptyObjectValue(); + + if (_options != nullptr && _options->slice().isObject()) { + options = _options->slice().get("optimizer"); + } + hash ^= options.hash(); // blend query hash with bind parameters return hash ^ _bindParameters.hash(); @@ -1277,7 +1337,7 @@ uint64_t Query::hash() const { /// @brief whether or not the query cache can be used for the query bool Query::canUseQueryCache() const { - if (_queryString == nullptr || _queryLength < 8) { + if (_queryString == nullptr || _queryStringLength < 8) { return false; } @@ -1302,16 +1362,17 @@ bool Query::canUseQueryCache() const { return false; } -/// @brief neatly format transaction error to the user. 
-QueryResult Query::transactionError(int errorCode) const { +/// @brief neatly format exception messages for the users +std::string Query::buildErrorMessage(int errorCode) const { std::string err(TRI_errno_string(errorCode)); if (_queryString != nullptr && verboseErrors()) { - err += - std::string("\nwhile executing:\n") + _queryString + std::string("\n"); + err += "\nwhile executing:\n"; + err.append(_queryString, _queryStringLength); + err += "\n"; } - return QueryResult(errorCode, err); + return err; } /// @brief read the "optimizer.inspectSimplePlans" section from the options @@ -1401,8 +1462,7 @@ void Query::cleanupPlanAndEngine(int errorCode, VPackBuilder* statsBuilder) { // shutdown may fail but we must not throw here // (we're also called from the destructor) } - delete _engine; - _engine = nullptr; + _engine.reset(); } if (_trx != nullptr) { @@ -1411,17 +1471,11 @@ void Query::cleanupPlanAndEngine(int errorCode, VPackBuilder* statsBuilder) { _trx = nullptr; } - if (_parser != nullptr) { - delete _parser; - _parser = nullptr; - } - _plan.reset(); } /// @brief create a TransactionContext -std::shared_ptr -Query::createTransactionContext() { +std::shared_ptr Query::createTransactionContext() { if (_contextOwnedByExterior) { // we can use v8 return arangodb::V8TransactionContext::Create(_vocbase, true); @@ -1430,7 +1484,7 @@ Query::createTransactionContext() { return arangodb::StandaloneTransactionContext::Create(_vocbase); } -/// @brief look up a graph either from our cache list or from the _graphs +/// @brief look up a graph either from our cache list or from the _graphs /// collection Graph const* Query::lookupGraphByName(std::string const& name) { auto it = _graphs.find(name); @@ -1440,7 +1494,7 @@ Graph const* Query::lookupGraphByName(std::string const& name) { } std::unique_ptr g( - arangodb::lookupGraphByName(_vocbase, name)); + arangodb::lookupGraphByName(createTransactionContext(), name)); if (g == nullptr) { return nullptr; @@ -1450,7 +1504,7 @@ 
Graph const* Query::lookupGraphByName(std::string const& name) { return g.release(); } - + /// @brief returns the next query id TRI_voc_tick_t Query::NextId() { return NextQueryId.fetch_add(1, std::memory_order_seq_cst); diff --git a/arangod/Aql/Query.h b/arangod/Aql/Query.h index 142d61fa5c..c07ec4c53c 100644 --- a/arangod/Aql/Query.h +++ b/arangod/Aql/Query.h @@ -59,7 +59,6 @@ class Ast; class ExecutionEngine; class ExecutionPlan; class Executor; -class Parser; class Query; class QueryRegistry; @@ -164,10 +163,12 @@ class Query { char const* queryString() const { return _queryString; } /// @brief get the length of the query string - size_t queryLength() const { return _queryLength; } + size_t queryLength() const { return _queryStringLength; } /// @brief getter for _ast - Ast* ast() const { return _ast; } + Ast* ast() const { + return _ast.get(); + } /// @brief should we return verbose plans? bool verbosePlans() const { return getBooleanOption("verbosePlans", false); } @@ -238,12 +239,8 @@ class Query { /// @brief register a warning void registerWarning(int, char const* = nullptr); - - /// @brief prepare an AQL query, this is a preparation for execute, but - /// execute calls it internally. The purpose of this separate method is - /// to be able to only prepare a query from VelocyPack and then store it in the - /// QueryRegistry. 
- QueryResult prepare(QueryRegistry*); + + void prepare(QueryRegistry*, uint64_t queryStringHash); /// @brief execute an AQL query QueryResult execute(QueryRegistry*); @@ -262,10 +259,10 @@ class Query { Executor* executor(); /// @brief return the engine, if prepared - ExecutionEngine* engine() { return _engine; } + ExecutionEngine* engine() const { return _engine.get(); } /// @brief inject the engine - void engine(ExecutionEngine* engine) { _engine = engine; } + void engine(ExecutionEngine* engine); /// @brief return the transaction, if prepared inline transaction::Methods* trx() { return _trx; } @@ -333,6 +330,12 @@ class Query { /// @brief initializes the query void init(); + /// @brief prepare an AQL query, this is a preparation for execute, but + /// execute calls it internally. The purpose of this separate method is + /// to be able to only prepare a query from VelocyPack and then store it in the + /// QueryRegistry. + ExecutionPlan* prepare(); + void setExecutionTime(); /// @brief log a query @@ -371,8 +374,8 @@ class Query { /// @brief read the "optimizer.rules" section from the options std::vector getRulesFromOptions() const; - /// @brief neatly format transaction errors to the user. 
- QueryResult transactionError(int errorCode) const; + /// @brief neatly format exception messages for the users + std::string buildErrorMessage(int errorCode) const; /// @brief enter a new state void enterState(ExecutionState); @@ -400,7 +403,7 @@ class Query { TRI_vocbase_t* _vocbase; /// @brief V8 code executor - Executor* _executor; + std::unique_ptr _executor; /// @brief the currently used V8 context V8Context* _context; @@ -412,7 +415,7 @@ class Query { char const* _queryString; /// @brief length of the query string in bytes - size_t const _queryLength; + size_t const _queryStringLength; /// @brief query in a VelocyPack structure std::shared_ptr const _queryBuilder; @@ -428,20 +431,17 @@ class Query { /// @brief _ast, we need an ast to manage the memory for AstNodes, even /// if we do not have a parser, because AstNodes occur in plans and engines - Ast* _ast; + std::unique_ptr _ast; /// @brief query execution profile - Profile* _profile; + std::unique_ptr _profile; /// @brief current state the query is in (used for profiling and error /// messages) ExecutionState _state; /// @brief the ExecutionPlan object, if the query is prepared - std::unique_ptr _plan; - - /// @brief the Parser object, if the query is prepared - Parser* _parser; + std::shared_ptr _plan; /// @brief the transaction object, in a distributed query every part of /// the query has its own transaction object. 
The transaction object is @@ -449,7 +449,7 @@ class Query { transaction::Methods* _trx; /// @brief the ExecutionEngine object, if the query is prepared - ExecutionEngine* _engine; + std::unique_ptr _engine; /// @brief maximum number of warnings size_t _maxWarningCount; diff --git a/arangod/Aql/QueryCache.cpp b/arangod/Aql/QueryCache.cpp index 8bd563942b..5b1f7b9edd 100644 --- a/arangod/Aql/QueryCache.cpp +++ b/arangod/Aql/QueryCache.cpp @@ -21,7 +21,7 @@ /// @author Jan Steemann //////////////////////////////////////////////////////////////////////////////// -#include "Aql/QueryCache.h" +#include "QueryCache.h" #include "Basics/fasthash.h" #include "Basics/Exceptions.h" #include "Basics/MutexLocker.h" diff --git a/arangod/Aql/QueryResources.cpp b/arangod/Aql/QueryResources.cpp index c7551b54db..12be0218b2 100644 --- a/arangod/Aql/QueryResources.cpp +++ b/arangod/Aql/QueryResources.cpp @@ -55,6 +55,12 @@ QueryResources::~QueryResources() { _resourceMonitor->decreaseMemoryUsage(_nodes.size() * sizeof(AstNode) + _nodes.capacity() * sizeof(AstNode*)); } + +// TODO: FIXME +void QueryResources::steal() { + _strings.clear(); + _nodes.clear(); +} /// @brief add a node to the list of nodes void QueryResources::addNode(AstNode* node) { diff --git a/arangod/Aql/QueryResources.h b/arangod/Aql/QueryResources.h index 460f9f48ed..11255ab93f 100644 --- a/arangod/Aql/QueryResources.h +++ b/arangod/Aql/QueryResources.h @@ -41,7 +41,9 @@ class QueryResources { explicit QueryResources(ResourceMonitor*); ~QueryResources(); - + + void steal(); + /// @brief add a node to the list of nodes void addNode(AstNode*); diff --git a/arangod/Aql/QueryResultV8.h b/arangod/Aql/QueryResultV8.h index 09b4a7a17b..1864176e66 100644 --- a/arangod/Aql/QueryResultV8.h +++ b/arangod/Aql/QueryResultV8.h @@ -44,6 +44,7 @@ struct QueryResultV8 : public QueryResult { QueryResultV8(int code, std::string const& details) : QueryResult(code, details), result() {} + QueryResultV8() : QueryResult(TRI_ERROR_NO_ERROR) 
{} explicit QueryResultV8(int code) : QueryResult(code, ""), result() {} v8::Handle result; diff --git a/arangod/Aql/RestAqlHandler.cpp b/arangod/Aql/RestAqlHandler.cpp index 0f9e70b0e2..7cd5088538 100644 --- a/arangod/Aql/RestAqlHandler.cpp +++ b/arangod/Aql/RestAqlHandler.cpp @@ -26,6 +26,7 @@ #include "Aql/ClusterBlocks.h" #include "Aql/ExecutionBlock.h" #include "Aql/ExecutionEngine.h" +#include "Aql/Query.h" #include "Basics/StaticStrings.h" #include "Basics/StringUtils.h" #include "Basics/VPackStringBufferAdapter.h" @@ -95,14 +96,18 @@ void RestAqlHandler::createQueryFromVelocyPack() { VelocyPackHelper::getStringValue(querySlice, "part", ""); auto planBuilder = std::make_shared(VPackBuilder::clone(plan)); - auto query = new Query(false, _vocbase, planBuilder, options, - (part == "main" ? PART_MAIN : PART_DEPENDENT)); - QueryResult res = query->prepare(_queryRegistry); - if (res.code != TRI_ERROR_NO_ERROR) { - LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "failed to instantiate the query: " << res.details; - generateError(rest::ResponseCode::BAD, - TRI_ERROR_QUERY_BAD_JSON_PLAN, res.details); - delete query; + auto query = std::make_unique(false, _vocbase, planBuilder, options, + (part == "main" ? PART_MAIN : PART_DEPENDENT)); + + try { + query->prepare(_queryRegistry, 0); + } catch (std::exception const& ex) { + LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "failed to instantiate the query: " << ex.what(); + generateError(rest::ResponseCode::BAD, TRI_ERROR_QUERY_BAD_JSON_PLAN, ex.what()); + return; + } catch (...) 
{ + LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "failed to instantiate the query"; + generateError(rest::ResponseCode::BAD, TRI_ERROR_QUERY_BAD_JSON_PLAN); return; } @@ -116,14 +121,15 @@ void RestAqlHandler::createQueryFromVelocyPack() { } _qId = TRI_NewTickServer(); + auto transactionContext = query->trx()->transactionContext().get(); try { - _queryRegistry->insert(_qId, query, ttl); + _queryRegistry->insert(_qId, query.get(), ttl); + query.release(); } catch (...) { LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "could not keep query in registry"; generateError(rest::ResponseCode::BAD, TRI_ERROR_INTERNAL, "could not keep query in registry"); - delete query; return; } @@ -139,8 +145,7 @@ void RestAqlHandler::createQueryFromVelocyPack() { return; } - sendResponse(rest::ResponseCode::ACCEPTED, answerBody.slice(), - query->trx()->transactionContext().get()); + sendResponse(rest::ResponseCode::ACCEPTED, answerBody.slice(), transactionContext); } // POST method for /_api/aql/parse (internal) @@ -165,14 +170,12 @@ void RestAqlHandler::parseQuery() { return; } - auto query = - new Query(false, _vocbase, queryString.c_str(), queryString.size(), + auto query = std::make_unique(false, _vocbase, queryString.c_str(), queryString.size(), std::shared_ptr(), nullptr, PART_MAIN); QueryResult res = query->parse(); if (res.code != TRI_ERROR_NO_ERROR) { LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "failed to instantiate the Query: " << res.details; generateError(rest::ResponseCode::BAD, res.code, res.details); - delete query; return; } @@ -306,15 +309,19 @@ void RestAqlHandler::createQueryFromString() { auto options = std::make_shared( VPackBuilder::clone(querySlice.get("options"))); - auto query = new Query(false, _vocbase, queryString.c_str(), + auto query = std::make_unique(false, _vocbase, queryString.c_str(), queryString.size(), bindVars, options, (part == "main" ? 
PART_MAIN : PART_DEPENDENT)); - QueryResult res = query->prepare(_queryRegistry); - if (res.code != TRI_ERROR_NO_ERROR) { - LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "failed to instantiate the query: " << res.details; - generateError(rest::ResponseCode::BAD, - TRI_ERROR_QUERY_BAD_JSON_PLAN, res.details); - delete query; + + try { + query->prepare(_queryRegistry, 0); + } catch (std::exception const& ex) { + LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "failed to instantiate the query: " << ex.what(); + generateError(rest::ResponseCode::BAD, TRI_ERROR_QUERY_BAD_JSON_PLAN, ex.what()); + return; + } catch (...) { + LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "failed to instantiate the query"; + generateError(rest::ResponseCode::BAD, TRI_ERROR_QUERY_BAD_JSON_PLAN); return; } @@ -327,15 +334,16 @@ void RestAqlHandler::createQueryFromString() { ttl = arangodb::basics::StringUtils::doubleDecimal(ttlstring); } + auto transactionContext = query->trx()->transactionContext().get(); _qId = TRI_NewTickServer(); try { - _queryRegistry->insert(_qId, query, ttl); + _queryRegistry->insert(_qId, query.get(), ttl); + query.release(); } catch (...) 
{ LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "could not keep query in registry"; generateError(rest::ResponseCode::BAD, TRI_ERROR_INTERNAL, "could not keep query in registry"); - delete query; return; } @@ -351,8 +359,7 @@ void RestAqlHandler::createQueryFromString() { return; } - sendResponse(rest::ResponseCode::ACCEPTED, answerBody.slice(), - query->trx()->transactionContext().get()); + sendResponse(rest::ResponseCode::ACCEPTED, answerBody.slice(), transactionContext); } // PUT method for /_api/aql//, (internal) diff --git a/arangod/Aql/ShortestPathBlock.cpp b/arangod/Aql/ShortestPathBlock.cpp index 6f0ab85f58..e768762516 100644 --- a/arangod/Aql/ShortestPathBlock.cpp +++ b/arangod/Aql/ShortestPathBlock.cpp @@ -25,6 +25,7 @@ #include "Aql/AqlItemBlock.h" #include "Aql/ExecutionEngine.h" #include "Aql/ExecutionPlan.h" +#include "Aql/Query.h" #include "Utils/OperationCursor.h" #include "Transaction/Methods.h" #include "VocBase/EdgeCollectionInfo.h" diff --git a/arangod/Aql/ShortestPathNode.cpp b/arangod/Aql/ShortestPathNode.cpp index 7c4b766ea0..99d7b547aa 100644 --- a/arangod/Aql/ShortestPathNode.cpp +++ b/arangod/Aql/ShortestPathNode.cpp @@ -28,6 +28,7 @@ #include "Aql/Ast.h" #include "Aql/Collection.h" #include "Aql/ExecutionPlan.h" +#include "Aql/Query.h" #include "Cluster/ClusterComm.h" #include "Indexes/Index.h" #include "Utils/CollectionNameResolver.h" @@ -178,6 +179,12 @@ ShortestPathNode::ShortestPathNode(ExecutionPlan* plan, size_t id, } else { addEdgeColl(eColName, dir); } + + if (dir == TRI_EDGE_ANY) { + // collection with direction ANY must be added again + _graphInfo.add(VPackValue(eColName)); + } + } _graphInfo.close(); } else { @@ -337,9 +344,17 @@ ShortestPathNode::ShortestPathNode(ExecutionPlan* plan, THROW_ARANGO_EXCEPTION(TRI_ERROR_GRAPH_NOT_FOUND); } - auto eColls = _graphObj->edgeCollections(); - for (auto const& n : eColls) { - _edgeColls.push_back(n); + auto const& eColls = _graphObj->edgeCollections(); + for (auto const& it : eColls) { + 
_edgeColls.push_back(it); + + // if there are twice as many directions as collections, this means we + // have a shortest path with direction ANY. we must add each collection + // twice then + if (_directions.size() == 2 * eColls.size()) { + // add collection again + _edgeColls.push_back(it); + } } } else { THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_QUERY_BAD_JSON_PLAN, diff --git a/arangod/Aql/ShortestPathOptions.cpp b/arangod/Aql/ShortestPathOptions.cpp index b7bedafc70..4e2374d0d2 100644 --- a/arangod/Aql/ShortestPathOptions.cpp +++ b/arangod/Aql/ShortestPathOptions.cpp @@ -26,22 +26,23 @@ using namespace arangodb::aql; -ShortestPathOptions::ShortestPathOptions(VPackSlice const& slice) { - VPackSlice obj = slice.get("shortestpathFlags"); - - weightAttribute = ""; - if (obj.hasKey("weightAttribute")) { - VPackSlice v = obj.get("weightAttribute"); - if (v.isString()) { - weightAttribute = v.copyString(); +ShortestPathOptions::ShortestPathOptions(VPackSlice const& slice) + : weightAttribute(), defaultWeight(1) { + VPackSlice obj = slice.get("shortestPathFlags"); + + if (obj.isObject()) { + if (obj.hasKey("weightAttribute")) { + VPackSlice v = obj.get("weightAttribute"); + if (v.isString()) { + weightAttribute = v.copyString(); + } } - } - - defaultWeight = 1; - if (obj.hasKey("defaultWeight")) { - VPackSlice v = obj.get("defaultWeight"); - if (v.isNumber()) { - defaultWeight = v.getNumericValue(); + + if (obj.hasKey("defaultWeight")) { + VPackSlice v = obj.get("defaultWeight"); + if (v.isNumber()) { + defaultWeight = v.getNumericValue(); + } } } } diff --git a/arangod/Aql/ShortestPathOptions.h b/arangod/Aql/ShortestPathOptions.h index 9bf21a285c..040675182b 100644 --- a/arangod/Aql/ShortestPathOptions.h +++ b/arangod/Aql/ShortestPathOptions.h @@ -39,7 +39,7 @@ struct ShortestPathOptions { /// @brief constructor, using default values ShortestPathOptions() - : weightAttribute(""), + : weightAttribute(), defaultWeight(1) {} void 
toVelocyPack(arangodb::velocypack::Builder&) const; diff --git a/arangod/Aql/SortCondition.cpp b/arangod/Aql/SortCondition.cpp index fe63eaa298..dd6121bee6 100644 --- a/arangod/Aql/SortCondition.cpp +++ b/arangod/Aql/SortCondition.cpp @@ -85,6 +85,10 @@ SortCondition::SortCondition( if (node->type == NODE_TYPE_REFERENCE) { handled = true; + if (fieldNames.size() > 1) { + std::reverse(fieldNames.begin(), fieldNames.end()); + } + _fields.emplace_back(std::make_pair( static_cast(node->getData()), fieldNames)); @@ -146,7 +150,7 @@ size_t SortCondition::coveredAttributes( } auto const& field = _fields[fieldsPosition]; - + // ...and check if the field is present in the index definition too if (reference == field.first && arangodb::basics::AttributeName::isIdentical(field.second, indexAttributes[i], false)) { diff --git a/arangod/Aql/TraversalBlock.cpp b/arangod/Aql/TraversalBlock.cpp index 16118cab6b..efac58964c 100644 --- a/arangod/Aql/TraversalBlock.cpp +++ b/arangod/Aql/TraversalBlock.cpp @@ -27,6 +27,7 @@ #include "Aql/ExecutionNode.h" #include "Aql/ExecutionPlan.h" #include "Aql/Functions.h" +#include "Aql/Query.h" #include "Basics/ScopeGuard.h" #include "Basics/StringRef.h" #include "Cluster/ClusterComm.h" diff --git a/arangod/Aql/TraversalConditionFinder.cpp b/arangod/Aql/TraversalConditionFinder.cpp index 4943cc1bed..effceb5ac6 100644 --- a/arangod/Aql/TraversalConditionFinder.cpp +++ b/arangod/Aql/TraversalConditionFinder.cpp @@ -103,7 +103,57 @@ static AstNode* BuildExpansionReplacement(Ast* ast, AstNode const* condition, As return ast->createNodeBinaryOperator(type, lhs, rhs); } -static inline bool IsSupportedNode(AstNode const* node) { +static bool IsSupportedNode(Variable const* pathVar, AstNode const* node) { + // do a quick first check for all comparisons + switch (node->type) { + case NODE_TYPE_OPERATOR_BINARY_ARRAY_EQ: + case NODE_TYPE_OPERATOR_BINARY_ARRAY_NE: + case NODE_TYPE_OPERATOR_BINARY_ARRAY_LT: + case NODE_TYPE_OPERATOR_BINARY_ARRAY_LE: + case 
NODE_TYPE_OPERATOR_BINARY_ARRAY_GT: + case NODE_TYPE_OPERATOR_BINARY_ARRAY_GE: + case NODE_TYPE_OPERATOR_BINARY_ARRAY_IN: + case NODE_TYPE_OPERATOR_BINARY_ARRAY_NIN: + case NODE_TYPE_OPERATOR_BINARY_EQ: + case NODE_TYPE_OPERATOR_BINARY_NE: + case NODE_TYPE_OPERATOR_BINARY_LT: + case NODE_TYPE_OPERATOR_BINARY_LE: + case NODE_TYPE_OPERATOR_BINARY_GT: + case NODE_TYPE_OPERATOR_BINARY_GE: + case NODE_TYPE_OPERATOR_BINARY_IN: + case NODE_TYPE_OPERATOR_BINARY_NIN: { + // the following types of expressions are not supported + // p.edges[0]._from op whatever attribute access + // whatever attribute access op p.edges[0]._from + AstNode const* lhs = node->getMember(0); + AstNode const* rhs = node->getMember(1); + + if (lhs->isAttributeAccessForVariable(pathVar, true)) { + // p.xxx op whatever + if (rhs->type != NODE_TYPE_VALUE && + rhs->type != NODE_TYPE_ARRAY && + rhs->type != NODE_TYPE_OBJECT && + rhs->type != NODE_TYPE_REFERENCE) { + return false; + } + } else if (rhs->isAttributeAccessForVariable(pathVar, true)) { + // whatever op p.xxx + if (lhs->type != NODE_TYPE_VALUE && + lhs->type != NODE_TYPE_ARRAY && + lhs->type != NODE_TYPE_OBJECT && + lhs->type != NODE_TYPE_REFERENCE) { + return false; + } + } + break; + } + default: { + // intentionally no other cases defined... + // we'll simply fall through to the next switch..case statement + break; + } + } + switch (node->type) { case NODE_TYPE_VARIABLE: case NODE_TYPE_OPERATOR_UNARY_PLUS: @@ -169,7 +219,7 @@ static bool checkPathVariableAccessFeasible(Ast* ast, AstNode* parent, Variable const* pathVar, bool& conditionIsImpossible) { AstNode* node = parent->getMemberUnchecked(testIndex); - if (!IsSupportedNode(node)) { + if (!IsSupportedNode(pathVar, node)) { return false; } // We need to walk through each branch and validate: @@ -193,11 +243,11 @@ static bool checkPathVariableAccessFeasible(Ast* ast, AstNode* parent, // We define that patternStep >= 6 is complete Match. 
unsigned char patternStep = 0; - auto supportedGuard = [¬Supported](AstNode const* n, void*) -> bool { + auto supportedGuard = [¬Supported, pathVar](AstNode const* n, void*) -> bool { if (notSupported) { return false; } - if (!IsSupportedNode(n)) { + if (!IsSupportedNode(pathVar, n)) { notSupported = true; return false; } diff --git a/arangod/Aql/TraversalNode.cpp b/arangod/Aql/TraversalNode.cpp index 5c6cb0b7b2..5e03a6dfba 100644 --- a/arangod/Aql/TraversalNode.cpp +++ b/arangod/Aql/TraversalNode.cpp @@ -26,8 +26,9 @@ //////////////////////////////////////////////////////////////////////////////// #include "TraversalNode.h" -#include "Aql/ExecutionPlan.h" #include "Aql/Ast.h" +#include "Aql/ExecutionPlan.h" +#include "Aql/Query.h" #include "Aql/SortCondition.h" #include "Cluster/ClusterComm.h" #include "Indexes/Index.h" diff --git a/arangod/Aql/VariableGenerator.cpp b/arangod/Aql/VariableGenerator.cpp index d0b53f6037..8021857bb2 100644 --- a/arangod/Aql/VariableGenerator.cpp +++ b/arangod/Aql/VariableGenerator.cpp @@ -33,10 +33,11 @@ using namespace arangodb::aql; /// @brief create the generator -VariableGenerator::VariableGenerator() : _variables(), _id(0) { +VariableGenerator::VariableGenerator() + : _variables(), _id(0) { _variables.reserve(8); } - + /// @brief destroy the generator VariableGenerator::~VariableGenerator() { // free all variables @@ -67,79 +68,50 @@ Variable* VariableGenerator::createVariable(char const* name, size_t length, bool isUserDefined) { TRI_ASSERT(name != nullptr); - auto variable = new Variable(std::string(name, length), nextId()); + auto variable = std::make_unique(std::string(name, length), nextId()); if (isUserDefined) { TRI_ASSERT(variable->isUserDefined()); } - try { - _variables.emplace(variable->id, variable); - } catch (...) 
{ - // prevent memleak - delete variable; - throw; - } - - return variable; + _variables.emplace(variable->id, variable.get()); + return variable.release(); } /// @brief generate a variable Variable* VariableGenerator::createVariable(std::string const& name, bool isUserDefined) { - auto variable = new Variable(name, nextId()); + auto variable = std::make_unique(name, nextId()); if (isUserDefined) { TRI_ASSERT(variable->isUserDefined()); } - try { - _variables.emplace(variable->id, variable); - } catch (...) { - // prevent memleak - delete variable; - throw; - } - - return variable; + _variables.emplace(variable->id, variable.get()); + return variable.release(); } Variable* VariableGenerator::createVariable(Variable const* original) { TRI_ASSERT(original != nullptr); - auto variable = original->clone(); + std::unique_ptr variable(original->clone()); - try { - _variables.emplace(variable->id, variable); - } catch (...) { - // prevent memleak - delete variable; - throw; - } - - return variable; + _variables.emplace(variable->id, variable.get()); + return variable.release(); } /// @brief generate a variable from VelocyPack -Variable* VariableGenerator::createVariable( - VPackSlice const slice) { - auto variable = new Variable(slice); +Variable* VariableGenerator::createVariable(VPackSlice const slice) { + auto variable = std::make_unique(slice); auto existing = getVariable(variable->id); + if (existing != nullptr) { // variable already existed. - delete variable; return existing; } - try { - _variables.emplace(variable->id, variable); - } catch (...) 
{ - // prevent memleak - delete variable; - throw; - } - - return variable; + _variables.emplace(variable->id, variable.get()); + return variable.release(); } /// @brief generate a temporary variable diff --git a/arangod/Aql/VariableGenerator.h b/arangod/Aql/VariableGenerator.h index 362e0380af..3eb01aca62 100644 --- a/arangod/Aql/VariableGenerator.h +++ b/arangod/Aql/VariableGenerator.h @@ -35,13 +35,16 @@ class VariableGenerator { public: /// @brief create the generator VariableGenerator(); + + VariableGenerator(VariableGenerator const& other) = delete; + VariableGenerator& operator=(VariableGenerator const& other) = delete; /// @brief destroy the generator ~VariableGenerator(); public: /// @brief return a map of all variable ids with their names - std::unordered_map variables(bool) const; + std::unordered_map variables(bool includeTemporaries) const; /// @brief generate a variable Variable* createVariable(char const*, size_t, bool); diff --git a/arangod/CMakeLists.txt b/arangod/CMakeLists.txt index 4df4a18ce0..e4ae525bce 100644 --- a/arangod/CMakeLists.txt +++ b/arangod/CMakeLists.txt @@ -143,6 +143,7 @@ SET(ARANGOD_SOURCES Aql/Optimizer.cpp Aql/OptimizerRules.cpp Aql/Parser.cpp + Aql/PlanCache.cpp Aql/Quantifier.cpp Aql/Query.cpp Aql/QueryCache.cpp @@ -168,6 +169,20 @@ SET(ARANGOD_SOURCES Aql/VariableGenerator.cpp Aql/grammar.cpp Aql/tokens.cpp + Cache/Cache.cpp + Cache/CacheManagerFeature.cpp + Cache/CacheManagerFeatureThreads.cpp + Cache/CachedValue.cpp + Cache/Manager.cpp + Cache/ManagerTasks.cpp + Cache/Metadata.cpp + Cache/PlainBucket.cpp + Cache/PlainCache.cpp + Cache/Rebalancer.cpp + Cache/State.cpp + Cache/TransactionalBucket.cpp + Cache/TransactionalCache.cpp + Cache/TransactionWindow.cpp Cluster/AgencyCallback.cpp Cluster/AgencyCallbackRegistry.cpp Cluster/ClusterComm.cpp diff --git a/arangod/Cache/Cache.cpp b/arangod/Cache/Cache.cpp new file mode 100644 index 0000000000..fc5df009c2 --- /dev/null +++ b/arangod/Cache/Cache.cpp @@ -0,0 +1,339 @@ 
+//////////////////////////////////////////////////////////////////////////////// +/// DISCLAIMER +/// +/// Copyright 2014-2017 ArangoDB GmbH, Cologne, Germany +/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is ArangoDB GmbH, Cologne, Germany +/// +/// @author Daniel H. Larkin +//////////////////////////////////////////////////////////////////////////////// + +#include "Cache/Cache.h" +#include "Basics/Common.h" +#include "Basics/fasthash.h" +#include "Cache/CachedValue.h" +#include "Cache/Manager.h" +#include "Cache/Metadata.h" +#include "Cache/State.h" + +#include +#include +#include +#include + +using namespace arangodb::cache; + +Cache::Finding::Finding(CachedValue* v) : _value(v) { + if (_value != nullptr) { + _value->lease(); + } +} + +Cache::Finding::Finding(Finding const& other) : _value(other._value) { + if (_value != nullptr) { + _value->lease(); + } +} + +Cache::Finding::Finding(Finding&& other) : _value(other._value) { + other._value = nullptr; +} + +Cache::Finding& Cache::Finding::operator=(Finding const& other) { + if (&other == this) { + return *this; + } + + if (_value != nullptr) { + _value->release(); + } + + _value = other._value; + if (_value != nullptr) { + _value->lease(); + } + + return *this; +} + +Cache::Finding& Cache::Finding::operator=(Finding&& other) { + if (&other == this) { + return *this; + } + + if (_value != nullptr) { + 
_value->release(); + } + + _value = other._value; + other._value = nullptr; + + return *this; +} + +Cache::Finding::~Finding() { + if (_value != nullptr) { + _value->release(); + } +} + +void Cache::Finding::reset(CachedValue* v) { + if (_value != nullptr) { + _value->release(); + } + + _value = v; + if (_value != nullptr) { + _value->lease(); + } +} + +bool Cache::Finding::found() const { return (_value != nullptr); } + +CachedValue const* Cache::Finding::value() const { return _value; } + +CachedValue* Cache::Finding::copy() const { + return ((_value == nullptr) ? nullptr : _value->copy()); +} + +void Cache::destroy(std::shared_ptr cache) { + if (cache.get() != nullptr) { + cache->shutdown(); + } +} + +uint64_t Cache::limit() { + uint64_t limit = 0; + _state.lock(); + if (isOperational()) { + _metadata->lock(); + limit = _metadata->softLimit(); + _metadata->unlock(); + } + _state.unlock(); + return limit; +} + +uint64_t Cache::usage() { + uint64_t usage = 0; + _state.lock(); + if (isOperational()) { + _metadata->lock(); + usage = _metadata->usage(); + _metadata->unlock(); + } + _state.unlock(); + return usage; +} + +bool Cache::resize(uint64_t requestedLimit) { + _state.lock(); + bool allowed = isOperational(); + bool resized = false; + startOperation(); + _state.unlock(); + + if (allowed) { + // wait for previous resizes to finish + while (true) { + _metadata->lock(); + if (!_metadata->isSet(State::Flag::resizing)) { + _metadata->unlock(); + break; + } + _metadata->unlock(); + } + + resized = requestResize(requestedLimit, false); + } + endOperation(); + return resized; +} + +Cache::Cache(Manager* manager, uint64_t requestedLimit, bool allowGrowth, + std::function deleter) + : _state(), + _allowGrowth(allowGrowth), + _evictionStats(1024), + _insertionCount(0), + _manager(manager), + _openOperations(), + _migrateRequestTime(std::chrono::steady_clock::now()), + _resizeRequestTime(std::chrono::steady_clock::now()) { + try { + _metadata = 
_manager->registerCache(this, requestedLimit, deleter); + } catch (std::bad_alloc) { + // could not register, mark as non-operational + if (!_state.isSet(State::Flag::shutdown)) { + _state.toggleFlag(State::Flag::shutdown); + } + } +} + +bool Cache::isOperational() const { + TRI_ASSERT(_state.isLocked()); + return (!_state.isSet(State::Flag::shutdown) && + !_state.isSet(State::Flag::shuttingDown)); +} + +void Cache::startOperation() { ++_openOperations; } + +void Cache::endOperation() { --_openOperations; } + +bool Cache::isMigrating() const { + TRI_ASSERT(_state.isLocked()); + return _state.isSet(State::Flag::migrating); +} + +bool Cache::requestResize(uint64_t requestedLimit, bool internal) { + bool resized = false; + int64_t lockTries = internal ? 10LL : -1LL; + bool ok = _state.lock(lockTries); + if (ok) { + if (!internal || (_allowGrowth && (std::chrono::steady_clock::now() > + _resizeRequestTime))) { + _metadata->lock(); + uint64_t newLimit = + (requestedLimit > 0) ? requestedLimit : (_metadata->hardLimit() * 2); + _metadata->unlock(); + auto result = _manager->requestResize(_metadata, newLimit); + _resizeRequestTime = result.second; + resized = result.first; + } + _state.unlock(); + } + return resized; +} + +void Cache::requestMigrate(uint32_t requestedLogSize) { + if ((++_insertionCount & 0xFFF) == 0) { + auto stats = _evictionStats.getFrequencies(); + if (((stats->size() == 1) && + ((*stats)[0].first == static_cast(Stat::eviction))) || + ((stats->size() == 2) && + ((*stats)[0].second * 16 > (*stats)[1].second))) { + bool ok = _state.lock(10LL); + if (ok) { + if (!isMigrating() && + (std::chrono::steady_clock::now() > _migrateRequestTime)) { + _metadata->lock(); + uint32_t newLogSize = (requestedLogSize > 0) + ? 
requestedLogSize + : (_metadata->logSize() + 1); + _metadata->unlock(); + auto result = _manager->requestMigrate(_metadata, newLogSize); + _resizeRequestTime = result.second; + if (result.first) { + _evictionStats.clear(); + } + } + _state.unlock(); + } + } + } +} + +void Cache::freeValue(CachedValue* value) { + while (value->refCount.load() > 0) { + usleep(1); + } + + delete value; +} + +bool Cache::reclaimMemory(uint64_t size) { + _metadata->lock(); + _metadata->adjustUsageIfAllowed(-static_cast(size)); + bool underLimit = (_metadata->softLimit() >= _metadata->usage()); + _metadata->unlock(); + + return underLimit; +} + +uint32_t Cache::hashKey(void const* key, uint32_t keySize) const { + return (std::max)(static_cast(1), + fasthash32(key, keySize, 0xdeadbeefUL)); +} + +void Cache::recordStat(Cache::Stat stat) { + _evictionStats.insertRecord(static_cast(stat)); +} + +Manager::MetadataItr& Cache::metadata() { return _metadata; } + +void Cache::beginShutdown() { + _state.lock(); + if (!_state.isSet(State::Flag::shutdown) && + !_state.isSet(State::Flag::shuttingDown)) { + _state.toggleFlag(State::Flag::shuttingDown); + } + _state.unlock(); +} + +void Cache::shutdown() { + _state.lock(); + if (!_state.isSet(State::Flag::shutdown)) { + if (!_state.isSet(State::Flag::shuttingDown)) { + _state.toggleFlag(State::Flag::shuttingDown); + } + + while (_openOperations.load() > 0) { + _state.unlock(); + usleep(10); + _state.lock(); + } + + _state.clear(); + _state.toggleFlag(State::Flag::shutdown); + clearTables(); + _manager->unregisterCache(_metadata); + } + _state.unlock(); +} + +bool Cache::canResize() { + bool allowed = true; + _state.lock(); + if (isOperational()) { + _metadata->lock(); + if (_metadata->isSet(State::Flag::resizing)) { + allowed = false; + } + _metadata->unlock(); + } else { + allowed = false; + } + _state.unlock(); + + return allowed; +} + +bool Cache::canMigrate() { + bool allowed = true; + _state.lock(); + if (isOperational()) { + _metadata->lock(); + 
if (_metadata->isSet(State::Flag::migrating)) { + allowed = false; + } + _metadata->unlock(); + } else { + allowed = false; + } + _state.unlock(); + + return allowed; +} diff --git a/arangod/Cache/Cache.h b/arangod/Cache/Cache.h new file mode 100644 index 0000000000..87f26e0457 --- /dev/null +++ b/arangod/Cache/Cache.h @@ -0,0 +1,184 @@ +//////////////////////////////////////////////////////////////////////////////// +/// DISCLAIMER +/// +/// Copyright 2014-2017 ArangoDB GmbH, Cologne, Germany +/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is ArangoDB GmbH, Cologne, Germany +/// +/// @author Daniel H. Larkin +//////////////////////////////////////////////////////////////////////////////// + +#ifndef ARANGODB_CACHE_CACHE_H +#define ARANGODB_CACHE_CACHE_H + +#include "Basics/Common.h" +#include "Cache/CachedValue.h" +#include "Cache/FrequencyBuffer.h" +#include "Cache/Manager.h" +#include "Cache/ManagerTasks.h" +#include "Cache/Metadata.h" +#include "Cache/State.h" + +#include +#include +#include + +namespace arangodb { +namespace cache { + +//////////////////////////////////////////////////////////////////////////////// +/// @brief The common structure of all caches managed by Manager. +/// +/// Any pure virtual functions are documented in derived classes implementing +/// them. 
+//////////////////////////////////////////////////////////////////////////////// +class Cache { + public: + typedef FrequencyBuffer StatBuffer; + + public: + ////////////////////////////////////////////////////////////////////////////// + /// @brief A helper class for managing CachedValue lifecycles. + /// + /// Returned to clients by Cache::find. Clients must destroy the Finding + /// object within a short period of time to allow proper memory management + /// within the cache system. If the underlying value needs to be retained for + /// any significant period of time, it must be copied so that the finding + /// object may be destroyed. + ////////////////////////////////////////////////////////////////////////////// + class Finding { + public: + Finding(CachedValue* v); + Finding(Finding const& other); + Finding(Finding&& other); + Finding& operator=(Finding const& other); + Finding& operator=(Finding&& other); + ~Finding(); + + //////////////////////////////////////////////////////////////////////////// + /// @brief Changes the underlying CachedValue pointer. + //////////////////////////////////////////////////////////////////////////// + void reset(CachedValue* v); + + //////////////////////////////////////////////////////////////////////////// + /// @brief Specifies whether the value was found. If not, value is nullptr. + //////////////////////////////////////////////////////////////////////////// + bool found() const; + + //////////////////////////////////////////////////////////////////////////// + /// @brief Returns the underlying value pointer. + //////////////////////////////////////////////////////////////////////////// + CachedValue const* value() const; + + //////////////////////////////////////////////////////////////////////////// + /// @brief Creates a copy of the underlying value and returns a pointer. 
+ //////////////////////////////////////////////////////////////////////////// + CachedValue* copy() const; + + private: + CachedValue* _value; + }; + + public: + // primary functionality; documented in derived classes + virtual Finding find(void const* key, uint32_t keySize) = 0; + virtual bool insert(CachedValue* value) = 0; + virtual bool remove(void const* key, uint32_t keySize) = 0; + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Returns the limit on memory usage for this cache in bytes. + ////////////////////////////////////////////////////////////////////////////// + uint64_t limit(); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Returns the current memory usage for this cache in bytes. + ////////////////////////////////////////////////////////////////////////////// + uint64_t usage(); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Request that this cache be given a new limit as specified. + /// + /// If there is enough free memory globally and the cache is not currently + /// resizing, the request should be granted. If downsizing the cache, it may + /// need to free some memory, which will be done in an asynchronous task. 
+ ////////////////////////////////////////////////////////////////////////////// + bool resize(uint64_t requestedLimit = 0); + + protected: + State _state; + + // whether to allow the cache to resize larger when it fills + bool _allowGrowth; + + // structures to handle eviction-upon-insertion rate + enum class Stat : uint8_t { eviction = 1, noEviction = 2 }; + StatBuffer _evictionStats; + std::atomic _insertionCount; + + // allow communication with manager + Manager* _manager; + Manager::MetadataItr _metadata; + + // keep track of number of open operations to allow clean shutdown + std::atomic _openOperations; + + // times to wait until requesting is allowed again + Manager::time_point _migrateRequestTime; + Manager::time_point _resizeRequestTime; + + // friend class manager and tasks + friend class FreeMemoryTask; + friend class Manager; + friend class MigrateTask; + + protected: + // shutdown cache and let its memory be reclaimed + static void destroy(std::shared_ptr cache); + + Cache(Manager* manager, uint64_t requestedLimit, bool allowGrowth, + std::function deleter); + + virtual ~Cache() = default; + + bool isOperational() const; + void startOperation(); + void endOperation(); + + bool isMigrating() const; + bool requestResize(uint64_t requestedLimit = 0, bool internal = true); + void requestMigrate(uint32_t requestedLogSize = 0); + + void freeValue(CachedValue* value); + bool reclaimMemory(uint64_t size); + virtual void clearTables() = 0; + + uint32_t hashKey(void const* key, uint32_t keySize) const; + void recordStat(Cache::Stat stat); + + // management + Manager::MetadataItr& metadata(); + void beginShutdown(); + void shutdown(); + bool canResize(); + bool canMigrate(); + virtual bool freeMemory() = 0; + virtual bool migrate() = 0; +}; + +}; // end namespace cache +}; // end namespace arangodb + +#endif diff --git a/arangod/Cache/CacheManagerFeature.cpp b/arangod/Cache/CacheManagerFeature.cpp new file mode 100644 index 0000000000..97d570cf96 --- /dev/null 
+++ b/arangod/Cache/CacheManagerFeature.cpp @@ -0,0 +1,123 @@ +//////////////////////////////////////////////////////////////////////////////// +/// DISCLAIMER +/// +/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany +/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is ArangoDB GmbH, Cologne, Germany +/// +/// @author Dr. Frank Celler +//////////////////////////////////////////////////////////////////////////////// + +#include "CacheManagerFeature.h" + +#ifdef _WIN32 +#include +#include +#endif + +#include "ApplicationFeatures/ApplicationServer.h" +#include "Basics/ArangoGlobalContext.h" +#include "Basics/WorkMonitor.h" +#include "Basics/asio-helper.h" +#include "Cache/CacheManagerFeatureThreads.h" +#include "Cache/Manager.h" +#include "Logger/LogAppender.h" +#include "ProgramOptions/ProgramOptions.h" +#include "ProgramOptions/Section.h" +//#include "RestServer/ServerFeature.h" +#include "Scheduler/Scheduler.h" +#include "Scheduler/SchedulerFeature.h" + +using namespace arangodb; +using namespace arangodb::application_features; +using namespace arangodb::basics; +using namespace arangodb::cache; +using namespace arangodb::options; +using namespace arangodb::rest; + +Manager* CacheManagerFeature::MANAGER = nullptr; + +static constexpr uint64_t MIN_REBALANCING_INTERVAL = 500 * 1000; + +CacheManagerFeature::CacheManagerFeature( + 
application_features::ApplicationServer* server) + : ApplicationFeature(server, "CacheManager"), + _manager(nullptr), + _rebalancer(nullptr), + _cacheSize(16 * 1024 * 1024), + _rebalancingInterval(2 * 1000 * 1000) { + // TODO: set intelligent default for _cacheSize + setOptional(true); + requiresElevatedPrivileges(false); + startsAfter("Scheduler"); +} + +CacheManagerFeature::~CacheManagerFeature() {} + +void CacheManagerFeature::collectOptions( + std::shared_ptr options) { + options->addSection("cache", "Configure the hash cache"); + + options->addOption("--cache.size", "size of cache in bytes", + new UInt64Parameter(&_cacheSize)); + + options->addOption("--cache.rebalancing-interval", + "microseconds between rebalancing attempts", + new UInt64Parameter(&_rebalancingInterval)); +} + +void CacheManagerFeature::validateOptions( + std::shared_ptr) { + if (_cacheSize < Manager::MINIMUM_SIZE) { + LOG_TOPIC(FATAL, arangodb::Logger::FIXME) + << "invalid value for `--cache.size', need at least " + << Manager::MINIMUM_SIZE; + FATAL_ERROR_EXIT(); + } + + if (_cacheSize < (MIN_REBALANCING_INTERVAL)) { + LOG_TOPIC(FATAL, arangodb::Logger::FIXME) + << "invalid value for `--cache.rebalancing-interval', need at least " + << (MIN_REBALANCING_INTERVAL); + FATAL_ERROR_EXIT(); + } +} + +void CacheManagerFeature::start() { + auto scheduler = SchedulerFeature::SCHEDULER; + auto ioService = (scheduler == nullptr) ? 
nullptr : scheduler->ioService(); + _manager.reset(new Manager(ioService, _cacheSize)); + MANAGER = _manager.get(); + _rebalancer.reset( + new CacheRebalancerThread(_manager.get(), _rebalancingInterval)); + _rebalancer->start(); + LOG_TOPIC(DEBUG, Logger::STARTUP) << "cache manager has started"; +} + +void CacheManagerFeature::beginShutdown() { + if (_manager != nullptr) { + _manager->beginShutdown(); + _rebalancer->beginShutdown(); + } +} + +void CacheManagerFeature::stop() { + if (_manager != nullptr) { + _manager->shutdown(); + } +} + +void CacheManagerFeature::unprepare() { MANAGER = nullptr; } diff --git a/arangod/Cache/CacheManagerFeature.h b/arangod/Cache/CacheManagerFeature.h new file mode 100644 index 0000000000..7646559ba9 --- /dev/null +++ b/arangod/Cache/CacheManagerFeature.h @@ -0,0 +1,60 @@ +//////////////////////////////////////////////////////////////////////////////// +/// DISCLAIMER +/// +/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany +/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is ArangoDB GmbH, Cologne, Germany +/// +/// @author Daniel H. 
Larkin +//////////////////////////////////////////////////////////////////////////////// + +#ifndef ARANGOD_CACHE_CACHE_MANAGER_FEATURE_H +#define ARANGOD_CACHE_CACHE_MANAGER_FEATURE_H 1 + +#include "ApplicationFeatures/ApplicationFeature.h" + +#include "Basics/asio-helper.h" +#include "Cache/CacheManagerFeatureThreads.h" +#include "Cache/Manager.h" + +namespace arangodb { + +class CacheManagerFeature final + : public application_features::ApplicationFeature { + public: + static cache::Manager* MANAGER; + + public: + explicit CacheManagerFeature(application_features::ApplicationServer* server); + ~CacheManagerFeature(); + + public: + void collectOptions(std::shared_ptr) override final; + void validateOptions(std::shared_ptr) override final; + void start() override final; + void beginShutdown() override final; + void stop() override final; + void unprepare() override final; + + private: + std::unique_ptr _manager; + std::unique_ptr _rebalancer; + uint64_t _cacheSize; + uint64_t _rebalancingInterval; +}; +} + +#endif diff --git a/arangod/Cache/CacheManagerFeatureThreads.cpp b/arangod/Cache/CacheManagerFeatureThreads.cpp new file mode 100644 index 0000000000..925a89a069 --- /dev/null +++ b/arangod/Cache/CacheManagerFeatureThreads.cpp @@ -0,0 +1,61 @@ +//////////////////////////////////////////////////////////////////////////////// +/// DISCLAIMER +/// +/// Copyright 2014-2017 ArangoDB GmbH, Cologne, Germany +/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is ArangoDB GmbH, Cologne, Germany +/// +/// @author Daniel H. Larkin +//////////////////////////////////////////////////////////////////////////////// + +#include "Cache/CacheManagerFeatureThreads.h" +#include "Basics/Common.h" +#include "Basics/ConditionLocker.h" +#include "Basics/ConditionVariable.h" +#include "Basics/Thread.h" +#include "Cache/Manager.h" +#include "Cache/Rebalancer.h" + +#include + +using namespace arangodb; + +CacheRebalancerThread::CacheRebalancerThread(cache::Manager* manager, + uint64_t interval) + : Thread("CacheRebalancerThread"), + _manager(manager), + _rebalancer(_manager), + _fullInterval(interval), + _shortInterval(100) {} + +CacheRebalancerThread::~CacheRebalancerThread() { shutdown(); } + +void CacheRebalancerThread::beginShutdown() { + Thread::beginShutdown(); + + CONDITION_LOCKER(guard, _condition); + guard.signal(); +} + +void CacheRebalancerThread::run() { + while (!isStopping()) { + bool ran = _rebalancer.rebalance(); + uint64_t interval = ran ? _fullInterval : _shortInterval; + + CONDITION_LOCKER(guard, _condition); + guard.wait(interval); + } +} diff --git a/arangod/Cache/CacheManagerFeatureThreads.h b/arangod/Cache/CacheManagerFeatureThreads.h new file mode 100644 index 0000000000..c0a486590d --- /dev/null +++ b/arangod/Cache/CacheManagerFeatureThreads.h @@ -0,0 +1,57 @@ +//////////////////////////////////////////////////////////////////////////////// +/// DISCLAIMER +/// +/// Copyright 2014-2017 ArangoDB GmbH, Cologne, Germany +/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. 
+/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is ArangoDB GmbH, Cologne, Germany +/// +/// @author Daniel H. Larkin +//////////////////////////////////////////////////////////////////////////////// + +#ifndef ARANGODB_CACHE_MANAGER_FEATURE_THREADS_H +#define ARANGODB_CACHE_MANAGER_FEATURE_THREADS_H + +#include "Basics/Common.h" +#include "Basics/ConditionVariable.h" +#include "Basics/Thread.h" +#include "Cache/Manager.h" +#include "Cache/Rebalancer.h" + +#include + +namespace arangodb { + +class CacheRebalancerThread : public Thread { + public: + CacheRebalancerThread(cache::Manager* manager, uint64_t interval); + ~CacheRebalancerThread(); + + void beginShutdown() override; + + protected: + void run() override; + + private: + cache::Manager* _manager; + cache::Rebalancer _rebalancer; + uint64_t _fullInterval; + uint64_t _shortInterval; + basics::ConditionVariable _condition; +}; + +}; // end namespace arangodb + +#endif diff --git a/arangod/Cache/CachedValue.cpp b/arangod/Cache/CachedValue.cpp new file mode 100644 index 0000000000..6e0248a619 --- /dev/null +++ b/arangod/Cache/CachedValue.cpp @@ -0,0 +1,94 @@ +//////////////////////////////////////////////////////////////////////////////// +/// DISCLAIMER +/// +/// Copyright 2014-2017 ArangoDB GmbH, Cologne, Germany +/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. 
+/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is ArangoDB GmbH, Cologne, Germany +/// +/// @author Daniel H. Larkin +//////////////////////////////////////////////////////////////////////////////// + +#include "Cache/CachedValue.h" + +#include +#include + +using namespace arangodb::cache; + +uint8_t const* CachedValue::key() const { + uint8_t const* buf = reinterpret_cast(this); + return (buf + sizeof(CachedValue)); +} + +uint8_t const* CachedValue::value() const { + if (valueSize == 0) { + return nullptr; + } + + uint8_t const* buf = reinterpret_cast(this); + return (buf + sizeof(CachedValue) + keySize); +} + +uint64_t CachedValue::size() const { + uint64_t size = sizeof(CachedValue); + size += keySize; + size += valueSize; + return size; +} + +bool CachedValue::sameKey(void const* k, uint32_t kSize) const { + if (keySize != kSize) { + return false; + } + + return (0 == memcmp(key(), k, keySize)); +} + +void CachedValue::lease() { refCount++; } + +void CachedValue::release() { refCount--; } + +bool CachedValue::isFreeable() { return (refCount.load() == 0); } + +CachedValue* CachedValue::copy() const { + uint8_t* buf = new uint8_t[size()]; + memcpy(buf, this, size()); + return reinterpret_cast(buf); +} + +CachedValue* CachedValue::construct(void const* k, uint32_t kSize, + void const* v, uint64_t vSize) { + if (kSize == 0 || k == nullptr || (vSize > 0 && v == nullptr)) { + return nullptr; + } + + uint8_t* buf = new uint8_t[sizeof(CachedValue) + kSize + vSize]; + CachedValue* cv = reinterpret_cast(buf); + + cv->refCount = 0; + cv->keySize = kSize; + 
cv->valueSize = vSize; + std::memcpy(const_cast(cv->key()), k, kSize); + if (vSize > 0) { + std::memcpy(const_cast(cv->value()), v, vSize); + } + + return cv; +} + +void CachedValue::operator delete(void* ptr) { + delete[] reinterpret_cast(ptr); +} diff --git a/arangod/Cache/CachedValue.h b/arangod/Cache/CachedValue.h new file mode 100644 index 0000000000..b830728d11 --- /dev/null +++ b/arangod/Cache/CachedValue.h @@ -0,0 +1,117 @@ +//////////////////////////////////////////////////////////////////////////////// +/// DISCLAIMER +/// +/// Copyright 2014-2017 ArangoDB GmbH, Cologne, Germany +/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is ArangoDB GmbH, Cologne, Germany +/// +/// @author Daniel H. Larkin +//////////////////////////////////////////////////////////////////////////////// + +#ifndef ARANGODB_CACHE_CACHED_VALUE_H +#define ARANGODB_CACHE_CACHED_VALUE_H + +#include "Basics/Common.h" + +#include +#include + +namespace arangodb { +namespace cache { + +//////////////////////////////////////////////////////////////////////////////// +/// @brief This is the beginning of a cache data entry. +/// +/// It will be allocated using new uint8_t[] with the correct size for header, +/// key and value. The key and value reside directly behind the header entries +/// contained in this struct. The reference count is used to lend CachedValues +/// to clients. 
+//////////////////////////////////////////////////////////////////////////////// +struct CachedValue { + ////////////////////////////////////////////////////////////////////////////// + /// @brief Reference count (to avoid premature deletion) + ////////////////////////////////////////////////////////////////////////////// + std::atomic refCount; + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Size of the key in bytes + ////////////////////////////////////////////////////////////////////////////// + uint32_t keySize; + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Size of the value in bytes + ////////////////////////////////////////////////////////////////////////////// + uint64_t valueSize; + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Returns a pointer offset to the key + ////////////////////////////////////////////////////////////////////////////// + uint8_t const* key() const; + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Returns a pointer offset to the value + ////////////////////////////////////////////////////////////////////////////// + uint8_t const* value() const; + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Returns the allocated size of bytes including the key and value + ////////////////////////////////////////////////////////////////////////////// + uint64_t size() const; + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Utility method to compare underlying key to external key + ////////////////////////////////////////////////////////////////////////////// + bool sameKey(void const* k, uint32_t kSize) const; + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Increase reference count + 
////////////////////////////////////////////////////////////////////////////// + void lease(); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Decrease reference count + ////////////////////////////////////////////////////////////////////////////// + void release(); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Checks whether value can be freed (i.e. no references to it) + ////////////////////////////////////////////////////////////////////////////// + bool isFreeable(); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Create a copy of this CachedValue object + ////////////////////////////////////////////////////////////////////////////// + CachedValue* copy() const; + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Construct a CachedValue object from a given key and value + ////////////////////////////////////////////////////////////////////////////// + static CachedValue* construct(void const* k, uint32_t kSize, void const* v, + uint64_t vSize); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Custom deleter to handle casting issues + ////////////////////////////////////////////////////////////////////////////// + static void operator delete(void* ptr); +}; + +// ensure that header size is what we expect +static_assert(sizeof(CachedValue) == 16, "Expected sizeof(CachedValue) == 16."); + +}; // end namespace cache +}; // end namespace arangodb + +#endif diff --git a/arangod/Cache/FrequencyBuffer.h b/arangod/Cache/FrequencyBuffer.h new file mode 100644 index 0000000000..41836733f6 --- /dev/null +++ b/arangod/Cache/FrequencyBuffer.h @@ -0,0 +1,140 @@ +//////////////////////////////////////////////////////////////////////////////// +/// DISCLAIMER +/// +/// Copyright 2014-2017 ArangoDB GmbH, Cologne, Germany +/// Copyright 2004-2014 
triAGENS GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is ArangoDB GmbH, Cologne, Germany +/// +/// @author Daniel H. Larkin +//////////////////////////////////////////////////////////////////////////////// + +#ifndef ARANGODB_CACHE_FREQUENCY_BUFFER_H +#define ARANGODB_CACHE_FREQUENCY_BUFFER_H + +#include "Basics/Common.h" + +#include +#include +#include +#include +#include +#include +#include + +namespace arangodb { +namespace cache { + +//////////////////////////////////////////////////////////////////////////////// +/// @brief Lockless structure to calculate approximate relative event +/// frequencies. +/// +/// Used to record events and then compute the number of occurrences of each +/// within a certain time-frame. The underlying structure is a circular buffer +/// which over-writes itself after it fills up (thus only maintaining a recent +/// window on the records). +//////////////////////////////////////////////////////////////////////////////// +template +class FrequencyBuffer { + public: + typedef std::vector> stats_t; + + private: + std::atomic _current; + uint64_t _capacity; + uint64_t _mask; + std::unique_ptr _buffer; + + public: + ////////////////////////////////////////////////////////////////////////////// + /// @brief Initialize with the given capacity. 
+ ////////////////////////////////////////////////////////////////////////////// + FrequencyBuffer(uint64_t capacity) : _current(0) { + size_t i = 0; + for (; (1ULL << i) < capacity; i++) { + } + _capacity = (1 << i); + _mask = _capacity - 1; + _buffer.reset(new T[_capacity]()); + } + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Reports the memory usage in bytes. + ////////////////////////////////////////////////////////////////////////////// + uint64_t memoryUsage() { + return ((_capacity * sizeof(T)) + sizeof(FrequencyBuffer)); + } + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Insert an individual event record. + ////////////////////////////////////////////////////////////////////////////// + void insertRecord(T const& record) { + ++_current; + _buffer[_current & _mask] = record; + } + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Remove all occurrences of the specified event record. + ////////////////////////////////////////////////////////////////////////////// + void purgeRecord(T const& record) { + for (size_t i = 0; i < _capacity; i++) { + if (_buffer[i] == record) { + _buffer[i] = T(); + } + } + } + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Return a list of (event, count) pairs for each recorded event in + /// ascending order. 
+ ////////////////////////////////////////////////////////////////////////////// + std::shared_ptr getFrequencies() const { + // calculate frequencies + std::unordered_map frequencies; + for (size_t i = 0; i < _capacity; i++) { + T entry = _buffer[i]; + if (entry != T()) { + frequencies[entry]++; + } + } + + // gather and sort frequencies + std::shared_ptr data(new stats_t()); + data->reserve(frequencies.size()); + for (auto f : frequencies) { + data->emplace_back(std::pair(f.first, f.second)); + } + std::sort(data->begin(), data->end(), + [](std::pair& left, std::pair& right) { + return left.second < right.second; + }); + + return data; + } + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Clear the buffer, removing all event records. + ////////////////////////////////////////////////////////////////////////////// + void clear() { + for (size_t i = 0; i < _capacity; i++) { + _buffer[i] = T(); + } + } +}; + +}; // end namespace cache +}; // end namespace arangodb + +#endif diff --git a/arangod/Cache/Manager.cpp b/arangod/Cache/Manager.cpp new file mode 100644 index 0000000000..fa0b48b834 --- /dev/null +++ b/arangod/Cache/Manager.cpp @@ -0,0 +1,725 @@ +//////////////////////////////////////////////////////////////////////////////// +/// DISCLAIMER +/// +/// Copyright 2014-2017 ArangoDB GmbH, Cologne, Germany +/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is ArangoDB GmbH, Cologne, Germany +/// +/// @author Daniel H. Larkin +//////////////////////////////////////////////////////////////////////////////// + +#include "Cache/Manager.h" +#include "Basics/Common.h" +#include "Basics/asio-helper.h" +#include "Cache/Cache.h" +#include "Cache/CachedValue.h" +#include "Cache/FrequencyBuffer.h" +#include "Cache/ManagerTasks.h" +#include "Cache/Metadata.h" +#include "Cache/PlainCache.h" +#include "Cache/State.h" +#include "Cache/TransactionalCache.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +using namespace arangodb::cache; + +uint64_t Manager::MINIMUM_SIZE = 1024 * 1024; + +static constexpr size_t TABLE_LOG_SIZE_ADJUSTMENT = 6; +static constexpr size_t MIN_TABLE_LOG_SIZE = 3; +static constexpr size_t MIN_LOG_SIZE = 10; +static constexpr uint64_t MIN_CACHE_SIZE = 1024; +// use 16 for sizeof std::list node -- should be valid for most libraries +static constexpr uint64_t CACHE_RECORD_OVERHEAD = sizeof(Metadata) + 16; +// assume at most 16 slots in each stack -- TODO: check validity +static constexpr uint64_t TABLE_LISTS_OVERHEAD = 32 * 16 * 8; +static constexpr int64_t TRIES_FAST = 100; + +Manager::Manager(boost::asio::io_service* ioService, uint64_t globalLimit) + : _state(), + _accessStats((globalLimit >= (1024ULL * 1024ULL * 1024ULL)) + ? 
((1024ULL * 1024ULL) / sizeof(std::shared_ptr)) + : (globalLimit / 8192ULL)), + _accessCounter(0), + _caches(), + _globalSoftLimit(globalLimit), + _globalHardLimit(globalLimit), + _globalAllocation(sizeof(Manager) + TABLE_LISTS_OVERHEAD + + _accessStats.memoryUsage()), + _transactions(), + _ioService(ioService), + _resizeAttempt(0), + _outstandingTasks(0), + _rebalancingTasks(0), + _resizingTasks(0) { + TRI_ASSERT(_globalAllocation < _globalSoftLimit); + TRI_ASSERT(_globalAllocation < _globalHardLimit); +} + +Manager::~Manager() { shutdown(); } + +std::shared_ptr Manager::createCache(Manager::CacheType type, + uint64_t requestedLimit, + bool allowGrowth) { + std::shared_ptr result(nullptr); + _state.lock(); + bool allowed = isOperational(); + _state.unlock(); + + if (allowed) { + switch (type) { + case CacheType::Plain: + result = PlainCache::create(this, requestedLimit, allowGrowth); + break; + case CacheType::Transactional: + result = TransactionalCache::create(this, requestedLimit, allowGrowth); + break; + default: + break; + } + } + + return result; +} + +void Manager::destroyCache(std::shared_ptr cache) { + Cache::destroy(cache); +} + +void Manager::beginShutdown() { + _state.lock(); + if (isOperational()) { + _state.toggleFlag(State::Flag::shuttingDown); + for (MetadataItr metadata = _caches.begin(); metadata != _caches.end(); + metadata++) { + metadata->lock(); + metadata->cache()->beginShutdown(); + metadata->unlock(); + } + } + _state.unlock(); +} + +void Manager::shutdown() { + _state.lock(); + if (!_state.isSet(State::Flag::shutdown)) { + if (!_state.isSet(State::Flag::shuttingDown)) { + _state.toggleFlag(State::Flag::shuttingDown); + } + while (!_caches.empty()) { + _caches.begin()->lock(); + std::shared_ptr cache = _caches.begin()->cache(); + _caches.begin()->unlock(); + _state.unlock(); + cache->shutdown(); + _state.lock(); + } + freeUnusedTables(); + _state.clear(); + _state.toggleFlag(State::Flag::shutdown); + } + _state.unlock(); +} + +// change 
global cache limit +bool Manager::resize(uint64_t newGlobalLimit) { + if (newGlobalLimit < MINIMUM_SIZE) { + return false; + } + + bool success = true; + _state.lock(); + + if (!isOperational() || globalProcessRunning()) { + // shut(ting) down or still have another global process running already + success = false; + } else { + // otherwise we need to actually resize + _state.toggleFlag(State::Flag::resizing); + internalResize(newGlobalLimit, true); + } + + _state.unlock(); + return success; +} + +uint64_t Manager::globalLimit() { + _state.lock(); + uint64_t limit = + _state.isSet(State::Flag::resizing) ? _globalSoftLimit : _globalHardLimit; + _state.unlock(); + + return limit; +} + +uint64_t Manager::globalAllocation() { + _state.lock(); + uint64_t allocation = _globalAllocation; + _state.unlock(); + + return allocation; +} + +void Manager::startTransaction() { _transactions.start(); } + +void Manager::endTransaction() { _transactions.end(); } + +Manager::MetadataItr Manager::registerCache( + Cache* cache, uint64_t requestedLimit, + std::function deleter) { + uint32_t logSize = 0; + uint32_t tableLogSize = MIN_TABLE_LOG_SIZE; + for (; (1ULL << logSize) < requestedLimit; logSize++) { + } + uint64_t grantedLimit = 1ULL << logSize; + if (logSize > (TABLE_LOG_SIZE_ADJUSTMENT + MIN_TABLE_LOG_SIZE)) { + tableLogSize = logSize - TABLE_LOG_SIZE_ADJUSTMENT; + } + + _state.lock(); + if (!isOperational()) { + _state.unlock(); + throw std::bad_alloc(); + } + + while (logSize >= MIN_LOG_SIZE) { + uint64_t tableAllocation = + _tables[tableLogSize].empty() ? 
tableSize(tableLogSize) : 0; + if (increaseAllowed(grantedLimit + tableAllocation + + CACHE_RECORD_OVERHEAD)) { + break; + } + + grantedLimit >>= 1U; + logSize--; + if (tableLogSize > MIN_TABLE_LOG_SIZE) { + tableLogSize--; + } + } + + if (logSize < MIN_LOG_SIZE) { + _state.unlock(); + throw std::bad_alloc(); + } + + _globalAllocation += (grantedLimit + CACHE_RECORD_OVERHEAD); + _caches.emplace_front(std::shared_ptr(cache, deleter), grantedLimit); + MetadataItr metadata = _caches.begin(); + metadata->lock(); + leaseTable(metadata, tableLogSize); + metadata->unlock(); + _state.unlock(); + + return metadata; +} + +void Manager::unregisterCache(Manager::MetadataItr& metadata) { + _state.lock(); + + if (_caches.empty()) { + _state.unlock(); + return; + } + + metadata->lock(); + _globalAllocation -= (metadata->hardLimit() + CACHE_RECORD_OVERHEAD); + reclaimTables(metadata); + _accessStats.purgeRecord(metadata->cache()); + metadata->unlock(); + + _caches.erase(metadata); + + _state.unlock(); +} + +std::pair Manager::requestResize( + Manager::MetadataItr& metadata, uint64_t requestedLimit) { + Manager::time_point nextRequest = futureTime(30); + bool allowed = false; + + bool ok = _state.lock(TRIES_FAST); + if (ok) { + if (isOperational() && !_state.isSet(State::Flag::resizing)) { + metadata->lock(); + + if (!metadata->isSet(State::Flag::resizing) && + ((requestedLimit < metadata->hardLimit()) || + increaseAllowed(requestedLimit - metadata->hardLimit()))) { + allowed = true; + if (requestedLimit > metadata->hardLimit()) { + // if cache is growing, let it keep growing quickly + nextRequest = std::chrono::steady_clock::now(); + } + resizeCache(TaskEnvironment::none, metadata, + requestedLimit); // unlocks metadata + } else { + metadata->unlock(); + } + } + _state.unlock(); + } + + return std::pair(allowed, nextRequest); +} + +std::pair Manager::requestMigrate( + Manager::MetadataItr& metadata, uint32_t requestedLogSize) { + Manager::time_point nextRequest = futureTime(30); + 
bool allowed = false; + + bool ok = _state.lock(TRIES_FAST); + if (ok) { + if (isOperational() && !_state.isSet(State::Flag::resizing)) { + if (!_tables[requestedLogSize].empty() || + increaseAllowed(tableSize(requestedLogSize))) { + allowed = true; + } + if (allowed) { + metadata->lock(); + if (metadata->isSet(State::Flag::migrating)) { + allowed = false; + metadata->unlock(); + } else { + nextRequest = std::chrono::steady_clock::now(); + migrateCache(TaskEnvironment::none, metadata, + requestedLogSize); // unlocks metadata + } + } + } + _state.unlock(); + } + + return std::pair(allowed, nextRequest); +} + +void Manager::reportAccess(std::shared_ptr cache) { + if (((++_accessCounter) & 0x7FULL) == 0) { // record 1 in 128 + _accessStats.insertRecord(cache); + } +} + +bool Manager::isOperational() const { + TRI_ASSERT(_state.isLocked()); + return (!_state.isSet(State::Flag::shutdown) && + !_state.isSet(State::Flag::shuttingDown)); +} + +bool Manager::globalProcessRunning() const { + TRI_ASSERT(_state.isLocked()); + return (_state.isSet(State::Flag::rebalancing) || + _state.isSet(State::Flag::resizing)); +} + +boost::asio::io_service* Manager::ioService() { return _ioService; } + +void Manager::prepareTask(Manager::TaskEnvironment environment) { + _outstandingTasks++; + switch (environment) { + case TaskEnvironment::rebalancing: { + _rebalancingTasks++; + break; + } + case TaskEnvironment::resizing: { + _resizingTasks++; + break; + } + case TaskEnvironment::none: + default: { break; } + } +} + +void Manager::unprepareTask(Manager::TaskEnvironment environment) { + switch (environment) { + case TaskEnvironment::rebalancing: { + if ((--_rebalancingTasks) == 0) { + _state.lock(); + _state.toggleFlag(State::Flag::rebalancing); + _state.unlock(); + }; + break; + } + case TaskEnvironment::resizing: { + if ((--_resizingTasks) == 0) { + _state.lock(); + internalResize(_globalSoftLimit, false); + _state.unlock(); + }; + break; + } + case TaskEnvironment::none: + default: { 
break; } + } + + _outstandingTasks--; +} + +bool Manager::rebalance() { + _state.lock(); + if (!isOperational() || globalProcessRunning()) { + _state.unlock(); + return false; + } + + // start rebalancing + _state.toggleFlag(State::Flag::rebalancing); + + // determine strategy + + // allow background tasks if more than 7/8ths full + bool allowTasks = + _globalAllocation > (_globalHardLimit - (_globalHardLimit >> 3)); + + // be aggressive if more than 3/4ths full + bool beAggressive = + _globalAllocation > (_globalHardLimit - (_globalHardLimit >> 2)); + + // aim for 1/4th with background tasks, 1/8th if no tasks but aggressive, no + // goal otherwise + uint64_t goal = beAggressive ? (allowTasks ? (_globalAllocation >> 2) + : (_globalAllocation >> 3)) + : 0; + + // get stats on cache access to prioritize freeing from less frequently used + // caches first, so more frequently used ones stay large + std::shared_ptr cacheList = priorityList(); + + // just adjust limits + uint64_t reclaimed = resizeAllCaches(TaskEnvironment::rebalancing, cacheList, + allowTasks, beAggressive, goal); + _globalAllocation -= reclaimed; + + if (_rebalancingTasks.load() == 0) { + _state.toggleFlag(State::Flag::rebalancing); + } + + _state.unlock(); + return true; +} + +void Manager::internalResize(uint64_t newGlobalLimit, bool firstAttempt) { + TRI_ASSERT(_state.isLocked()); + bool done = false; + std::shared_ptr cacheList(nullptr); + uint64_t reclaimed = 0; + + if (firstAttempt) { + _resizeAttempt = 0; + } + + if (!isOperational()) { + // abort resizing process so we can shutdown + done = true; + } + + // if limit is safe, just set it + done = adjustGlobalLimitsIfAllowed(newGlobalLimit); + + // see if we can free enough from unused tables + if (!done) { + freeUnusedTables(); + done = adjustGlobalLimitsIfAllowed(newGlobalLimit); + } + + // must resize individual caches + if (!done) { + _globalSoftLimit = newGlobalLimit; + + // get stats on cache access to prioritize freeing from less 
frequently used + // caches first, so more frequently used ones stay large + cacheList = priorityList(); + + // first just adjust limits down to usage + reclaimed = resizeAllCaches(TaskEnvironment::resizing, cacheList, true, + true, _globalAllocation - _globalSoftLimit); + _globalAllocation -= reclaimed; + done = adjustGlobalLimitsIfAllowed(newGlobalLimit); + } + + // still haven't freed enough, now try cutting allocations more aggressively + // by allowing use of background tasks to actually free memory from caches + if (!done) { + if ((_resizeAttempt % 2) == 0) { + reclaimed = resizeAllCaches(TaskEnvironment::resizing, cacheList, false, + true, _globalAllocation - _globalSoftLimit); + } else { + reclaimed = migrateAllCaches(TaskEnvironment::resizing, cacheList, + _globalAllocation - _globalSoftLimit); + } + } + + if ((_resizingTasks.load() == 0)) { + _state.toggleFlag(State::Flag::resizing); + } +} + +uint64_t Manager::resizeAllCaches(Manager::TaskEnvironment environment, + std::shared_ptr cacheList, + bool noTasks, bool aggressive, + uint64_t goal) { + TRI_ASSERT(_state.isLocked()); + uint64_t reclaimed = 0; + + for (std::shared_ptr c : *cacheList) { + // skip this cache if it is already resizing or shutdown! + if (!c->canResize()) { + continue; + } + + MetadataItr metadata = c->metadata(); + metadata->lock(); + + uint64_t newLimit; + if (aggressive) { + newLimit = + (noTasks ? metadata->usage() + : (std::min)(metadata->usage(), metadata->hardLimit() / 4)); + } else { + newLimit = + (noTasks ? 
(std::max)(metadata->usage(), metadata->hardLimit() / 2) + : (std::min)(metadata->usage(), metadata->hardLimit() / 2)); + } + newLimit = (std::max)(newLimit, MIN_CACHE_SIZE); + + reclaimed += metadata->hardLimit() - newLimit; + resizeCache(environment, metadata, newLimit); // unlocks cache + + if (goal > 0 && reclaimed >= goal) { + break; + } + } + + return reclaimed; +} + +uint64_t Manager::migrateAllCaches(Manager::TaskEnvironment environment, + std::shared_ptr cacheList, + uint64_t goal) { + TRI_ASSERT(_state.isLocked()); + uint64_t reclaimed = 0; + + for (std::shared_ptr c : *cacheList) { + // skip this cache if it is already migrating or shutdown! + if (!c->canMigrate()) { + continue; + } + + MetadataItr metadata = c->metadata(); + metadata->lock(); + + uint32_t logSize = metadata->logSize(); + if ((logSize > MIN_TABLE_LOG_SIZE) && + increaseAllowed(tableSize(logSize - 1))) { + reclaimed += (tableSize(logSize) - tableSize(logSize - 1)); + migrateCache(environment, metadata, logSize - 1); // unlocks metadata + } + if (metadata->isLocked()) { + metadata->unlock(); + } + + if (goal > 0 && reclaimed >= goal) { + break; + } + } + + return reclaimed; +} + +void Manager::freeUnusedTables() { + TRI_ASSERT(_state.isLocked()); + for (size_t i = 0; i < 32; i++) { + while (!_tables[i].empty()) { + uint8_t* table = _tables[i].top(); + delete[] table; + _tables[i].pop(); + } + } +} + +bool Manager::adjustGlobalLimitsIfAllowed(uint64_t newGlobalLimit) { + TRI_ASSERT(_state.isLocked()); + if (newGlobalLimit < _globalAllocation) { + return false; + } + + _globalSoftLimit = newGlobalLimit; + _globalHardLimit = newGlobalLimit; + + return true; +} + +void Manager::resizeCache(Manager::TaskEnvironment environment, + Manager::MetadataItr& metadata, uint64_t newLimit) { + TRI_ASSERT(_state.isLocked()); + TRI_ASSERT(metadata->isLocked()); + + if (metadata->usage() <= newLimit) { + bool success = metadata->adjustLimits(newLimit, newLimit); + TRI_ASSERT(success); + metadata->unlock(); 
+ return; + } + + bool success = metadata->adjustLimits(newLimit, metadata->hardLimit()); + TRI_ASSERT(success); + TRI_ASSERT(!metadata->isSet(State::Flag::resizing)); + metadata->toggleFlag(State::Flag::resizing); + metadata->unlock(); + + auto task = std::make_shared(environment, this, metadata); + bool dispatched = task->dispatch(); + if (!dispatched) { + // TODO: decide what to do if we don't have an io_service + } +} + +void Manager::migrateCache(Manager::TaskEnvironment environment, + Manager::MetadataItr& metadata, uint32_t logSize) { + TRI_ASSERT(_state.isLocked()); + TRI_ASSERT(metadata->isLocked()); + + bool unlocked; + try { + leaseTable(metadata, logSize); + TRI_ASSERT(!metadata->isSet(State::Flag::migrating)); + metadata->toggleFlag(State::Flag::migrating); + metadata->unlock(); + unlocked = true; + + auto task = std::make_shared(environment, this, metadata); + bool dispatched = task->dispatch(); + if (!dispatched) { + // TODO: decide what to do if we don't have an io_service + metadata->lock(); + reclaimTables(metadata, true); + metadata->unlock(); + } + } catch (std::bad_alloc) { + if (unlocked) { + metadata->lock(); + } + if (metadata->auxiliaryTable() != nullptr) { + uint8_t* junk = metadata->releaseAuxiliaryTable(); + delete junk; + } + metadata->unlock(); + } +} + +void Manager::leaseTable(Manager::MetadataItr& metadata, uint32_t logSize) { + TRI_ASSERT(_state.isLocked()); + TRI_ASSERT(metadata->isLocked()); + + uint8_t* table = nullptr; + if (_tables[logSize].empty()) { + table = reinterpret_cast(new PlainBucket[1 << logSize]); + memset(table, 0, tableSize(logSize)); + _globalAllocation += tableSize(logSize); + } else { + table = _tables[logSize].top(); + _tables[logSize].pop(); + } + + // if main null, main, otherwise auxiliary + metadata->grantAuxiliaryTable(table, logSize); + if (metadata->table() == nullptr) { + metadata->swapTables(); + } +} + +void Manager::reclaimTables(Manager::MetadataItr& metadata, + bool auxiliaryOnly) { + 
TRI_ASSERT(_state.isLocked()); + TRI_ASSERT(metadata->isLocked()); + + uint8_t* table; + uint32_t logSize; + + logSize = metadata->auxiliaryLogSize(); + table = metadata->releaseAuxiliaryTable(); + if (table != nullptr) { + _tables[logSize].push(table); + } + + if (auxiliaryOnly) { + return; + } + + logSize = metadata->logSize(); + table = metadata->releaseTable(); + if (table != nullptr) { + _tables[logSize].push(table); + } +} + +bool Manager::increaseAllowed(uint64_t increase) const { + TRI_ASSERT(_state.isLocked()); + if (_state.isSet(State::Flag::resizing) && + (_globalAllocation <= _globalSoftLimit)) { + return (increase <= (_globalSoftLimit - _globalAllocation)); + } + + return (increase <= (_globalHardLimit - _globalAllocation)); +} + +uint64_t Manager::tableSize(uint32_t logSize) const { + return (sizeof(PlainBucket) * (1ULL << logSize)); +} + +std::shared_ptr Manager::priorityList() { + TRI_ASSERT(_state.isLocked()); + std::shared_ptr list(new PriorityList()); + list->reserve(_caches.size()); + + // catalog accessed caches + auto stats = _accessStats.getFrequencies(); + std::set accessed; + for (auto s : *stats) { + accessed.emplace(s.first.get()); + } + + // gather all unaccessed caches at beginning of list + for (MetadataItr m = _caches.begin(); m != _caches.end(); m++) { + m->lock(); + std::shared_ptr cache = m->cache(); + m->unlock(); + + auto found = accessed.find(cache.get()); + if (found == accessed.end()) { + list->emplace_back(cache); + } + } + + // gather all accessed caches in order + for (auto s : *stats) { + list->emplace_back(s.first); + } + + return list; +} + +Manager::time_point Manager::futureTime(uint64_t secondsFromNow) { + return (std::chrono::steady_clock::now() + + std::chrono::seconds(secondsFromNow)); +} diff --git a/arangod/Cache/Manager.h b/arangod/Cache/Manager.h new file mode 100644 index 0000000000..cd7d6336b9 --- /dev/null +++ b/arangod/Cache/Manager.h @@ -0,0 +1,254 @@ 
+//////////////////////////////////////////////////////////////////////////////// +/// DISCLAIMER +/// +/// Copyright 2014-2017 ArangoDB GmbH, Cologne, Germany +/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is ArangoDB GmbH, Cologne, Germany +/// +/// @author Daniel H. Larkin +//////////////////////////////////////////////////////////////////////////////// + +#ifndef ARANGODB_CACHE_MANAGER_H +#define ARANGODB_CACHE_MANAGER_H + +#include "Basics/Common.h" +#include "Basics/asio-helper.h" +#include "Cache/CachedValue.h" +#include "Cache/FrequencyBuffer.h" +#include "Cache/Metadata.h" +#include "Cache/State.h" +#include "Cache/TransactionWindow.h" + +#include +#include +#include +#include +#include +#include +#include + +namespace arangodb { +namespace cache { + +class Cache; // forward declaration +class FreeMemoryTask; // forward declaration +class MigrateTask; // forward declaration +class Rebalancer; // forward declaration + +//////////////////////////////////////////////////////////////////////////////// +/// @brief Coordinates a system of caches all sharing a single memory pool. +/// +/// Allows clients to create and destroy both transactional and +/// non-transactional caches with individual usage limits, but all subject to a +/// combined global limit. 
Re-uses memory from old, destroyed caches if possible +/// when allocating new ones to allow fast creation and destruction of +/// short-lived caches. +/// +/// The global limit may be adjusted, and compliance may be achieved through +/// asynchronous background tasks. The manager periodically rebalances the +/// allocations across the pool of caches to allow more frequently used ones to +/// have more space. +/// +/// There should be a single Manager instance exposed via +/// CacheManagerFeature::MANAGER --- use this unless you are very certain you +/// need a different instance. +//////////////////////////////////////////////////////////////////////////////// +class Manager { + public: + static uint64_t MINIMUM_SIZE; + typedef FrequencyBuffer> StatBuffer; + typedef std::vector> PriorityList; + typedef std::chrono::time_point time_point; + typedef std::list::iterator MetadataItr; + + public: + ////////////////////////////////////////////////////////////////////////////// + /// @brief Initialize the manager with an io_service and global usage limit. + ////////////////////////////////////////////////////////////////////////////// + Manager(boost::asio::io_service* ioService, uint64_t globalLimit); + ~Manager(); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Enum to specify which type of cache to create. + ////////////////////////////////////////////////////////////////////////////// + enum CacheType { Plain, Transactional }; + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Creates an individual cache. + /// + /// The type must be specified. It is possible that the cache cannot be + /// created (e.g. in situations of extreme memory pressure), in which case the + /// returned pointer will be nullptr. If there isn't enough memory to create a + /// cache with the requested limit, the actual limit may be smaller. 
If the + /// third parameter is true, the cache will be allowed to grow if it becomes + /// full and memory is available globally; otherwise the limit given to it by + /// the manager is a hard upper limit which may only be adjusted downward. + /// This parameter is true by default. It should likely only be set to be + /// false for low-priority, short-lived caches. + ////////////////////////////////////////////////////////////////////////////// + std::shared_ptr createCache(Manager::CacheType type, + uint64_t requestedLimit, + bool allowGrowth = true); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Destroy the given cache. + ////////////////////////////////////////////////////////////////////////////// + void destroyCache(std::shared_ptr cache); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Prepare for shutdown. + ////////////////////////////////////////////////////////////////////////////// + void beginShutdown(); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Actually shutdown the manager and all caches. + ////////////////////////////////////////////////////////////////////////////// + void shutdown(); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Change the global usage limit. + ////////////////////////////////////////////////////////////////////////////// + bool resize(uint64_t newGlobalLimit); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Report the current global usage limit. + ////////////////////////////////////////////////////////////////////////////// + uint64_t globalLimit(); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Report the current amoutn of memory allocated to all caches. 
+ /// + /// This serves as an upper bound on the current memory usage of all caches. + /// The actual global usage is not recorded, as this would require significant + /// additional synchronization between the caches and slow things down + /// considerably. + ////////////////////////////////////////////////////////////////////////////// + uint64_t globalAllocation(); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Signal the beginning of a transaction. + ////////////////////////////////////////////////////////////////////////////// + void startTransaction(); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Signal the end of a transaction. + ////////////////////////////////////////////////////////////////////////////// + void endTransaction(); + + private: + // simple state variable for locking and other purposes + State _state; + + // structure to handle access frequency monitoring + Manager::StatBuffer _accessStats; + std::atomic _accessCounter; + + // list of metadata objects to keep track of all the registered caches + std::list _caches; + + // actual tables to lease out + std::stack _tables[32]; + + // global statistics + uint64_t _globalSoftLimit; + uint64_t _globalHardLimit; + uint64_t _globalAllocation; + + // transaction management + TransactionWindow _transactions; + + // task management + enum TaskEnvironment { none, rebalancing, resizing }; + boost::asio::io_service* _ioService; + uint64_t _resizeAttempt; + std::atomic _outstandingTasks; + std::atomic _rebalancingTasks; + std::atomic _resizingTasks; + + // friend class tasks and caches to allow access + friend class Cache; + friend class FreeMemoryTask; + friend class MigrateTask; + friend class PlainCache; + friend class Rebalancer; + friend class TransactionalCache; + + private: // used by caches + // register and unregister individual caches + Manager::MetadataItr registerCache(Cache* cache, uint64_t 
requestedLimit, + std::function deleter); + void unregisterCache(Manager::MetadataItr& metadata); + + // allow individual caches to request changes to their allocations + std::pair requestResize( + Manager::MetadataItr& metadata, uint64_t requestedLimit); + std::pair requestMigrate( + Manager::MetadataItr& metadata, uint32_t requestedLogSize); + + // method for lr-accessed heuristics + void reportAccess(std::shared_ptr cache); + + private: // used internally and by tasks + // check if shutdown or shutting down + bool isOperational() const; + // check if there is already a global process running + bool globalProcessRunning() const; + + // expose io_service + boost::asio::io_service* ioService(); + + // coordinate state with task lifecycles + void prepareTask(TaskEnvironment environment); + void unprepareTask(TaskEnvironment environment); + + // periodically run to rebalance allocations globally + bool rebalance(); + + // helpers for global resizing + void internalResize(uint64_t newGlobalLimit, bool firstAttempt); + uint64_t resizeAllCaches(TaskEnvironment environment, + std::shared_ptr cacheList, + bool noTasks, bool aggressive, uint64_t goal); + uint64_t migrateAllCaches(TaskEnvironment environment, + std::shared_ptr cacheList, + uint64_t goal); + void freeUnusedTables(); + bool adjustGlobalLimitsIfAllowed(uint64_t newGlobalLimit); + + // methods to adjust individual caches + void resizeCache(TaskEnvironment environment, Manager::MetadataItr& metadata, + uint64_t newLimit); + void migrateCache(TaskEnvironment environment, Manager::MetadataItr& metadata, + uint32_t logSize); + void leaseTable(Manager::MetadataItr& metadata, uint32_t logSize); + void reclaimTables(Manager::MetadataItr& metadata, + bool auxiliaryOnly = false); + + // helpers for individual allocations + bool increaseAllowed(uint64_t increase) const; + uint64_t tableSize(uint32_t logSize) const; + + // helper for lr-accessed heuristics + std::shared_ptr priorityList(); + + // helper for wait times + 
Manager::time_point futureTime(uint64_t secondsFromNow); +}; + +}; // end namespace cache +}; // end namespace arangodb + +#endif diff --git a/arangod/Cache/ManagerTasks.cpp b/arangod/Cache/ManagerTasks.cpp new file mode 100644 index 0000000000..26c9772f22 --- /dev/null +++ b/arangod/Cache/ManagerTasks.cpp @@ -0,0 +1,112 @@ +//////////////////////////////////////////////////////////////////////////////// +/// DISCLAIMER +/// +/// Copyright 2014-2017 ArangoDB GmbH, Cologne, Germany +/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is ArangoDB GmbH, Cologne, Germany +/// +/// @author Daniel H. 
Larkin +//////////////////////////////////////////////////////////////////////////////// + +#include "Cache/ManagerTasks.h" +#include "Basics/Common.h" +#include "Basics/asio-helper.h" +#include "Cache/Cache.h" +#include "Cache/Manager.h" +#include "Cache/Metadata.h" + +using namespace arangodb::cache; + +FreeMemoryTask::FreeMemoryTask(Manager::TaskEnvironment environment, + Manager* manager, Manager::MetadataItr& metadata) + : _environment(environment), _manager(manager) { + metadata->lock(); + _cache = metadata->cache(); + metadata->unlock(); +} + +FreeMemoryTask::~FreeMemoryTask() {} + +bool FreeMemoryTask::dispatch() { + auto ioService = _manager->ioService(); + if (ioService == nullptr) { + return false; + } + + _manager->prepareTask(_environment); + auto self = shared_from_this(); + ioService->post([self, this]() -> void { run(); }); + + return true; +} + +void FreeMemoryTask::run() { + bool ran = _cache->freeMemory(); + + if (ran) { + _manager->_state.lock(); + auto metadata = _cache->metadata(); + metadata->lock(); + uint64_t reclaimed = metadata->hardLimit() - metadata->softLimit(); + metadata->adjustLimits(metadata->softLimit(), metadata->softLimit()); + metadata->toggleFlag(State::Flag::resizing); + metadata->unlock(); + _manager->_globalAllocation -= reclaimed; + _manager->_state.unlock(); + } + + _manager->unprepareTask(_environment); +} + +MigrateTask::MigrateTask(Manager::TaskEnvironment environment, Manager* manager, + Manager::MetadataItr& metadata) + : _environment(environment), _manager(manager) { + metadata->lock(); + _cache = metadata->cache(); + metadata->unlock(); +} + +MigrateTask::~MigrateTask() {} + +bool MigrateTask::dispatch() { + auto ioService = _manager->ioService(); + if (ioService == nullptr) { + return false; + } + + _manager->prepareTask(_environment); + auto self = shared_from_this(); + ioService->post([self, this]() -> void { run(); }); + + return true; +} + +void MigrateTask::run() { + // do the actual migration + bool ran = 
_cache->migrate(); + + if (ran) { + _manager->_state.lock(); + auto metadata = _cache->metadata(); + metadata->lock(); + _manager->reclaimTables(metadata, true); + metadata->toggleFlag(State::Flag::migrating); + metadata->unlock(); + _manager->_state.unlock(); + } + + _manager->unprepareTask(_environment); +} diff --git a/arangod/Cache/ManagerTasks.h b/arangod/Cache/ManagerTasks.h new file mode 100644 index 0000000000..078944f49a --- /dev/null +++ b/arangod/Cache/ManagerTasks.h @@ -0,0 +1,84 @@ +//////////////////////////////////////////////////////////////////////////////// +/// DISCLAIMER +/// +/// Copyright 2014-2017 ArangoDB GmbH, Cologne, Germany +/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is ArangoDB GmbH, Cologne, Germany +/// +/// @author Daniel H. 
Larkin +//////////////////////////////////////////////////////////////////////////////// + +#ifndef ARANGODB_CACHE_MANAGER_TASKS_H +#define ARANGODB_CACHE_MANAGER_TASKS_H + +#include "Basics/Common.h" +#include "Cache/Cache.h" +#include "Cache/Manager.h" +#include "Cache/Metadata.h" + +#include +#include +#include + +namespace arangodb { +namespace cache { + +class FreeMemoryTask : public std::enable_shared_from_this { + private: + Manager::TaskEnvironment _environment; + Manager* _manager; + std::shared_ptr _cache; + + public: + FreeMemoryTask() = delete; + FreeMemoryTask(FreeMemoryTask const&) = delete; + FreeMemoryTask& operator=(FreeMemoryTask const&) = delete; + + FreeMemoryTask(Manager::TaskEnvironment environment, Manager* manager, + Manager::MetadataItr& metadata); + ~FreeMemoryTask(); + + bool dispatch(); + + private: + void run(); +}; + +class MigrateTask : public std::enable_shared_from_this { + private: + Manager::TaskEnvironment _environment; + Manager* _manager; + std::shared_ptr _cache; + + public: + MigrateTask() = delete; + MigrateTask(MigrateTask const&) = delete; + MigrateTask& operator=(MigrateTask const&) = delete; + + MigrateTask(Manager::TaskEnvironment environment, Manager* manager, + Manager::MetadataItr& metadata); + ~MigrateTask(); + + bool dispatch(); + + private: + void run(); +}; + +}; // end namespace cache +}; // end namespace arangodb + +#endif diff --git a/arangod/Cache/Metadata.cpp b/arangod/Cache/Metadata.cpp new file mode 100644 index 0000000000..f5dfed471f --- /dev/null +++ b/arangod/Cache/Metadata.cpp @@ -0,0 +1,172 @@ +//////////////////////////////////////////////////////////////////////////////// +/// DISCLAIMER +/// +/// Copyright 2014-2017 ArangoDB GmbH, Cologne, Germany +/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. 
+/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is ArangoDB GmbH, Cologne, Germany +/// +/// @author Daniel H. Larkin +//////////////////////////////////////////////////////////////////////////////// + +#include "Cache/Metadata.h" +#include "Cache/Cache.h" +#include "Cache/State.h" + +#include +#include + +using namespace arangodb::cache; + +Metadata::Metadata(std::shared_ptr cache, uint64_t limit, uint8_t* table, + uint32_t logSize) + : _state(), + _cache(cache), + _usage(0), + _softLimit(limit), + _hardLimit(limit), + _table(table), + _auxiliaryTable(nullptr), + _logSize(logSize), + _auxiliaryLogSize(0) {} + +Metadata::Metadata(Metadata const& other) + : _state(other._state), + _cache(other._cache), + _usage(other._usage), + _softLimit(other._softLimit), + _hardLimit(other._hardLimit), + _table(other._table), + _auxiliaryTable(other._auxiliaryTable), + _logSize(other._logSize), + _auxiliaryLogSize(other._auxiliaryLogSize) {} + +void Metadata::lock() { _state.lock(); } + +void Metadata::unlock() { + TRI_ASSERT(isLocked()); + _state.unlock(); +} + +bool Metadata::isLocked() const { return _state.isLocked(); } + +std::shared_ptr Metadata::cache() const { + TRI_ASSERT(isLocked()); + return _cache; +} + +uint32_t Metadata::logSize() const { + TRI_ASSERT(isLocked()); + return _logSize; +} + +uint32_t Metadata::auxiliaryLogSize() const { + TRI_ASSERT(isLocked()); + return _auxiliaryLogSize; +} + +uint8_t* Metadata::table() const { + TRI_ASSERT(isLocked()); + return _table; +} + +uint8_t* Metadata::auxiliaryTable() const { + TRI_ASSERT(isLocked()); + return 
_auxiliaryTable; +} + +uint64_t Metadata::usage() const { + TRI_ASSERT(isLocked()); + return _usage; +} + +uint64_t Metadata::softLimit() const { + TRI_ASSERT(isLocked()); + return _softLimit; +} + +uint64_t Metadata::hardLimit() const { + TRI_ASSERT(isLocked()); + return _hardLimit; +} + +bool Metadata::adjustUsageIfAllowed(int64_t usageChange) { + TRI_ASSERT(isLocked()); + + if (usageChange < 0) { + _usage -= static_cast(-usageChange); + return true; + } + + if ((static_cast(usageChange) + _usage <= _softLimit) || + ((_usage > _softLimit) && + (static_cast(usageChange) + _usage <= _hardLimit))) { + _usage += static_cast(usageChange); + return true; + } + + return false; +} + +bool Metadata::adjustLimits(uint64_t softLimit, uint64_t hardLimit) { + TRI_ASSERT(isLocked()); + + if (hardLimit < _usage) { + return false; + } + + _softLimit = softLimit; + _hardLimit = hardLimit; + + return true; +} + +void Metadata::grantAuxiliaryTable(uint8_t* table, uint32_t logSize) { + TRI_ASSERT(isLocked()); + _auxiliaryTable = table; + _auxiliaryLogSize = logSize; +} + +void Metadata::swapTables() { + TRI_ASSERT(isLocked()); + std::swap(_table, _auxiliaryTable); + std::swap(_logSize, _auxiliaryLogSize); +} + +uint8_t* Metadata::releaseTable() { + TRI_ASSERT(isLocked()); + uint8_t* table = _table; + _table = nullptr; + _logSize = 0; + return table; +} + +uint8_t* Metadata::releaseAuxiliaryTable() { + TRI_ASSERT(isLocked()); + uint8_t* table = _auxiliaryTable; + _auxiliaryTable = nullptr; + _auxiliaryLogSize = 0; + return table; +} + +bool Metadata::isSet(State::Flag flag) const { + TRI_ASSERT(isLocked()); + return _state.isSet(flag); +} + +void Metadata::toggleFlag(State::Flag flag) { + TRI_ASSERT(isLocked()); + _state.toggleFlag(flag); +} diff --git a/arangod/Cache/Metadata.h b/arangod/Cache/Metadata.h new file mode 100644 index 0000000000..0ad3814fc4 --- /dev/null +++ b/arangod/Cache/Metadata.h @@ -0,0 +1,198 @@ 
+//////////////////////////////////////////////////////////////////////////////// +/// DISCLAIMER +/// +/// Copyright 2014-2017 ArangoDB GmbH, Cologne, Germany +/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is ArangoDB GmbH, Cologne, Germany +/// +/// @author Daniel H. Larkin +//////////////////////////////////////////////////////////////////////////////// + +#ifndef ARANGODB_CACHE_METADATA_H +#define ARANGODB_CACHE_METADATA_H + +#include "Basics/Common.h" +#include "Cache/State.h" + +#include +#include + +namespace arangodb { +namespace cache { + +class Cache; // forward declaration + +//////////////////////////////////////////////////////////////////////////////// +/// @brief Metadata object to facilitate information sharing between individual +/// Cache instances and Manager. +//////////////////////////////////////////////////////////////////////////////// +class Metadata { + public: + ////////////////////////////////////////////////////////////////////////////// + /// @brief Initializes record with given information. + ////////////////////////////////////////////////////////////////////////////// + Metadata(std::shared_ptr cache, uint64_t limit, + uint8_t* table = nullptr, uint32_t logSize = 0); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Initializes record from an existing record. 
+ ////////////////////////////////////////////////////////////////////////////// + Metadata(Metadata const& other); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Locks the record. + ////////////////////////////////////////////////////////////////////////////// + void lock(); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Unlocks the record. Requires record to be locked. + ////////////////////////////////////////////////////////////////////////////// + void unlock(); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Returns true if the record is locked, false otherwise. + ////////////////////////////////////////////////////////////////////////////// + bool isLocked() const; + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Returns a shared pointer to the underlying cache. Requires record + /// to be locked. + ////////////////////////////////////////////////////////////////////////////// + std::shared_ptr cache() const; + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Pointer to the table. Requires record to be locked. + ////////////////////////////////////////////////////////////////////////////// + uint8_t* table() const; + + ////////////////////////////////////////////////////////////////////////////// + /// @brief The base-2 logarithm of the number of buckets in the table. + /// Requires record to be locked. + ////////////////////////////////////////////////////////////////////////////// + uint32_t logSize() const; + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Pointer to the auxiliary table. Requires record to be locked. + /// + /// Will typically be nullptr. This will be set to a non-null value prior to + /// migration. During migration, both tables will temporarily be in use. 
Upon + /// completion of migration, the tables are swapped and the old table is + /// released to the manager. + ////////////////////////////////////////////////////////////////////////////// + uint8_t* auxiliaryTable() const; + + ////////////////////////////////////////////////////////////////////////////// + /// @brief The base-2 logarithm of the number of buckets in the auxiliary + /// table. Requires record to be locked. + ////////////////////////////////////////////////////////////////////////////// + uint32_t auxiliaryLogSize() const; + + ////////////////////////////////////////////////////////////////////////////// + /// @brief The current memory usage of the cache in bytes. Requires record to + /// be locked. + ////////////////////////////////////////////////////////////////////////////// + uint64_t usage() const; + + ////////////////////////////////////////////////////////////////////////////// + /// @brief The soft usage limit for this cache. Requires record to be locked. + /// + /// Typically, this will be equal to the hard limit. It may be lower when the + /// cache is resizing. If the current usage is below the soft limit, then new + /// insertions are not allowed to exceed the soft limit. If the current usage + /// is above the soft limit, then new insertions may occur as long as they do + /// not exceed the hard limit; a background task will be working in parallel + /// to remove older values to bring usage below the soft limit. + ////////////////////////////////////////////////////////////////////////////// + uint64_t softLimit() const; + + ////////////////////////////////////////////////////////////////////////////// + /// @brief The hard usage limit for this cache. Requires record to be locked. + /// + /// Usage is guaranteed to remain under this value at all times. 
+ ////////////////////////////////////////////////////////////////////////////// + uint64_t hardLimit() const; + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Adjusts usage by the specified amount if it will not violate + /// limits. Requires record to be locked. + /// + /// Returns true if adjusted, false otherwise. Used by caches to check-and-set + /// in a single operation to determine whether they can afford to store a new + /// value. + ////////////////////////////////////////////////////////////////////////////// + bool adjustUsageIfAllowed(int64_t usageChange); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Sets the soft and hard usage limits. Requires record to be locked. + ////////////////////////////////////////////////////////////////////////////// + bool adjustLimits(uint64_t softLimit, uint64_t hardLimit); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Lets the manager grant a new table lease to the cache for + /// migration. Requires record to be locked. + ////////////////////////////////////////////////////////////////////////////// + void grantAuxiliaryTable(uint8_t* table, uint32_t logSize); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Swap the main and auxiliary tables (both pointers and sizes). + /// Requires record to be locked. + ////////////////////////////////////////////////////////////////////////////// + void swapTables(); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Release the main table back to the manager. Requires record to be + /// locked. + ////////////////////////////////////////////////////////////////////////////// + uint8_t* releaseTable(); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Release the auxiliary table back to the manager. 
Requires record to + /// be locked. + ////////////////////////////////////////////////////////////////////////////// + uint8_t* releaseAuxiliaryTable(); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Checks if flag is set in state. Requires record to be locked. + ////////////////////////////////////////////////////////////////////////////// + bool isSet(State::Flag flag) const; + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Toggles flag in state. Requires record to be locked. + ////////////////////////////////////////////////////////////////////////////// + void toggleFlag(State::Flag flag); + + private: + State _state; + + // pointer to underlying cache + std::shared_ptr _cache; + + // vital information about memory usage + uint64_t _usage; + uint64_t _softLimit; + uint64_t _hardLimit; + + // information about table leases + uint8_t* _table; + uint8_t* _auxiliaryTable; + uint32_t _logSize; + uint32_t _auxiliaryLogSize; +}; + +}; // end namespace cache +}; // end namespace arangodb + +#endif diff --git a/arangod/Cache/PlainBucket.cpp b/arangod/Cache/PlainBucket.cpp new file mode 100644 index 0000000000..03faf08ab3 --- /dev/null +++ b/arangod/Cache/PlainBucket.cpp @@ -0,0 +1,173 @@ +//////////////////////////////////////////////////////////////////////////////// +/// DISCLAIMER +/// +/// Copyright 2014-2017 ArangoDB GmbH, Cologne, Germany +/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is ArangoDB GmbH, Cologne, Germany +/// +/// @author Daniel H. Larkin +//////////////////////////////////////////////////////////////////////////////// + +#include "Cache/PlainBucket.h" +#include "Basics/Common.h" +#include "Cache/CachedValue.h" +#include "Cache/State.h" + +#include +#include + +using namespace arangodb::cache; + +size_t PlainBucket::SLOTS_DATA = 5; + +PlainBucket::PlainBucket() { + _state.lock(); + clear(); +} + +bool PlainBucket::lock(int64_t maxTries) { return _state.lock(maxTries); } + +void PlainBucket::unlock() { + TRI_ASSERT(_state.isLocked()); + _state.unlock(); +} + +bool PlainBucket::isLocked() const { return _state.isLocked(); } + +bool PlainBucket::isMigrated() const { + TRI_ASSERT(isLocked()); + return _state.isSet(State::Flag::migrated); +} + +bool PlainBucket::isFull() const { + TRI_ASSERT(isLocked()); + bool hasEmptySlot = false; + for (size_t i = 0; i < SLOTS_DATA; i++) { + size_t slot = SLOTS_DATA - (i + 1); + if (_cachedHashes[slot] == 0) { + hasEmptySlot = true; + break; + } + } + + return !hasEmptySlot; +} + +CachedValue* PlainBucket::find(uint32_t hash, void const* key, uint32_t keySize, + bool moveToFront) { + TRI_ASSERT(isLocked()); + CachedValue* result = nullptr; + + for (size_t i = 0; i < SLOTS_DATA; i++) { + if (_cachedHashes[i] == 0) { + break; + } + if (_cachedHashes[i] == hash && _cachedData[i]->sameKey(key, keySize)) { + result = _cachedData[i]; + if (moveToFront) { + moveSlot(i, true); + } + break; + } + } + + return result; +} + +// requires there to be an open slot, otherwise will not be inserted +void PlainBucket::insert(uint32_t hash, CachedValue* value) { + TRI_ASSERT(isLocked()); + for (size_t i = 0; i < SLOTS_DATA; i++) { + if (_cachedHashes[i] == 0) { + // found an empty slot + _cachedHashes[i] = hash; + _cachedData[i] = value; + if (i != 0) { + moveSlot(i, true); + } + 
return; + } + } +} + +CachedValue* PlainBucket::remove(uint32_t hash, void const* key, + uint32_t keySize) { + TRI_ASSERT(isLocked()); + CachedValue* value = find(hash, key, keySize, false); + if (value != nullptr) { + evict(value, false); + } + + return value; +} + +CachedValue* PlainBucket::evictionCandidate() const { + TRI_ASSERT(isLocked()); + for (size_t i = 0; i < SLOTS_DATA; i++) { + size_t slot = SLOTS_DATA - (i + 1); + if (_cachedHashes[slot] == 0) { + continue; + } + if (_cachedData[slot]->isFreeable()) { + return _cachedData[slot]; + } + } + + return nullptr; +} + +void PlainBucket::evict(CachedValue* value, bool optimizeForInsertion) { + TRI_ASSERT(isLocked()); + for (size_t i = 0; i < SLOTS_DATA; i++) { + size_t slot = SLOTS_DATA - (i + 1); + if (_cachedData[slot] == value) { + // found a match + _cachedHashes[slot] = 0; + _cachedData[slot] = nullptr; + moveSlot(slot, optimizeForInsertion); + return; + } + } +} + +void PlainBucket::clear() { + TRI_ASSERT(isLocked()); + memset(this, 0, sizeof(PlainBucket)); +} + +void PlainBucket::moveSlot(size_t slot, bool moveToFront) { + TRI_ASSERT(isLocked()); + uint32_t hash = _cachedHashes[slot]; + CachedValue* value = _cachedData[slot]; + size_t i = slot; + if (moveToFront) { + // move slot to front + for (; i >= 1; i--) { + _cachedHashes[i] = _cachedHashes[i - 1]; + _cachedData[i] = _cachedData[i - 1]; + } + } else { + // move slot to back + for (; (i < SLOTS_DATA - 1) && (_cachedHashes[i + 1] != 0); i++) { + _cachedHashes[i] = _cachedHashes[i + 1]; + _cachedData[i] = _cachedData[i + 1]; + } + } + if (i != slot) { + _cachedHashes[i] = hash; + _cachedData[i] = value; + } +} diff --git a/arangod/Cache/PlainBucket.h b/arangod/Cache/PlainBucket.h new file mode 100644 index 0000000000..6ead5417a6 --- /dev/null +++ b/arangod/Cache/PlainBucket.h @@ -0,0 +1,160 @@ +//////////////////////////////////////////////////////////////////////////////// +/// DISCLAIMER +/// +/// Copyright 2014-2017 ArangoDB GmbH, Cologne, 
Germany +/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is ArangoDB GmbH, Cologne, Germany +/// +/// @author Daniel H. Larkin +//////////////////////////////////////////////////////////////////////////////// + +#ifndef ARANGODB_CACHE_PLAIN_BUCKET_H +#define ARANGODB_CACHE_PLAIN_BUCKET_H + +#include "Basics/Common.h" +#include "Cache/CachedValue.h" +#include "Cache/State.h" + +#include +#include + +namespace arangodb { +namespace cache { + +//////////////////////////////////////////////////////////////////////////////// +/// @brief Bucket structure for PlainCache. +/// +/// Contains only a State variable and five slots each for hashes and data +/// pointers. Most querying and manipulation can be handled via the exposed +/// methods. Bucket must be locked before doing anything else to ensure proper +/// synchronization. Data entries are carefully laid out to ensure the structure +/// fits in a single cacheline. +//////////////////////////////////////////////////////////////////////////////// +struct alignas(64) PlainBucket { + State _state; + + // actual cached entries + uint32_t _cachedHashes[5]; + CachedValue* _cachedData[5]; + static size_t SLOTS_DATA; + +// padding, if necessary? +#ifdef TRI_PADDING_32 + uint32_t _padding[3]; +#endif + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Initialize an empty bucket. 
+ ////////////////////////////////////////////////////////////////////////////// + PlainBucket(); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Attempt to lock bucket (failing after maxTries attempts). + ////////////////////////////////////////////////////////////////////////////// + bool lock(int64_t maxTries); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Unlock the bucket. Requires bucket to be locked. + ////////////////////////////////////////////////////////////////////////////// + void unlock(); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Checks whether the bucket is locked. + ////////////////////////////////////////////////////////////////////////////// + bool isLocked() const; + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Checks whether the bucket has been migrated. Requires state to be + /// locked. + ////////////////////////////////////////////////////////////////////////////// + bool isMigrated() const; + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Checks whether bucket is full. Requires state to be locked. + ////////////////////////////////////////////////////////////////////////////// + bool isFull() const; + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Looks up a given key and returns associated value. Requires state + /// to be locked. + /// + /// Takes an input hash and key (specified by pointer and size), and searches + /// the bucket for a matching entry. If a matching entry is found, it is + /// returned. By default, a matching entry will be moved to the front of the + /// bucket to allow basic LRU semantics. If no matching entry is found, + /// nothing will be changed and a nullptr will be returned. 
+ ////////////////////////////////////////////////////////////////////////////// + CachedValue* find(uint32_t hash, void const* key, uint32_t keySize, + bool moveToFront = true); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Inserts a given value. Requires state to be locked. + /// + /// Requires that the bucket is not full and does not already contain an item + /// with the same key. If it is full, the item will not be inserted. If an + /// item with the same key exists, this is not detected but it is likely to + /// produce bugs later on down the line. When inserting, the item is put into + /// the first empty slot, then moved to the front. If attempting to insert and + /// the bucket is full, the user should evict an item and specify the + /// optimizeForInsertion flag to be true. + ////////////////////////////////////////////////////////////////////////////// + void insert(uint32_t hash, CachedValue* value); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Removes an item with the given key if one exists. Requires state to + /// be locked. + /// + /// Search for a matching key. If none exists, do nothing and return a + /// nullptr. If one exists, remove it from the bucket and return the pointer + /// to the value. Upon removal, the empty slot generated is moved to the back + /// of the bucket (to remove the gap). + ////////////////////////////////////////////////////////////////////////////// + CachedValue* remove(uint32_t hash, void const* key, uint32_t keySize); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Searches for the best candidate in the bucket to evict. Requires + /// state to be locked. + /// + /// Returns a pointer to least recently used freeable value. If the bucket + /// contains no values or all have outstanding references, then it returns + /// nullptr. 
+ ////////////////////////////////////////////////////////////////////////////// + CachedValue* evictionCandidate() const; + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Evicts the given value from the bucket. Requires state to be + /// locked. + /// + /// By default, it will move the empty slot to the back of the bucket. If + /// preparing an empty slot for insertion, specify the second parameter to be + /// true. This will move the empty slot to the front instead. + ////////////////////////////////////////////////////////////////////////////// + void evict(CachedValue* value, bool optimizeForInsertion = false); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Reinitializes a bucket to be completely empty and unlocked. + /// Requires state to be locked. + ////////////////////////////////////////////////////////////////////////////// + void clear(); + + private: + void moveSlot(size_t slot, bool moveToFront); +}; + +}; // end namespace cache +}; // end namespace arangodb + +#endif diff --git a/arangod/Cache/PlainCache.cpp b/arangod/Cache/PlainCache.cpp new file mode 100644 index 0000000000..dd3f52b8e3 --- /dev/null +++ b/arangod/Cache/PlainCache.cpp @@ -0,0 +1,409 @@ +//////////////////////////////////////////////////////////////////////////////// +/// DISCLAIMER +/// +/// Copyright 2014-2017 ArangoDB GmbH, Cologne, Germany +/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is ArangoDB GmbH, Cologne, Germany +/// +/// @author Daniel H. Larkin +//////////////////////////////////////////////////////////////////////////////// + +#include "Cache/PlainCache.h" +#include "Basics/Common.h" +#include "Cache/Cache.h" +#include "Cache/CachedValue.h" +#include "Cache/FrequencyBuffer.h" +#include "Cache/Metadata.h" +#include "Cache/PlainBucket.h" +#include "Cache/State.h" +#include "Random/RandomGenerator.h" + +#include +#include +#include +#include + +using namespace arangodb::cache; + +static constexpr int64_t TRIES_FAST = 50LL; +static constexpr int64_t TRIES_SLOW = 10000LL; +static constexpr int64_t TRIES_GUARANTEE = -1LL; + +Cache::Finding PlainCache::find(void const* key, uint32_t keySize) { + TRI_ASSERT(key != nullptr); + Finding result(nullptr); + uint32_t hash = hashKey(key, keySize); + + bool ok; + PlainBucket* bucket; + std::tie(ok, bucket) = getBucket(hash, TRIES_FAST); + + if (ok) { + result.reset(bucket->find(hash, key, keySize)); + bucket->unlock(); + endOperation(); + } + + return result; +} + +bool PlainCache::insert(CachedValue* value) { + TRI_ASSERT(value != nullptr); + bool inserted = false; + uint32_t hash = hashKey(value->key(), value->keySize); + + bool ok; + PlainBucket* bucket; + std::tie(ok, bucket) = getBucket(hash, TRIES_FAST); + + if (ok) { + int64_t change = value->size(); + CachedValue* candidate = bucket->find(hash, value->key(), value->keySize); + + if (candidate == nullptr && bucket->isFull()) { + candidate = bucket->evictionCandidate(); + } + if (candidate != nullptr) { + change -= candidate->size(); + } + + _metadata->lock(); + bool allowed = _metadata->adjustUsageIfAllowed(change); + _metadata->unlock(); + + if (allowed) { + if (candidate != nullptr) { + bucket->evict(candidate, true); + freeValue(candidate); + recordStat(Stat::eviction); + } else { + 
recordStat(Stat::noEviction); + } + bucket->insert(hash, value); + inserted = true; + } else { + requestResize(); // let function do the hard work + } + + bucket->unlock(); + requestMigrate(); // let function do the hard work + endOperation(); + } + + return inserted; +} + +bool PlainCache::remove(void const* key, uint32_t keySize) { + TRI_ASSERT(key != nullptr); + bool removed = false; + uint32_t hash = hashKey(key, keySize); + + bool ok; + PlainBucket* bucket; + std::tie(ok, bucket) = getBucket(hash, TRIES_SLOW); + + if (ok) { + int64_t change = 0; + CachedValue* candidate = bucket->remove(hash, key, keySize); + + if (candidate != nullptr) { + change -= candidate->size(); + + _metadata->lock(); + bool allowed = _metadata->adjustUsageIfAllowed(change); + TRI_ASSERT(allowed); + _metadata->unlock(); + + freeValue(candidate); + } + + removed = true; + bucket->unlock(); + endOperation(); + } + + return removed; +} + +std::shared_ptr PlainCache::create(Manager* manager, + uint64_t requestedSize, + bool allowGrowth) { + PlainCache* cache = new PlainCache(manager, requestedSize, allowGrowth); + + if (cache == nullptr) { + return std::shared_ptr(nullptr); + } + + cache->metadata()->lock(); + std::shared_ptr result = cache->metadata()->cache(); + cache->metadata()->unlock(); + + return result; +} + +PlainCache::PlainCache(Manager* manager, uint64_t requestedLimit, + bool allowGrowth) + : Cache(manager, requestedLimit, allowGrowth, + [](Cache* p) -> void { delete reinterpret_cast(p); }), + _auxiliaryTable(nullptr), + _auxiliaryLogSize(0), + _auxiliaryTableSize(1), + _auxiliaryMaskShift(32), + _auxiliaryBucketMask(0) { + _state.lock(); + if (isOperational()) { + _metadata->lock(); + _table = reinterpret_cast(_metadata->table()); + _logSize = _metadata->logSize(); + _tableSize = (1 << _logSize); + _maskShift = 32 - _logSize; + _bucketMask = (_tableSize - 1) << _maskShift; + _metadata->unlock(); + } + _state.unlock(); +} + +PlainCache::~PlainCache() { + _state.lock(); + if 
(isOperational()) { + _state.unlock(); + shutdown(); + } + if (_state.isLocked()) { + _state.unlock(); + } +} + +bool PlainCache::freeMemory() { + _state.lock(); + if (!isOperational()) { + _state.unlock(); + return false; + } + startOperation(); + _state.unlock(); + + bool underLimit = reclaimMemory(0ULL); + while (!underLimit) { + // pick a random bucket + uint32_t randomHash = RandomGenerator::interval(UINT32_MAX); + bool ok; + PlainBucket* bucket; + std::tie(ok, bucket) = getBucket(randomHash, TRIES_FAST, false); + + if (ok) { + // evict LRU freeable value if exists + CachedValue* candidate = bucket->evictionCandidate(); + + if (candidate != nullptr) { + uint64_t size = candidate->size(); + bucket->evict(candidate); + freeValue(candidate); + + underLimit = reclaimMemory(size); + } + + bucket->unlock(); + } + } + + endOperation(); + return true; +} + +bool PlainCache::migrate() { + _state.lock(); + if (!isOperational()) { + _state.unlock(); + return false; + } + startOperation(); + _metadata->lock(); + if (_metadata->table() == nullptr || _metadata->auxiliaryTable() == nullptr) { + _metadata->unlock(); + _state.unlock(); + endOperation(); + return false; + } + _auxiliaryTable = reinterpret_cast(_metadata->auxiliaryTable()); + _auxiliaryLogSize = _metadata->auxiliaryLogSize(); + _auxiliaryTableSize = (1 << _auxiliaryLogSize); + _auxiliaryMaskShift = (32 - _auxiliaryLogSize); + _auxiliaryBucketMask = (_auxiliaryTableSize - 1) << _auxiliaryMaskShift; + _metadata->unlock(); + _state.toggleFlag(State::Flag::migrating); + _state.unlock(); + + for (uint32_t i = 0; i < _tableSize; i++) { + // lock current bucket + PlainBucket* bucket = &(_table[i]); + bucket->lock(-1LL); + + // collect target bucket(s) + std::vector targets; + if (_logSize > _auxiliaryLogSize) { + uint32_t targetIndex = (i << _maskShift) >> _auxiliaryMaskShift; + targets.emplace_back(&(_auxiliaryTable[targetIndex])); + } else { + uint32_t baseIndex = (i << _maskShift) >> _auxiliaryMaskShift; + for 
(size_t j = 0; j < (1U << (_auxiliaryLogSize - _logSize)); j++) { + uint32_t targetIndex = baseIndex + j; + targets.emplace_back(&(_auxiliaryTable[targetIndex])); + } + } + // lock target bucket(s) + for (PlainBucket* targetBucket : targets) { + targetBucket->lock(TRIES_GUARANTEE); + } + + for (size_t j = 0; j < PlainBucket::SLOTS_DATA; j++) { + size_t k = PlainBucket::SLOTS_DATA - (j + 1); + if ((*bucket)._cachedHashes[k] != 0) { + uint32_t hash = (*bucket)._cachedHashes[k]; + CachedValue* value = (*bucket)._cachedData[k]; + + uint32_t targetIndex = + (hash & _auxiliaryBucketMask) >> _auxiliaryMaskShift; + PlainBucket* targetBucket = &(_auxiliaryTable[targetIndex]); + if (targetBucket->isFull()) { + CachedValue* candidate = targetBucket->evictionCandidate(); + targetBucket->evict(candidate, true); + uint64_t size = candidate->size(); + freeValue(candidate); + reclaimMemory(size); + } + targetBucket->insert(hash, value); + + (*bucket)._cachedHashes[k] = 0; + (*bucket)._cachedData[k] = nullptr; + } + } + + // unlock targets + for (PlainBucket* targetBucket : targets) { + targetBucket->unlock(); + } + + // finish up this bucket's migration + bucket->_state.toggleFlag(State::Flag::migrated); + bucket->unlock(); + } + + // swap tables and unmark local migrating flag + _state.lock(); + std::swap(_table, _auxiliaryTable); + std::swap(_logSize, _auxiliaryLogSize); + std::swap(_tableSize, _auxiliaryTableSize); + std::swap(_maskShift, _auxiliaryMaskShift); + std::swap(_bucketMask, _auxiliaryBucketMask); + _state.toggleFlag(State::Flag::migrating); + _state.unlock(); + + // clear out old table + clearTable(_auxiliaryTable, _auxiliaryTableSize); + + // release references to old table + _state.lock(); + _auxiliaryTable = nullptr; + _auxiliaryLogSize = 0; + _auxiliaryTableSize = 1; + _auxiliaryMaskShift = 32; + _auxiliaryBucketMask = 0; + _state.unlock(); + + // swap table in metadata + _metadata->lock(); + _metadata->swapTables(); + _metadata->unlock(); + + endOperation(); + 
return true; +} + +void PlainCache::clearTables() { + if (_table != nullptr) { + clearTable(_table, _tableSize); + } + if (_auxiliaryTable != nullptr) { + clearTable(_auxiliaryTable, _auxiliaryTableSize); + } +} + +std::pair PlainCache::getBucket(uint32_t hash, + int64_t maxTries, + bool singleOperation) { + PlainBucket* bucket = nullptr; + bool started = false; + + bool ok = _state.lock(maxTries); + if (ok) { + ok = isOperational(); + if (ok) { + if (singleOperation) { + startOperation(); + started = true; + _metadata->lock(); + _manager->reportAccess(_metadata->cache()); + _metadata->unlock(); + } + + bucket = &(_table[getIndex(hash, false)]); + ok = bucket->lock(maxTries); + if (ok && + bucket->isMigrated()) { // get bucket from auxiliary table instead + bucket->unlock(); + bucket = &(_auxiliaryTable[getIndex(hash, true)]); + ok = bucket->lock(maxTries); + if (ok && bucket->isMigrated()) { + ok = false; + bucket->unlock(); + } + } + } + if (!ok && started) { + endOperation(); + } + _state.unlock(); + } + + return std::pair(ok, bucket); +} + +void PlainCache::clearTable(PlainBucket* table, uint64_t tableSize) { + for (uint64_t i = 0; i < tableSize; i++) { + PlainBucket* bucket = &(table[i]); + bucket->lock(-1LL); + CachedValue* value = bucket->evictionCandidate(); + while (value != nullptr) { + bucket->evict(value); + _metadata->lock(); + _metadata->adjustUsageIfAllowed(-static_cast(value->size())); + _metadata->unlock(); + freeValue(value); + + value = bucket->evictionCandidate(); + } + bucket->clear(); + } +} + +uint32_t PlainCache::getIndex(uint32_t hash, bool useAuxiliary) const { + if (useAuxiliary) { + return ((hash & _auxiliaryBucketMask) >> _auxiliaryMaskShift); + } + + return ((hash & _bucketMask) >> _maskShift); +} diff --git a/arangod/Cache/PlainCache.h b/arangod/Cache/PlainCache.h new file mode 100644 index 0000000000..039df37400 --- /dev/null +++ b/arangod/Cache/PlainCache.h @@ -0,0 +1,132 @@ 
+//////////////////////////////////////////////////////////////////////////////// +/// DISCLAIMER +/// +/// Copyright 2014-2017 ArangoDB GmbH, Cologne, Germany +/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is ArangoDB GmbH, Cologne, Germany +/// +/// @author Daniel H. Larkin +//////////////////////////////////////////////////////////////////////////////// + +#ifndef ARANGODB_CACHE_PLAIN_CACHE_H +#define ARANGODB_CACHE_PLAIN_CACHE_H + +#include "Basics/Common.h" +#include "Cache/Cache.h" +#include "Cache/CachedValue.h" +#include "Cache/FrequencyBuffer.h" +#include "Cache/Manager.h" +#include "Cache/ManagerTasks.h" +#include "Cache/Metadata.h" +#include "Cache/PlainBucket.h" +#include "Cache/State.h" + +#include +#include +#include +#include + +namespace arangodb { +namespace cache { + +class Manager; // forward declaration + +//////////////////////////////////////////////////////////////////////////////// +/// @brief A simple, LRU-ish cache. +/// +/// To create a cache, see Manager class. Once created, the class has a very +/// simple API following that of the base Cache class. For any non-pure-virtual +/// functions, see Cache.h for documentation. 
+//////////////////////////////////////////////////////////////////////////////// +class PlainCache final : public Cache { + public: + PlainCache() = delete; + PlainCache(PlainCache const&) = delete; + PlainCache& operator=(PlainCache const&) = delete; + + public: + ////////////////////////////////////////////////////////////////////////////// + /// @brief Looks up the given key. + /// + /// May report a false negative if it fails to acquire a lock in a timely + /// fashion. Should not block for long. + ////////////////////////////////////////////////////////////////////////////// + Cache::Finding find(void const* key, uint32_t keySize); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Attempts to insert the given value. + /// + /// Returns true if inserted, false otherwise. Will not insert value if this + /// would cause the total usage to exceed the limits. May also not insert + /// value if it fails to acquire a lock in a timely fashion. Should not block + /// for long. + ////////////////////////////////////////////////////////////////////////////// + bool insert(CachedValue* value); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Attempts to remove the given key. + /// + /// Returns true if the key guaranteed not to be in the cache, false if the + /// key may remain in the cache. May leave the key in the cache if it fails to + /// acquire a lock in a timely fashion. Makes more attempts to acquire a lock + /// before quitting, so may block for longer than find or insert. 
+ ////////////////////////////////////////////////////////////////////////////// + bool remove(void const* key, uint32_t keySize); + + private: + // main table info + PlainBucket* _table; + uint32_t _logSize; + uint64_t _tableSize; + uint32_t _maskShift; + uint32_t _bucketMask; + + // auxiliary table info + PlainBucket* _auxiliaryTable; + uint32_t _auxiliaryLogSize; + uint64_t _auxiliaryTableSize; + uint32_t _auxiliaryMaskShift; + uint32_t _auxiliaryBucketMask; + + // friend class manager and tasks + friend class FreeMemoryTask; + friend class Manager; + friend class MigrateTask; + + private: + // creator -- do not use constructor explicitly + static std::shared_ptr create(Manager* manager, uint64_t requestedSize, + bool allowGrowth); + + PlainCache(Manager* manager, uint64_t requestedLimit, bool allowGrowth); + ~PlainCache(); + + // management + bool freeMemory(); + bool migrate(); + void clearTables(); + + // helpers + std::pair getBucket(uint32_t hash, int64_t maxTries, + bool singleOperation = true); + void clearTable(PlainBucket* table, uint64_t tableSize); + uint32_t getIndex(uint32_t hash, bool useAuxiliary) const; +}; + +}; // end namespace cache +}; // end namespace arangodb + +#endif diff --git a/arangod/Cache/Rebalancer.cpp b/arangod/Cache/Rebalancer.cpp new file mode 100644 index 0000000000..dd3e0ff7ca --- /dev/null +++ b/arangod/Cache/Rebalancer.cpp @@ -0,0 +1,37 @@ +//////////////////////////////////////////////////////////////////////////////// +/// DISCLAIMER +/// +/// Copyright 2014-2017 ArangoDB GmbH, Cologne, Germany +/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. 
+/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is ArangoDB GmbH, Cologne, Germany +/// +/// @author Daniel H. Larkin +//////////////////////////////////////////////////////////////////////////////// + +#include "Cache/Rebalancer.h" +#include "Basics/Common.h" +#include "Cache/Manager.h" + +using namespace arangodb::cache; + +Rebalancer::Rebalancer(Manager* manager) : _manager(manager) {} + +bool Rebalancer::rebalance() { + if (_manager != nullptr) { + return _manager->rebalance(); + } + return false; +} diff --git a/arangod/Cache/Rebalancer.h b/arangod/Cache/Rebalancer.h new file mode 100644 index 0000000000..290e24f3f8 --- /dev/null +++ b/arangod/Cache/Rebalancer.h @@ -0,0 +1,56 @@ +//////////////////////////////////////////////////////////////////////////////// +/// DISCLAIMER +/// +/// Copyright 2014-2017 ArangoDB GmbH, Cologne, Germany +/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is ArangoDB GmbH, Cologne, Germany +/// +/// @author Daniel H. 
Larkin +//////////////////////////////////////////////////////////////////////////////// + +#ifndef ARANGODB_CACHE_REBALANCER_H +#define ARANGODB_CACHE_REBALANCER_H + +#include "Basics/Common.h" + +#include "Manager.h" + +namespace arangodb { +namespace cache { + +//////////////////////////////////////////////////////////////////////////////// +/// @brief Dedicated class to rebalance Manager. +//////////////////////////////////////////////////////////////////////////////// +class Rebalancer { + public: + ////////////////////////////////////////////////////////////////////////////// + /// @brief Initialize state with no open transactions. + ////////////////////////////////////////////////////////////////////////////// + Rebalancer(Manager* manager); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Rebalance the manager. + ////////////////////////////////////////////////////////////////////////////// + bool rebalance(); + + private: + Manager* _manager; +}; + +}; // end namespace cache +}; // end namespace arangodb + +#endif diff --git a/arangod/Cache/State.cpp b/arangod/Cache/State.cpp new file mode 100644 index 0000000000..f960719ef8 --- /dev/null +++ b/arangod/Cache/State.cpp @@ -0,0 +1,77 @@ +//////////////////////////////////////////////////////////////////////////////// +/// DISCLAIMER +/// +/// Copyright 2014-2017 ArangoDB GmbH, Cologne, Germany +/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is ArangoDB GmbH, Cologne, Germany +/// +/// @author Daniel H. Larkin +//////////////////////////////////////////////////////////////////////////////// + +#include "Cache/State.h" +#include "Basics/Common.h" + +#include +#include + +using namespace arangodb::cache; + +State::State() : _state(0) {} + +State::State(State const& other) : _state(other._state.load()) {} + +bool State::isLocked() const { + return ((_state.load() & static_cast(Flag::locked)) > 0); +} + +bool State::lock(int64_t maxTries, State::CallbackType cb) { + int64_t attempt = 0; + while (maxTries < 0 || attempt < maxTries) { + // expect unlocked, but need to preserve migrating status + uint32_t expected = _state.load() & (~static_cast(Flag::locked)); + bool success = _state.compare_exchange_strong( + expected, + (expected | static_cast(Flag::locked))); // try to lock + if (success) { + cb(); + return true; + } + attempt++; + // TODO: exponential back-off for failure? 
+ } + + return false; +} + +void State::unlock() { + TRI_ASSERT(isLocked()); + _state &= ~static_cast(Flag::locked); +} + +bool State::isSet(State::Flag flag) const { + TRI_ASSERT(isLocked()); + return ((_state.load() & static_cast(flag)) > 0); +} + +void State::toggleFlag(State::Flag flag) { + TRI_ASSERT(isLocked()); + _state ^= static_cast(flag); +} + +void State::clear() { + TRI_ASSERT(isLocked()); + _state = static_cast(Flag::locked); +} diff --git a/arangod/Cache/State.h b/arangod/Cache/State.h new file mode 100644 index 0000000000..d0009ee923 --- /dev/null +++ b/arangod/Cache/State.h @@ -0,0 +1,126 @@ +//////////////////////////////////////////////////////////////////////////////// +/// DISCLAIMER +/// +/// Copyright 2014-2017 ArangoDB GmbH, Cologne, Germany +/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is ArangoDB GmbH, Cologne, Germany +/// +/// @author Daniel H. Larkin +//////////////////////////////////////////////////////////////////////////////// + +#ifndef ARANGODB_CACHE_STATE_H +#define ARANGODB_CACHE_STATE_H + +#include "Basics/Common.h" + +#include +#include + +namespace arangodb { +namespace cache { + +//////////////////////////////////////////////////////////////////////////////// +/// @brief Simple state class with a small footprint. +/// +/// Underlying store is simply a std::atomic, and each bit corresponds +/// to a flag that can be set. 
The lowest bit is special and is designated as +/// the locking flag. Any access (read or modify) to the state must occur when +/// the state is already locked; the two exceptions are to check whether the +/// state is locked and, of course, to lock it. Any flags besides the lock flag +/// are treated uniformly, and can be checked or toggled. Each flag is defined +/// via an enum and must correspond to exactly one set bit. +//////////////////////////////////////////////////////////////////////////////// +struct State { + typedef std::function CallbackType; + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Flags which can be queried or toggled to reflect state. + /// + /// Each flag must have exactly one bit set, fit in uint32_t. The 'locked' + /// flag is special and should remain the least-significant bit. When other + /// flags are added,they should be kept in alphabetical order for readability, + /// and all flag values should be adjusted to keep bit-significance in + /// ascending order. + ////////////////////////////////////////////////////////////////////////////// + enum class Flag : uint32_t { + locked = 0x00000001, + blacklisted = 0x00000002, + migrated = 0x00000004, + migrating = 0x00000008, + rebalancing = 0x00000010, + resizing = 0x00000020, + shutdown = 0x00000040, + shuttingDown = 0x00000080 + }; + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Initializes state with no flags set and unlocked + ////////////////////////////////////////////////////////////////////////////// + State(); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Initializes state to match another + ////////////////////////////////////////////////////////////////////////////// + State(State const& other); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Checks if state is locked. 
+ ////////////////////////////////////////////////////////////////////////////// + bool isLocked() const; + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Used to lock state. Returns true if locked, false otherwise. + /// + /// By default, it will try as many times as necessary to acquire the lock. + /// The number of tries may be limited to any positive integer value to avoid + /// excessively long waits. The return value indicates whether the state was + /// locked or not. The optional second parameter is a function which will be + /// called upon successfully locking the state. + ////////////////////////////////////////////////////////////////////////////// + bool lock(int64_t maxTries = -1LL, State::CallbackType cb = []() -> void {}); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Unlocks the state. Requires state to be locked. + ////////////////////////////////////////////////////////////////////////////// + void unlock(); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Checks whether the given flag is set. Requires state to be locked. + ////////////////////////////////////////////////////////////////////////////// + bool isSet(State::Flag flag) const; + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Toggles the given flag. Requires state to be locked. + ////////////////////////////////////////////////////////////////////////////// + void toggleFlag(State::Flag flag); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Unsets all flags besides Flag::locked. Requires state to be locked. 
+ ////////////////////////////////////////////////////////////////////////////// + void clear(); + + private: + std::atomic _state; +}; + +// ensure that state is exactly the size of uint32_t +static_assert(sizeof(State) == sizeof(uint32_t), + "Expected sizeof(State) == sizeof(uint32_t)."); + +}; // end namespace cache +}; // end namespace arangodb + +#endif diff --git a/arangod/Cache/TransactionWindow.cpp b/arangod/Cache/TransactionWindow.cpp new file mode 100644 index 0000000000..72ccf9a847 --- /dev/null +++ b/arangod/Cache/TransactionWindow.cpp @@ -0,0 +1,45 @@ +//////////////////////////////////////////////////////////////////////////////// +/// DISCLAIMER +/// +/// Copyright 2014-2017 ArangoDB GmbH, Cologne, Germany +/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is ArangoDB GmbH, Cologne, Germany +/// +/// @author Daniel H. 
Larkin +//////////////////////////////////////////////////////////////////////////////// + +#include "Cache/TransactionWindow.h" + +#include +#include + +using namespace arangodb::cache; + +TransactionWindow::TransactionWindow() : _open(0), _term(0) {} + +void TransactionWindow::start() { + if (++_open == 1) { + _term++; + } +} + +void TransactionWindow::end() { + if (--_open == 0) { + _term++; + } +} + +uint64_t TransactionWindow::term() { return _term.load(); } diff --git a/arangod/Cache/TransactionWindow.h b/arangod/Cache/TransactionWindow.h new file mode 100644 index 0000000000..085fc5da85 --- /dev/null +++ b/arangod/Cache/TransactionWindow.h @@ -0,0 +1,72 @@ +//////////////////////////////////////////////////////////////////////////////// +/// DISCLAIMER +/// +/// Copyright 2014-2017 ArangoDB GmbH, Cologne, Germany +/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is ArangoDB GmbH, Cologne, Germany +/// +/// @author Daniel H. 
Larkin +//////////////////////////////////////////////////////////////////////////////// + +#ifndef ARANGODB_CACHE_TRANSACTION_WINDOW_H +#define ARANGODB_CACHE_TRANSACTION_WINDOW_H + +#include "Basics/Common.h" + +#include +#include + +namespace arangodb { +namespace cache { + +//////////////////////////////////////////////////////////////////////////////// +/// @brief Manage windows in time when there are either no ongoing transactions +/// or some. +/// +/// Allows clients to start a transaction, end a transaction, and query an +/// identifier for the current window. +//////////////////////////////////////////////////////////////////////////////// +class TransactionWindow { + public: + ////////////////////////////////////////////////////////////////////////////// + /// @brief Initialize state with no open transactions. + ////////////////////////////////////////////////////////////////////////////// + TransactionWindow(); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Signal the beginning of a transaction. + ////////////////////////////////////////////////////////////////////////////// + void start(); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Signal the end of a transaction. + ////////////////////////////////////////////////////////////////////////////// + void end(); + + ////////////////////////////////////////////////////////////////////////////// + /// @brief Return the current window identifier. 
+ ////////////////////////////////////////////////////////////////////////////// + uint64_t term(); + + private: + std::atomic _open; + std::atomic _term; +}; + +}; // end namespace cache +}; // end namespace arangodb + +#endif diff --git a/arangod/Cache/TransactionalBucket.cpp b/arangod/Cache/TransactionalBucket.cpp new file mode 100644 index 0000000000..8b1d089ba5 --- /dev/null +++ b/arangod/Cache/TransactionalBucket.cpp @@ -0,0 +1,229 @@ +//////////////////////////////////////////////////////////////////////////////// +/// DISCLAIMER +/// +/// Copyright 2014-2017 ArangoDB GmbH, Cologne, Germany +/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is ArangoDB GmbH, Cologne, Germany +/// +/// @author Daniel H. 
Larkin +//////////////////////////////////////////////////////////////////////////////// + +#include "Cache/TransactionalBucket.h" +#include "Basics/Common.h" +#include "Cache/CachedValue.h" +#include "Cache/State.h" + +#include +#include + +using namespace arangodb::cache; + +size_t TransactionalBucket::SLOTS_DATA = 3; +size_t TransactionalBucket::SLOTS_BLACKLIST = 4; + +TransactionalBucket::TransactionalBucket() { + memset(this, 0, sizeof(TransactionalBucket)); +} + +bool TransactionalBucket::lock(uint64_t transactionTerm, int64_t maxTries) { + return _state.lock(maxTries, + [&]() -> void { updateBlacklistTerm(transactionTerm); }); +} + +void TransactionalBucket::unlock() { + TRI_ASSERT(isLocked()); + _state.unlock(); +} + +bool TransactionalBucket::isLocked() const { return _state.isLocked(); } + +bool TransactionalBucket::isMigrated() const { + TRI_ASSERT(isLocked()); + // NOTE(review): was State::Flag::blacklisted — copy-paste from + // isFullyBlacklisted() below; a migration check must test the migrated flag. + return _state.isSet(State::Flag::migrated); +} + +bool TransactionalBucket::isFullyBlacklisted() const { + TRI_ASSERT(isLocked()); + return _state.isSet(State::Flag::blacklisted); +} + +bool TransactionalBucket::isFull() const { + TRI_ASSERT(isLocked()); + bool hasEmptySlot = false; + for (size_t i = 0; i < SLOTS_DATA; i++) { + size_t slot = SLOTS_DATA - (i + 1); + if (_cachedHashes[slot] == 0) { + hasEmptySlot = true; + break; + } + } + + return !hasEmptySlot; +} + +CachedValue* TransactionalBucket::find(uint32_t hash, void const* key, + uint32_t keySize, bool moveToFront) { + TRI_ASSERT(isLocked()); + CachedValue* result = nullptr; + + for (size_t i = 0; i < SLOTS_DATA; i++) { + if (_cachedHashes[i] == 0) { + break; + } + if (_cachedHashes[i] == hash && _cachedData[i]->sameKey(key, keySize)) { + result = _cachedData[i]; + if (moveToFront) { + moveSlot(i, true); + } + break; + } + } + + return result; +} + +void TransactionalBucket::insert(uint32_t hash, CachedValue* value) { + TRI_ASSERT(isLocked()); + if (isBlacklisted(hash)) { + return; + } + + for (size_t i = 0; i < SLOTS_DATA; i++) { + 
if (_cachedHashes[i] == 0) { + // found an empty slot + _cachedHashes[i] = hash; + _cachedData[i] = value; + if (i != 0) { + moveSlot(i, true); + } + return; + } + } +} + +CachedValue* TransactionalBucket::remove(uint32_t hash, void const* key, + uint32_t keySize) { + TRI_ASSERT(isLocked()); + CachedValue* value = find(hash, key, keySize, false); + if (value != nullptr) { + evict(value, false); + } + + return value; +} + +void TransactionalBucket::blacklist(uint32_t hash, void const* key, + uint32_t keySize) { + TRI_ASSERT(isLocked()); + // remove key if it's here + remove(hash, key, keySize); + + if (isFullyBlacklisted()) { + return; + } + + for (size_t i = 0; i < SLOTS_BLACKLIST; i++) { + if (_blacklistHashes[i] == 0) { + // found an empty slot + _blacklistHashes[i] = hash; + return; + } + } + + // no empty slot found, fully blacklist + _state.toggleFlag(State::Flag::blacklisted); +} + +bool TransactionalBucket::isBlacklisted(uint32_t hash) const { + TRI_ASSERT(isLocked()); + if (isFullyBlacklisted()) { + return true; + } + + bool blacklisted = false; + for (size_t i = 0; i < SLOTS_BLACKLIST; i++) { + if (_blacklistHashes[i] == hash) { + blacklisted = true; + break; + } + } + + return blacklisted; +} + +CachedValue* TransactionalBucket::evictionCandidate() const { + TRI_ASSERT(isLocked()); + for (size_t i = 0; i < SLOTS_DATA; i++) { + size_t slot = SLOTS_DATA - (i + 1); + if (_cachedHashes[slot] == 0) { + continue; + } + if (_cachedData[slot]->isFreeable()) { + return _cachedData[slot]; + } + } + + return nullptr; +} + +void TransactionalBucket::evict(CachedValue* value, bool optimizeForInsertion) { + TRI_ASSERT(isLocked()); + for (size_t i = 0; i < SLOTS_DATA; i++) { + size_t slot = SLOTS_DATA - (i + 1); + if (_cachedData[slot] == value) { + // found a match + _cachedHashes[slot] = 0; + _cachedData[slot] = nullptr; + moveSlot(slot, optimizeForInsertion); + return; + } + } +} + +void TransactionalBucket::updateBlacklistTerm(uint64_t term) { + if (term > 
_blacklistTerm) { + _blacklistTerm = term; + + if (isFullyBlacklisted()) { + _state.toggleFlag(State::Flag::blacklisted); + } + + memset(_blacklistHashes, 0, (SLOTS_BLACKLIST * sizeof(uint32_t))); + } +} + +void TransactionalBucket::moveSlot(size_t slot, bool moveToFront) { + uint32_t hash = _cachedHashes[slot]; + CachedValue* value = _cachedData[slot]; + size_t i = slot; + if (moveToFront) { + // move slot to front + for (; i >= 1; i--) { + _cachedHashes[i] = _cachedHashes[i - 1]; + _cachedData[i] = _cachedData[i - 1]; + } + } else { + // move slot to back + for (; (i < SLOTS_DATA - 1) && (_cachedHashes[i + 1] != 0); i++) { + _cachedHashes[i] = _cachedHashes[i + 1]; + _cachedData[i] = _cachedData[i + 1]; + } + } + if (i != slot) { + _cachedHashes[i] = hash; + _cachedData[i] = value; + } +} diff --git a/arangod/Cache/TransactionalBucket.h b/arangod/Cache/TransactionalBucket.h new file mode 100644 index 0000000000..c646611994 --- /dev/null +++ b/arangod/Cache/TransactionalBucket.h @@ -0,0 +1,87 @@ +//////////////////////////////////////////////////////////////////////////////// +/// DISCLAIMER +/// +/// Copyright 2014-2017 ArangoDB GmbH, Cologne, Germany +/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is ArangoDB GmbH, Cologne, Germany +/// +/// @author Daniel H. 
Larkin +//////////////////////////////////////////////////////////////////////////////// + +#ifndef ARANGODB_CACHE_TRANSACTIONAL_BUCKET_H +#define ARANGODB_CACHE_TRANSACTIONAL_BUCKET_H + +#include "Basics/Common.h" +#include "Cache/CachedValue.h" +#include "Cache/State.h" + +#include +#include + +namespace arangodb { +namespace cache { + +struct alignas(64) TransactionalBucket { + State _state; + + // actual cached entries + uint32_t _cachedHashes[3]; + CachedValue* _cachedData[3]; + static size_t SLOTS_DATA; + + // blacklist entries for transactional semantics + uint32_t _blacklistHashes[4]; + uint64_t _blacklistTerm; + static size_t SLOTS_BLACKLIST; + +// padding, if necessary? +#ifdef TRI_PADDING_32 + uint32_t _padding[3]; +#endif + + TransactionalBucket(); + + // must lock before using any other operations + bool lock(uint64_t transactionTerm, int64_t maxTries); + void unlock(); + + // state checkers + bool isLocked() const; + bool isMigrated() const; + bool isFullyBlacklisted() const; + bool isFull() const; + + // primary functions + CachedValue* find(uint32_t hash, void const* key, uint32_t keySize, + bool moveToFront = true); + void insert(uint32_t hash, CachedValue* value); + CachedValue* remove(uint32_t hash, void const* key, uint32_t keySize); + void blacklist(uint32_t hash, void const* key, uint32_t keySize); + + // auxiliary functions + bool isBlacklisted(uint32_t hash) const; + CachedValue* evictionCandidate() const; + void evict(CachedValue* value, bool optimizeForInsertion); + + private: + void updateBlacklistTerm(uint64_t term); + void moveSlot(size_t slot, bool moveToFront); +}; + +}; // end namespace cache +}; // end namespace arangodb + +#endif diff --git a/arangod/Cache/TransactionalCache.cpp b/arangod/Cache/TransactionalCache.cpp new file mode 100644 index 0000000000..6144ba5876 --- /dev/null +++ b/arangod/Cache/TransactionalCache.cpp @@ -0,0 +1,100 @@ +//////////////////////////////////////////////////////////////////////////////// +/// 
DISCLAIMER +/// +/// Copyright 2014-2017 ArangoDB GmbH, Cologne, Germany +/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is ArangoDB GmbH, Cologne, Germany +/// +/// @author Daniel H. Larkin +//////////////////////////////////////////////////////////////////////////////// + +#include "Cache/TransactionalCache.h" +#include "Basics/Common.h" +#include "Cache/Cache.h" +#include "Cache/CachedValue.h" +#include "Cache/FrequencyBuffer.h" +#include "Cache/Metadata.h" +#include "Cache/TransactionalBucket.h" + +#include +#include +#include +#include + +using namespace arangodb::cache; + +Cache::Finding TransactionalCache::find(void const* key, uint32_t keySize) { + // TODO: implement this; + return Cache::Finding(nullptr); +} + +bool TransactionalCache::insert(CachedValue* value) { + // TODO: implement this + return false; +} + +bool TransactionalCache::remove(void const* key, uint32_t keySize) { + // TODO: implement this + return false; +} + +void TransactionalCache::blackList(void const* key, uint32_t keySize) { + // TODO: implement this +} + +std::shared_ptr TransactionalCache::create(Manager* manager, + uint64_t requestedSize, + bool allowGrowth) { + TransactionalCache* cache = + new TransactionalCache(manager, requestedSize, allowGrowth); + + if (cache == nullptr) { + return std::shared_ptr(nullptr); + } + + cache->metadata()->lock(); + auto result = 
cache->metadata()->cache(); + cache->metadata()->unlock(); + + return result; +} + +TransactionalCache::TransactionalCache(Manager* manager, + uint64_t requestedLimit, + bool allowGrowth) + : Cache(manager, requestedLimit, allowGrowth, [](Cache* p) -> void { + delete reinterpret_cast(p); + }) { + // TODO: implement this +} + +TransactionalCache::~TransactionalCache() { + // TODO: implement this +} + +bool TransactionalCache::freeMemory() { + // TODO: implement this + return false; +} + +bool TransactionalCache::migrate() { + // TODO: implement this + return false; +} + +void TransactionalCache::clearTables() { + // TODO: implement this +} diff --git a/arangod/Cache/TransactionalCache.h b/arangod/Cache/TransactionalCache.h new file mode 100644 index 0000000000..f9b21493b6 --- /dev/null +++ b/arangod/Cache/TransactionalCache.h @@ -0,0 +1,94 @@ +//////////////////////////////////////////////////////////////////////////////// +/// DISCLAIMER +/// +/// Copyright 2014-2017 ArangoDB GmbH, Cologne, Germany +/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany +/// +/// Licensed under the Apache License, Version 2.0 (the "License"); +/// you may not use this file except in compliance with the License. +/// You may obtain a copy of the License at +/// +/// http://www.apache.org/licenses/LICENSE-2.0 +/// +/// Unless required by applicable law or agreed to in writing, software +/// distributed under the License is distributed on an "AS IS" BASIS, +/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +/// See the License for the specific language governing permissions and +/// limitations under the License. +/// +/// Copyright holder is ArangoDB GmbH, Cologne, Germany +/// +/// @author Daniel H. 
Larkin +//////////////////////////////////////////////////////////////////////////////// + +#ifndef ARANGODB_CACHE_TRANSACTIONAL_CACHE_H +#define ARANGODB_CACHE_TRANSACTIONAL_CACHE_H + +#include "Basics/Common.h" +#include "Cache/Cache.h" +#include "Cache/CachedValue.h" +#include "Cache/FrequencyBuffer.h" +#include "Cache/Metadata.h" +#include "Cache/TransactionalBucket.h" + +#include +#include +#include +#include + +namespace arangodb { +namespace cache { + +class Manager; // forward declaration + +class TransactionalCache final : public Cache { + public: + TransactionalCache() = delete; + TransactionalCache(TransactionalCache const&) = delete; + TransactionalCache& operator=(TransactionalCache const&) = delete; + + public: + Cache::Finding find(void const* key, uint32_t keySize); + bool insert(CachedValue* value); + bool remove(void const* key, uint32_t keySize); + void blackList(void const* key, uint32_t keySize); + + private: + // main table info + TransactionalBucket* _table; + uint32_t _logSize; + uint64_t _tableSize; + uint32_t _maskShift; + uint32_t _bucketMask; + + // auxiliary table info + TransactionalBucket* _auxiliaryTable; + uint32_t _auxiliaryLogSize; + uint64_t _auxiliaryTableSize; + uint32_t _auxiliaryMaskShift; + uint32_t _auxiliaryBucketMask; + + // friend class manager and tasks + friend class FreeMemoryTask; + friend class Manager; + friend class MigrateTask; + + private: + // creator -- do not use constructor explicitly + static std::shared_ptr create(Manager* manager, uint64_t requestedSize, + bool allowGrowth); + + TransactionalCache(Manager* manager, uint64_t requestedLimit, + bool allowGrowth); + ~TransactionalCache(); + + // management + bool freeMemory(); + bool migrate(); + void clearTables(); +}; + +}; // end namespace cache +}; // end namespace arangodb + +#endif diff --git a/arangod/Cluster/AgencyCallbackRegistry.cpp b/arangod/Cluster/AgencyCallbackRegistry.cpp index 0b5a2dbb55..180779882b 100644 --- 
a/arangod/Cluster/AgencyCallbackRegistry.cpp +++ b/arangod/Cluster/AgencyCallbackRegistry.cpp @@ -46,9 +46,7 @@ AgencyCallbackRegistry::AgencyCallbackRegistry(std::string const& callbackBasePa AgencyCallbackRegistry::~AgencyCallbackRegistry() { } -AgencyCommResult AgencyCallbackRegistry::registerCallback( - std::shared_ptr cb) { - +bool AgencyCallbackRegistry::registerCallback(std::shared_ptr cb) { uint32_t rand; { WRITE_LOCKER(locker, _lock); @@ -60,28 +58,23 @@ AgencyCommResult AgencyCallbackRegistry::registerCallback( } } - AgencyCommResult result; + bool ok = false; try { - result = _agency.registerCallback(cb->key, getEndpointUrl(rand)); - if (!result.successful()) { - LOG_TOPIC(ERR, arangodb::Logger::AGENCY) - << "Registering callback failed with " << result.errorCode() << ": " - << result.errorMessage(); + ok = _agency.registerCallback(cb->key, getEndpointUrl(rand)).successful(); + if (!ok) { + LOG_TOPIC(ERR, Logger::CLUSTER) << "Registering callback failed"; } } catch (std::exception const& e) { - LOG_TOPIC(ERR, arangodb::Logger::FIXME) - << "Couldn't register callback " << e.what(); + LOG_TOPIC(ERR, Logger::CLUSTER) << "Couldn't register callback " << e.what(); } catch (...) { - LOG_TOPIC(ERR, arangodb::Logger::FIXME) + LOG_TOPIC(ERR, Logger::CLUSTER) << "Couldn't register callback. 
Unknown exception"; } - - if (!result.successful()) { + if (!ok) { WRITE_LOCKER(locker, _lock); _endpoints.erase(rand); } - - return result; + return ok; } std::shared_ptr AgencyCallbackRegistry::getCallback(uint32_t id) { diff --git a/arangod/Cluster/AgencyCallbackRegistry.h b/arangod/Cluster/AgencyCallbackRegistry.h index 34b3baa7b5..7e3bc567c9 100644 --- a/arangod/Cluster/AgencyCallbackRegistry.h +++ b/arangod/Cluster/AgencyCallbackRegistry.h @@ -44,7 +44,7 @@ public: ////////////////////////////////////////////////////////////////////////////// /// @brief register a callback ////////////////////////////////////////////////////////////////////////////// - AgencyCommResult registerCallback(std::shared_ptr); + bool registerCallback(std::shared_ptr); ////////////////////////////////////////////////////////////////////////////// /// @brief unregister a callback diff --git a/arangod/Cluster/ClusterInfo.cpp b/arangod/Cluster/ClusterInfo.cpp index 61b48ff51d..5b2a765f78 100644 --- a/arangod/Cluster/ClusterInfo.cpp +++ b/arangod/Cluster/ClusterInfo.cpp @@ -512,12 +512,12 @@ void ClusterInfo::loadPlan() { // This should not happen in healthy situations. // If it happens in unhealthy situations the // cluster should not fail. - LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "Failed to load information for collection '" - << collectionId << "': " << ex.what() - << ". invalid information in plan. The collection will " - "be ignored for now and the invalid information will " - "be repaired. VelocyPack: " - << collectionSlice.toJson(); + LOG_TOPIC(ERR, Logger::AGENCY) + << "Failed to load information for collection '" << collectionId + << "': " << ex.what() << ". invalid information in plan. The" + "collection will be ignored for now and the invalid information" + "will be repaired. VelocyPack: " + << collectionSlice.toJson(); TRI_ASSERT(false); continue; @@ -526,12 +526,12 @@ void ClusterInfo::loadPlan() { // This should not happen in healthy situations. 
// If it happens in unhealthy situations the // cluster should not fail. - LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "Failed to load information for collection '" - << collectionId - << ". invalid information in plan. The collection will " - "be ignored for now and the invalid information will " - "be repaired. VelocyPack: " - << collectionSlice.toJson(); + LOG_TOPIC(ERR, Logger::AGENCY) + << "Failed to load information for collection '" << collectionId + << ". invalid information in plan. The collection will " + "be ignored for now and the invalid information will " + "be repaired. VelocyPack: " + << collectionSlice.toJson(); TRI_ASSERT(false); continue; @@ -886,14 +886,7 @@ int ClusterInfo::createDatabaseCoordinator(std::string const& name, // AgencyCallback for this. auto agencyCallback = std::make_shared( ac, "Current/Databases/" + name, dbServerChanged, true, false); - - auto regres = _agencyCallbackRegistry->registerCallback(agencyCallback); - if (!regres.successful()) { - LOG_TOPIC(DEBUG, Logger::CLUSTER) << - "Could not register call back with error: " << regres.errorCode() << - " - " << regres.errorMessage(); - } - + _agencyCallbackRegistry->registerCallback(agencyCallback); TRI_DEFER(_agencyCallbackRegistry->unregisterCallback(agencyCallback)); AgencyOperation newVal("Plan/Databases/" + name, @@ -988,14 +981,7 @@ int ClusterInfo::dropDatabaseCoordinator(std::string const& name, // AgencyCallback for this. 
auto agencyCallback = std::make_shared(ac, where, dbServerChanged, true, false); - - auto regres = _agencyCallbackRegistry->registerCallback(agencyCallback); - if (!regres.successful()) { - LOG_TOPIC(DEBUG, Logger::CLUSTER) << - "Could not register call back with error: " << regres.errorCode() << - " - " << regres.errorMessage(); - } - + _agencyCallbackRegistry->registerCallback(agencyCallback); TRI_DEFER(_agencyCallbackRegistry->unregisterCallback(agencyCallback)); // Transact to agency @@ -1151,14 +1137,7 @@ int ClusterInfo::createCollectionCoordinator(std::string const& databaseName, auto agencyCallback = std::make_shared( ac, "Current/Collections/" + databaseName + "/" + collectionID, dbServerChanged, true, false); - - auto regres = _agencyCallbackRegistry->registerCallback(agencyCallback); - if (!regres.successful()) { - LOG_TOPIC(DEBUG, Logger::CLUSTER) << - "Could not register call back with error: " << regres.errorCode() << - " - " << regres.errorMessage(); - } - + _agencyCallbackRegistry->registerCallback(agencyCallback); TRI_DEFER(_agencyCallbackRegistry->unregisterCallback(agencyCallback)); AgencyOperation createCollection( @@ -1283,14 +1262,7 @@ int ClusterInfo::dropCollectionCoordinator(std::string const& databaseName, // AgencyCallback for this. 
auto agencyCallback = std::make_shared(ac, where, dbServerChanged, true, false); - - auto regres = _agencyCallbackRegistry->registerCallback(agencyCallback); - if (!regres.successful()) { - LOG_TOPIC(DEBUG, Logger::CLUSTER) << - "Could not register call back with error: " << regres.errorCode() << - " - " << regres.errorMessage(); - } - + _agencyCallbackRegistry->registerCallback(agencyCallback); TRI_DEFER(_agencyCallbackRegistry->unregisterCallback(agencyCallback)); size_t numberOfShards = 0; @@ -1541,8 +1513,6 @@ int ClusterInfo::ensureIndexCoordinator( if (idxSlice.isString()) { // use predefined index id iid = arangodb::basics::StringUtils::uint64(idxSlice.copyString()); - } else if (idxSlice.isNumber()) { - iid = idxSlice.getNumber(); } if (iid == 0) { @@ -1778,14 +1748,7 @@ int ClusterInfo::ensureIndexCoordinator( // AgencyCallback for this. auto agencyCallback = std::make_shared(ac, where, dbServerChanged, true, false); - - auto regres = _agencyCallbackRegistry->registerCallback(agencyCallback); - if (!regres.successful()) { - LOG_TOPIC(DEBUG, Logger::CLUSTER) << - "Could not register call back with error: " << regres.errorCode() << - " - " << regres.errorMessage(); - } - + _agencyCallbackRegistry->registerCallback(agencyCallback); TRI_DEFER(_agencyCallbackRegistry->unregisterCallback(agencyCallback)); AgencyOperation newValue(key, AgencyValueOperationType::SET, @@ -1812,7 +1775,6 @@ int ClusterInfo::ensureIndexCoordinator( errorMsg += trx.toJson(); errorMsg += "ClientId: " + result._clientId + " "; errorMsg += " ResultCode: " + std::to_string(result.errorCode()) + " "; - errorMsg += " Result: " + result.errorMessage() + " "; errorMsg += std::string(__FILE__) + ":" + std::to_string(__LINE__); resultBuilder = *resBuilder; } @@ -1952,14 +1914,7 @@ int ClusterInfo::dropIndexCoordinator(std::string const& databaseName, // AgencyCallback for this. 
auto agencyCallback = std::make_shared(ac, where, dbServerChanged, true, false); - - auto regres = _agencyCallbackRegistry->registerCallback(agencyCallback); - if (!regres.successful()) { - LOG_TOPIC(DEBUG, Logger::CLUSTER) << - "Could not register call back with error: " << regres.errorCode() << - " - " << regres.errorMessage(); - } - + _agencyCallbackRegistry->registerCallback(agencyCallback); TRI_DEFER(_agencyCallbackRegistry->unregisterCallback(agencyCallback)); loadPlan(); @@ -2442,26 +2397,28 @@ std::shared_ptr> ClusterInfo::getResponsibleServer( while (true) { { - READ_LOCKER(readLocker, _currentProt.lock); - // _shardIds is a map-type >> - auto it = _shardIds.find(shardID); + { + READ_LOCKER(readLocker, _currentProt.lock); + // _shardIds is a map-type >> + auto it = _shardIds.find(shardID); - if (it != _shardIds.end()) { - auto serverList = (*it).second; - if (serverList != nullptr && serverList->size() > 0 && - (*serverList)[0].size() > 0 && (*serverList)[0][0] == '_') { - // This is a temporary situation in which the leader has already - // resigned, let's wait half a second and try again. - --tries; - LOG_TOPIC(INFO, Logger::CLUSTER) - << "getResponsibleServer: found resigned leader," - << "waiting for half a second..."; - usleep(500000); - } else { - return (*it).second; + if (it != _shardIds.end()) { + auto serverList = (*it).second; + if (serverList != nullptr && serverList->size() > 0 && + (*serverList)[0].size() > 0 && (*serverList)[0][0] == '_') { + // This is a temporary situation in which the leader has already + // resigned, let's wait half a second and try again. 
+ --tries; + LOG_TOPIC(INFO, Logger::CLUSTER) + << "getResponsibleServer: found resigned leader," + << "waiting for half a second..."; + } else { + return (*it).second; + } } } + usleep(500000); } if (++tries >= 2) { diff --git a/arangod/Cluster/HeartbeatThread.cpp b/arangod/Cluster/HeartbeatThread.cpp index ad973da55d..f3911a45eb 100644 --- a/arangod/Cluster/HeartbeatThread.cpp +++ b/arangod/Cluster/HeartbeatThread.cpp @@ -241,7 +241,7 @@ void HeartbeatThread::runDBServer() { bool registered = false; while (!registered) { registered = - _agencyCallbackRegistry->registerCallback(planAgencyCallback).successful(); + _agencyCallbackRegistry->registerCallback(planAgencyCallback); if (!registered) { LOG_TOPIC(ERR, Logger::HEARTBEAT) << "Couldn't register plan change in agency!"; diff --git a/arangod/Cluster/TraverserEngine.cpp b/arangod/Cluster/TraverserEngine.cpp index 27dee519c0..c4ae1e23b6 100644 --- a/arangod/Cluster/TraverserEngine.cpp +++ b/arangod/Cluster/TraverserEngine.cpp @@ -294,7 +294,7 @@ bool BaseTraverserEngine::lockCollection(std::string const& shard) { if (cid == 0) { return false; } - _trx->orderDitch(cid); // will throw when it fails + _trx->pinData(cid); // will throw when it fails int res = _trx->lock(_trx->trxCollection(cid), AccessMode::Type::READ); if (res != TRI_ERROR_NO_ERROR) { LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "Logging Shard " << shard << " lead to exception '" diff --git a/arangod/GeneralServer/GeneralServerFeature.cpp b/arangod/GeneralServer/GeneralServerFeature.cpp index 4615bc2691..7d125c3c44 100644 --- a/arangod/GeneralServer/GeneralServerFeature.cpp +++ b/arangod/GeneralServer/GeneralServerFeature.cpp @@ -213,13 +213,17 @@ static bool SetRequestContext(GeneralRequest* request, void* data) { if (vocbase == nullptr) { return false; } + + TRI_ASSERT(!vocbase->isDangling()); // database needs upgrade if (vocbase->state() == TRI_vocbase_t::State::FAILED_VERSION) { request->setRequestPath("/_msg/please-upgrade"); + 
vocbase->release(); return false; } + // the vocbase context is now responsible for releasing the vocbase VocbaseContext* ctx = new arangodb::VocbaseContext(request, vocbase); request->setRequestContext(ctx, true); diff --git a/arangod/MMFiles/MMFilesAqlFunctions.cpp b/arangod/MMFiles/MMFilesAqlFunctions.cpp index 7ba5ff8389..dabf55989f 100644 --- a/arangod/MMFiles/MMFilesAqlFunctions.cpp +++ b/arangod/MMFiles/MMFilesAqlFunctions.cpp @@ -165,7 +165,7 @@ static arangodb::MMFilesGeoIndex* getGeoIndex( collectionName.c_str()); } - trx->orderDitch(cid); + trx->pinData(cid); return index; } @@ -260,7 +260,7 @@ AqlValue MMFilesAqlFunctions::Fulltext( collectionName.c_str()); } - trx->orderDitch(cid); + trx->pinData(cid); TRI_fulltext_query_t* ft = TRI_CreateQueryMMFilesFulltextIndex(TRI_FULLTEXT_SEARCH_MAX_WORDS, maxResults); @@ -286,7 +286,7 @@ AqlValue MMFilesAqlFunctions::Fulltext( THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY); } - TRI_ASSERT(trx->hasDitch(cid)); + TRI_ASSERT(trx->isPinned(cid)); transaction::BuilderLeaser builder(trx); try { @@ -364,7 +364,7 @@ AqlValue MMFilesAqlFunctions::Near(arangodb::aql::Query* query, arangodb::MMFilesGeoIndex* index = getGeoIndex(trx, cid, collectionName); TRI_ASSERT(index != nullptr); - TRI_ASSERT(trx->hasDitch(cid)); + TRI_ASSERT(trx->isPinned(cid)); GeoCoordinates* cors = index->nearQuery( trx, latitude.toDouble(trx), longitude.toDouble(trx), static_cast(limitValue)); @@ -415,7 +415,7 @@ AqlValue MMFilesAqlFunctions::Within( arangodb::MMFilesGeoIndex* index = getGeoIndex(trx, cid, collectionName); TRI_ASSERT(index != nullptr); - TRI_ASSERT(trx->hasDitch(cid)); + TRI_ASSERT(trx->isPinned(cid)); GeoCoordinates* cors = index->withinQuery( trx, latitudeValue.toDouble(trx), longitudeValue.toDouble(trx), radiusValue.toDouble(trx)); diff --git a/arangod/MMFiles/MMFilesCleanupThread.cpp b/arangod/MMFiles/MMFilesCleanupThread.cpp index ac67dcc77e..2a00ac719e 100644 --- a/arangod/MMFiles/MMFilesCleanupThread.cpp +++ 
b/arangod/MMFiles/MMFilesCleanupThread.cpp @@ -170,8 +170,8 @@ void MMFilesCleanupThread::cleanupCollection(arangodb::LogicalCollection* collec TRI_ASSERT(ditches != nullptr); // check and remove all callback elements at the beginning of the list - auto callback = [&](arangodb::Ditch const* ditch) -> bool { - if (ditch->type() == arangodb::Ditch::TRI_DITCH_COLLECTION_UNLOAD) { + auto callback = [&](arangodb::MMFilesDitch const* ditch) -> bool { + if (ditch->type() == arangodb::MMFilesDitch::TRI_DITCH_COLLECTION_UNLOAD) { // check if we can really unload, this is only the case if the // collection's WAL markers // were fully collected @@ -215,9 +215,9 @@ void MMFilesCleanupThread::cleanupCollection(arangodb::LogicalCollection* collec if (gotStatus && !isUnloading) { popped = false; auto unloader = - ditches->process(popped, [](arangodb::Ditch const* ditch) -> bool { + ditches->process(popped, [](arangodb::MMFilesDitch const* ditch) -> bool { return (ditch->type() == - arangodb::Ditch::TRI_DITCH_COLLECTION_UNLOAD); + arangodb::MMFilesDitch::TRI_DITCH_COLLECTION_UNLOAD); }); if (popped) { // we've changed the list. 
try with current state in next turn @@ -264,17 +264,17 @@ void MMFilesCleanupThread::cleanupCollection(arangodb::LogicalCollection* collec // execute callback, some of the callbacks might delete or unload our collection auto const type = ditch->type(); - if (type == arangodb::Ditch::TRI_DITCH_DATAFILE_DROP) { - dynamic_cast(ditch)->executeCallback(); + if (type == arangodb::MMFilesDitch::TRI_DITCH_DATAFILE_DROP) { + dynamic_cast(ditch)->executeCallback(); delete ditch; // next iteration - } else if (type == arangodb::Ditch::TRI_DITCH_DATAFILE_RENAME) { - dynamic_cast(ditch)->executeCallback(); + } else if (type == arangodb::MMFilesDitch::TRI_DITCH_DATAFILE_RENAME) { + dynamic_cast(ditch)->executeCallback(); delete ditch; // next iteration - } else if (type == arangodb::Ditch::TRI_DITCH_COLLECTION_UNLOAD) { + } else if (type == arangodb::MMFilesDitch::TRI_DITCH_COLLECTION_UNLOAD) { // collection will be unloaded - bool hasUnloaded = dynamic_cast(ditch) + bool hasUnloaded = dynamic_cast(ditch) ->executeCallback(); delete ditch; @@ -282,9 +282,9 @@ void MMFilesCleanupThread::cleanupCollection(arangodb::LogicalCollection* collec // this has unloaded and freed the collection return; } - } else if (type == arangodb::Ditch::TRI_DITCH_COLLECTION_DROP) { + } else if (type == arangodb::MMFilesDitch::TRI_DITCH_COLLECTION_DROP) { // collection will be dropped - bool hasDropped = dynamic_cast(ditch) + bool hasDropped = dynamic_cast(ditch) ->executeCallback(); delete ditch; diff --git a/arangod/MMFiles/MMFilesCollection.cpp b/arangod/MMFiles/MMFilesCollection.cpp index ebf65ef155..d285b59cd4 100644 --- a/arangod/MMFiles/MMFilesCollection.cpp +++ b/arangod/MMFiles/MMFilesCollection.cpp @@ -1116,7 +1116,7 @@ void MMFilesCollection::figuresSpecific(std::shared_ptradd("time", VPackValue(&lastCompactionStampString[0])); builder->close(); // compactionStatus - builder->add("documentReferences", VPackValue(_ditches.numDocumentDitches())); + builder->add("documentReferences", 
VPackValue(_ditches.numMMFilesDocumentMMFilesDitches())); char const* waitingForDitch = _ditches.head(); builder->add("waitingFor", VPackValue(waitingForDitch == nullptr ? "-" : waitingForDitch)); @@ -1660,6 +1660,9 @@ void MMFilesCollection::open(bool ignoreErrors) { arangodb::SingleCollectionTransaction trx( arangodb::StandaloneTransactionContext::Create(vocbase), cid, AccessMode::Type::WRITE); + // the underlying collections must not be locked here because the "load" + // routine can be invoked from any other place, e.g. from an AQL query + trx.addHint(transaction::Hints::Hint::LOCK_NEVER); // build the primary index double startIterate = TRI_microtime(); @@ -1945,7 +1948,6 @@ bool MMFilesCollection::dropIndex(TRI_idx_iid_t iid) { } auto vocbase = _logicalCollection->vocbase(); - arangodb::aql::QueryCache::instance()->invalidate(vocbase, _logicalCollection->name()); if (!_logicalCollection->removeIndex(iid)) { // We tried to remove an index that does not exist events::DropIndex("", std::to_string(iid), diff --git a/arangod/MMFiles/MMFilesCollection.h b/arangod/MMFiles/MMFilesCollection.h index 613092b498..58fd779958 100644 --- a/arangod/MMFiles/MMFilesCollection.h +++ b/arangod/MMFiles/MMFilesCollection.h @@ -219,7 +219,7 @@ class MMFilesCollection final : public PhysicalCollection { double lastCompactionStamp() const { return _lastCompactionStamp; } void lastCompactionStamp(double value) { _lastCompactionStamp = value; } - Ditches* ditches() const { return &_ditches; } + MMFilesDitches* ditches() const { return &_ditches; } void open(bool ignoreErrors) override; @@ -467,7 +467,7 @@ class MMFilesCollection final : public PhysicalCollection { bool& waitForSync); private: - mutable arangodb::Ditches _ditches; + mutable arangodb::MMFilesDitches _ditches; // lock protecting the indexes mutable basics::ReadWriteLock _idxLock; diff --git a/arangod/MMFiles/MMFilesCollectorCache.h b/arangod/MMFiles/MMFilesCollectorCache.h index 5f6d8628b9..82aa0d4ce2 100644 --- 
a/arangod/MMFiles/MMFilesCollectorCache.h +++ b/arangod/MMFiles/MMFilesCollectorCache.h @@ -76,7 +76,7 @@ struct MMFilesCollectorCache { ~MMFilesCollectorCache() { delete operations; - freeDitches(); + freeMMFilesDitches(); } /// @brief return a reference to an existing datafile statistics struct @@ -99,15 +99,15 @@ struct MMFilesCollectorCache { } /// @brief add a ditch - void addDitch(arangodb::DocumentDitch* ditch) { + void addDitch(arangodb::MMFilesDocumentDitch* ditch) { TRI_ASSERT(ditch != nullptr); ditches.emplace_back(ditch); } /// @brief free all ditches - void freeDitches() { + void freeMMFilesDitches() { for (auto& it : ditches) { - it->ditches()->freeDocumentDitch(it, false); + it->ditches()->freeMMFilesDocumentDitch(it, false); } ditches.clear(); @@ -129,7 +129,7 @@ struct MMFilesCollectorCache { std::vector* operations; /// @brief ditches held by the operations - std::vector ditches; + std::vector ditches; /// @brief datafile info cache, updated when the collector transfers markers std::unordered_map dfi; diff --git a/arangod/MMFiles/MMFilesCompactorThread.cpp b/arangod/MMFiles/MMFilesCompactorThread.cpp index 384d8cc1ce..7c4c35bb45 100644 --- a/arangod/MMFiles/MMFilesCompactorThread.cpp +++ b/arangod/MMFiles/MMFilesCompactorThread.cpp @@ -555,7 +555,7 @@ void MMFilesCompactorThread::compactDatafiles(LogicalCollection* collection, // add a deletion ditch to the collection auto b = arangodb::MMFilesCollection::toMMFilesCollection(collection) ->ditches() - ->createDropDatafileDitch(compaction._datafile, collection, + ->createMMFilesDropDatafileDitch(compaction._datafile, collection, DropDatafileCallback, __FILE__, __LINE__); @@ -586,7 +586,7 @@ void MMFilesCompactorThread::compactDatafiles(LogicalCollection* collection, // add a rename marker auto b = arangodb::MMFilesCollection::toMMFilesCollection(collection) ->ditches() - ->createRenameDatafileDitch( + ->createMMFilesRenameDatafileDitch( compaction._datafile, context->_compactor, 
context->_collection, RenameDatafileCallback, __FILE__, __LINE__); @@ -604,7 +604,7 @@ void MMFilesCompactorThread::compactDatafiles(LogicalCollection* collection, // add a drop datafile marker auto b = arangodb::MMFilesCollection::toMMFilesCollection(collection) ->ditches() - ->createDropDatafileDitch(compaction._datafile, collection, + ->createMMFilesDropDatafileDitch(compaction._datafile, collection, DropDatafileCallback, __FILE__, __LINE__); @@ -907,7 +907,7 @@ void MMFilesCompactorThread::run() { auto ce = arangodb::MMFilesCollection::toMMFilesCollection( collection) ->ditches() - ->createCompactionDitch(__FILE__, __LINE__); + ->createMMFilesCompactionDitch(__FILE__, __LINE__); if (ce == nullptr) { // out of memory diff --git a/arangod/MMFiles/MMFilesDitch.cpp b/arangod/MMFiles/MMFilesDitch.cpp index eb4d2c75e3..84ccc309e5 100644 --- a/arangod/MMFiles/MMFilesDitch.cpp +++ b/arangod/MMFiles/MMFilesDitch.cpp @@ -28,96 +28,96 @@ using namespace arangodb; -Ditch::Ditch(Ditches* ditches, char const* filename, int line) +MMFilesDitch::MMFilesDitch(MMFilesDitches* ditches, char const* filename, int line) : _ditches(ditches), _prev(nullptr), _next(nullptr), _filename(filename), _line(line) {} -Ditch::~Ditch() {} +MMFilesDitch::~MMFilesDitch() {} /// @brief return the associated collection -LogicalCollection* Ditch::collection() const { +LogicalCollection* MMFilesDitch::collection() const { return _ditches->collection(); } -DocumentDitch::DocumentDitch(Ditches* ditches, bool usedByTransaction, +MMFilesDocumentDitch::MMFilesDocumentDitch(MMFilesDitches* ditches, bool usedByTransaction, char const* filename, int line) - : Ditch(ditches, filename, line), + : MMFilesDitch(ditches, filename, line), _usedByTransaction(usedByTransaction) {} -DocumentDitch::~DocumentDitch() {} +MMFilesDocumentDitch::~MMFilesDocumentDitch() {} -ReplicationDitch::ReplicationDitch(Ditches* ditches, char const* filename, +MMFilesReplicationDitch::MMFilesReplicationDitch(MMFilesDitches* ditches, 
char const* filename, int line) - : Ditch(ditches, filename, line) {} + : MMFilesDitch(ditches, filename, line) {} -ReplicationDitch::~ReplicationDitch() {} +MMFilesReplicationDitch::~MMFilesReplicationDitch() {} -CompactionDitch::CompactionDitch(Ditches* ditches, char const* filename, +MMFilesCompactionDitch::MMFilesCompactionDitch(MMFilesDitches* ditches, char const* filename, int line) - : Ditch(ditches, filename, line) {} + : MMFilesDitch(ditches, filename, line) {} -CompactionDitch::~CompactionDitch() {} +MMFilesCompactionDitch::~MMFilesCompactionDitch() {} -DropDatafileDitch::DropDatafileDitch( - Ditches* ditches, MMFilesDatafile* datafile, LogicalCollection* collection, +MMFilesDropDatafileDitch::MMFilesDropDatafileDitch( + MMFilesDitches* ditches, MMFilesDatafile* datafile, LogicalCollection* collection, std::function const& callback, char const* filename, int line) - : Ditch(ditches, filename, line), + : MMFilesDitch(ditches, filename, line), _datafile(datafile), _collection(collection), _callback(callback) {} -DropDatafileDitch::~DropDatafileDitch() { delete _datafile; } +MMFilesDropDatafileDitch::~MMFilesDropDatafileDitch() { delete _datafile; } -RenameDatafileDitch::RenameDatafileDitch( - Ditches* ditches, MMFilesDatafile* datafile, MMFilesDatafile* compactor, +MMFilesRenameDatafileDitch::MMFilesRenameDatafileDitch( + MMFilesDitches* ditches, MMFilesDatafile* datafile, MMFilesDatafile* compactor, LogicalCollection* collection, std::function const& callback, char const* filename, int line) - : Ditch(ditches, filename, line), + : MMFilesDitch(ditches, filename, line), _datafile(datafile), _compactor(compactor), _collection(collection), _callback(callback) {} -RenameDatafileDitch::~RenameDatafileDitch() {} +MMFilesRenameDatafileDitch::~MMFilesRenameDatafileDitch() {} -UnloadCollectionDitch::UnloadCollectionDitch( - Ditches* ditches, LogicalCollection* collection, +MMFilesUnloadCollectionDitch::MMFilesUnloadCollectionDitch( + MMFilesDitches* ditches, 
LogicalCollection* collection, std::function const& callback, char const* filename, int line) - : Ditch(ditches, filename, line), + : MMFilesDitch(ditches, filename, line), _collection(collection), _callback(callback) {} -UnloadCollectionDitch::~UnloadCollectionDitch() {} +MMFilesUnloadCollectionDitch::~MMFilesUnloadCollectionDitch() {} -DropCollectionDitch::DropCollectionDitch( - Ditches* ditches, arangodb::LogicalCollection* collection, +MMFilesDropCollectionDitch::MMFilesDropCollectionDitch( + MMFilesDitches* ditches, arangodb::LogicalCollection* collection, std::function callback, char const* filename, int line) - : Ditch(ditches, filename, line), + : MMFilesDitch(ditches, filename, line), _collection(collection), _callback(callback) {} -DropCollectionDitch::~DropCollectionDitch() {} +MMFilesDropCollectionDitch::~MMFilesDropCollectionDitch() {} -Ditches::Ditches(LogicalCollection* collection) +MMFilesDitches::MMFilesDitches(LogicalCollection* collection) : _collection(collection), _lock(), _begin(nullptr), _end(nullptr), - _numDocumentDitches(0) { + _numMMFilesDocumentMMFilesDitches(0) { TRI_ASSERT(_collection != nullptr); } -Ditches::~Ditches() { destroy(); } +MMFilesDitches::~MMFilesDitches() { destroy(); } /// @brief destroy the ditches - to be called on shutdown only -void Ditches::destroy() { +void MMFilesDitches::destroy() { MUTEX_LOCKER(mutexLocker, _lock); auto* ptr = _begin; @@ -125,14 +125,14 @@ void Ditches::destroy() { auto* next = ptr->next(); auto const type = ptr->type(); - if (type == Ditch::TRI_DITCH_COLLECTION_UNLOAD || - type == Ditch::TRI_DITCH_COLLECTION_DROP || - type == Ditch::TRI_DITCH_DATAFILE_DROP || - type == Ditch::TRI_DITCH_DATAFILE_RENAME || - type == Ditch::TRI_DITCH_REPLICATION || - type == Ditch::TRI_DITCH_COMPACTION) { + if (type == MMFilesDitch::TRI_DITCH_COLLECTION_UNLOAD || + type == MMFilesDitch::TRI_DITCH_COLLECTION_DROP || + type == MMFilesDitch::TRI_DITCH_DATAFILE_DROP || + type == MMFilesDitch::TRI_DITCH_DATAFILE_RENAME 
|| + type == MMFilesDitch::TRI_DITCH_REPLICATION || + type == MMFilesDitch::TRI_DITCH_COMPACTION) { delete ptr; - } else if (type == Ditch::TRI_DITCH_DOCUMENT) { + } else if (type == MMFilesDitch::TRI_DITCH_DOCUMENT) { LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "logic error. shouldn't have document ditches on unload"; TRI_ASSERT(false); } else { @@ -144,20 +144,20 @@ void Ditches::destroy() { } /// @brief return the associated collection -LogicalCollection* Ditches::collection() const { return _collection; } +LogicalCollection* MMFilesDitches::collection() const { return _collection; } /// @brief run a user-defined function under the lock -void Ditches::executeProtected(std::function callback) { +void MMFilesDitches::executeProtected(std::function callback) { MUTEX_LOCKER(mutexLocker, _lock); callback(); } /// @brief process the first element from the list /// the list will remain unchanged if the first element is either a -/// DocumentDitch, a ReplicationDitch or a CompactionDitch, or if the list -/// contains any DocumentDitches. -Ditch* Ditches::process(bool& popped, - std::function callback) { +/// MMFilesDocumentDitch, a MMFilesReplicationDitch or a MMFilesCompactionDitch, or if the list +/// contains any MMFilesDocumentMMFilesDitches. +MMFilesDitch* MMFilesDitches::process(bool& popped, + std::function callback) { popped = false; MUTEX_LOCKER(mutexLocker, _lock); @@ -173,11 +173,11 @@ Ditch* Ditches::process(bool& popped, auto const type = ditch->type(); - // if it is a DocumentDitch, it means that there is still a reference held + // if it is a MMFilesDocumentDitch, it means that there is still a reference held // to document data in a datafile. 
We must then not unload or remove a file - if (type == Ditch::TRI_DITCH_DOCUMENT || - type == Ditch::TRI_DITCH_REPLICATION || - type == Ditch::TRI_DITCH_COMPACTION || _numDocumentDitches > 0) { + if (type == MMFilesDitch::TRI_DITCH_DOCUMENT || + type == MMFilesDitch::TRI_DITCH_REPLICATION || + type == MMFilesDitch::TRI_DITCH_COMPACTION || _numMMFilesDocumentMMFilesDitches > 0) { // did not find anything at the head of the barrier list or found an element // marker // this means we must exit and cannot throw away datafiles and can unload @@ -185,11 +185,11 @@ Ditch* Ditches::process(bool& popped, return nullptr; } - // no DocumentDitch at the head of the ditches list. This means that there is + // no MMFilesDocumentDitch at the head of the ditches list. This means that there is // some other action we can perform (i.e. unloading a datafile or a // collection) - // note that there is no need to check the entire list for a DocumentDitch as + // note that there is no need to check the entire list for a MMFilesDocumentDitch as // the list is filled up in chronological order. 
New ditches are always added // to the // tail of the list, and if we have the following list @@ -218,7 +218,7 @@ Ditch* Ditches::process(bool& popped, } /// @brief return the type name of the ditch at the head of the active ditches -char const* Ditches::head() { +char const* MMFilesDitches::head() { MUTEX_LOCKER(mutexLocker, _lock); auto ditch = _begin; @@ -230,19 +230,19 @@ char const* Ditches::head() { } /// @brief return the number of document ditches active -uint64_t Ditches::numDocumentDitches() { +uint64_t MMFilesDitches::numMMFilesDocumentMMFilesDitches() { MUTEX_LOCKER(mutexLocker, _lock); - return _numDocumentDitches; + return _numMMFilesDocumentMMFilesDitches; } /// @brief check whether the ditches contain a ditch of a certain type -bool Ditches::contains(Ditch::DitchType type) { +bool MMFilesDitches::contains(MMFilesDitch::DitchType type) { MUTEX_LOCKER(mutexLocker, _lock); - if (type == Ditch::TRI_DITCH_DOCUMENT) { + if (type == MMFilesDitch::TRI_DITCH_DOCUMENT) { // shortcut - return (_numDocumentDitches > 0); + return (_numMMFilesDocumentMMFilesDitches > 0); } auto const* ptr = _begin; @@ -259,19 +259,19 @@ bool Ditches::contains(Ditch::DitchType type) { } /// @brief removes and frees a ditch -void Ditches::freeDitch(Ditch* ditch) { +void MMFilesDitches::freeDitch(MMFilesDitch* ditch) { TRI_ASSERT(ditch != nullptr); - bool const isDocumentDitch = (ditch->type() == Ditch::TRI_DITCH_DOCUMENT); + bool const isMMFilesDocumentDitch = (ditch->type() == MMFilesDitch::TRI_DITCH_DOCUMENT); { MUTEX_LOCKER(mutexLocker, _lock); unlink(ditch); - if (isDocumentDitch) { + if (isMMFilesDocumentDitch) { // decrease counter - --_numDocumentDitches; + --_numMMFilesDocumentMMFilesDitches; } } @@ -281,7 +281,7 @@ void Ditches::freeDitch(Ditch* ditch) { /// @brief removes and frees a ditch /// this is used for ditches used by transactions or by externals to protect /// the flags by the lock -void Ditches::freeDocumentDitch(DocumentDitch* ditch, bool fromTransaction) { 
+void MMFilesDitches::freeMMFilesDocumentDitch(MMFilesDocumentDitch* ditch, bool fromTransaction) { TRI_ASSERT(ditch != nullptr); // First see who might still be using the ditch: @@ -295,17 +295,17 @@ void Ditches::freeDocumentDitch(DocumentDitch* ditch, bool fromTransaction) { unlink(ditch); // decrease counter - --_numDocumentDitches; + --_numMMFilesDocumentMMFilesDitches; } delete ditch; } /// @brief creates a new document ditch and links it -DocumentDitch* Ditches::createDocumentDitch(bool usedByTransaction, +MMFilesDocumentDitch* MMFilesDitches::createMMFilesDocumentDitch(bool usedByTransaction, char const* filename, int line) { try { - auto ditch = new DocumentDitch(this, usedByTransaction, filename, line); + auto ditch = new MMFilesDocumentDitch(this, usedByTransaction, filename, line); link(ditch); return ditch; @@ -315,10 +315,10 @@ DocumentDitch* Ditches::createDocumentDitch(bool usedByTransaction, } /// @brief creates a new replication ditch and links it -ReplicationDitch* Ditches::createReplicationDitch(char const* filename, +MMFilesReplicationDitch* MMFilesDitches::createMMFilesReplicationDitch(char const* filename, int line) { try { - auto ditch = new ReplicationDitch(this, filename, line); + auto ditch = new MMFilesReplicationDitch(this, filename, line); link(ditch); return ditch; @@ -328,10 +328,10 @@ ReplicationDitch* Ditches::createReplicationDitch(char const* filename, } /// @brief creates a new compaction ditch and links it -CompactionDitch* Ditches::createCompactionDitch(char const* filename, +MMFilesCompactionDitch* MMFilesDitches::createMMFilesCompactionDitch(char const* filename, int line) { try { - auto ditch = new CompactionDitch(this, filename, line); + auto ditch = new MMFilesCompactionDitch(this, filename, line); link(ditch); return ditch; @@ -341,13 +341,13 @@ CompactionDitch* Ditches::createCompactionDitch(char const* filename, } /// @brief creates a new datafile deletion ditch -DropDatafileDitch* Ditches::createDropDatafileDitch( 
+MMFilesDropDatafileDitch* MMFilesDitches::createMMFilesDropDatafileDitch( MMFilesDatafile* datafile, LogicalCollection* collection, std::function const& callback, char const* filename, int line) { try { auto ditch = - new DropDatafileDitch(this, datafile, collection, callback, filename, line); + new MMFilesDropDatafileDitch(this, datafile, collection, callback, filename, line); link(ditch); return ditch; @@ -357,13 +357,13 @@ DropDatafileDitch* Ditches::createDropDatafileDitch( } /// @brief creates a new datafile rename ditch -RenameDatafileDitch* Ditches::createRenameDatafileDitch( +MMFilesRenameDatafileDitch* MMFilesDitches::createMMFilesRenameDatafileDitch( MMFilesDatafile* datafile, MMFilesDatafile* compactor, LogicalCollection* collection, std::function const& callback, char const* filename, int line) { try { auto ditch = - new RenameDatafileDitch(this, datafile, compactor, collection, callback, filename, line); + new MMFilesRenameDatafileDitch(this, datafile, compactor, collection, callback, filename, line); link(ditch); return ditch; @@ -373,12 +373,12 @@ RenameDatafileDitch* Ditches::createRenameDatafileDitch( } /// @brief creates a new collection unload ditch -UnloadCollectionDitch* Ditches::createUnloadCollectionDitch( +MMFilesUnloadCollectionDitch* MMFilesDitches::createMMFilesUnloadCollectionDitch( LogicalCollection* collection, std::function const& callback, char const* filename, int line) { try { - auto ditch = new UnloadCollectionDitch(this, collection, callback, + auto ditch = new MMFilesUnloadCollectionDitch(this, collection, callback, filename, line); link(ditch); @@ -389,12 +389,12 @@ UnloadCollectionDitch* Ditches::createUnloadCollectionDitch( } /// @brief creates a new datafile drop ditch -DropCollectionDitch* Ditches::createDropCollectionDitch( +MMFilesDropCollectionDitch* MMFilesDitches::createMMFilesDropCollectionDitch( arangodb::LogicalCollection* collection, std::function callback, char const* filename, int line) { try { - auto ditch = 
new DropCollectionDitch(this, collection, callback, + auto ditch = new MMFilesDropCollectionDitch(this, collection, callback, filename, line); link(ditch); @@ -405,13 +405,13 @@ DropCollectionDitch* Ditches::createDropCollectionDitch( } /// @brief inserts the ditch into the linked list of ditches -void Ditches::link(Ditch* ditch) { +void MMFilesDitches::link(MMFilesDitch* ditch) { TRI_ASSERT(ditch != nullptr); ditch->_next = nullptr; ditch->_prev = nullptr; - bool const isDocumentDitch = (ditch->type() == Ditch::TRI_DITCH_DOCUMENT); + bool const isMMFilesDocumentDitch = (ditch->type() == MMFilesDitch::TRI_DITCH_DOCUMENT); MUTEX_LOCKER(mutexLocker, _lock); // FIX_MUTEX @@ -428,14 +428,14 @@ void Ditches::link(Ditch* ditch) { _end = ditch; - if (isDocumentDitch) { + if (isMMFilesDocumentDitch) { // increase counter - ++_numDocumentDitches; + ++_numMMFilesDocumentMMFilesDitches; } } /// @brief unlinks the ditch from the linked list of ditches -void Ditches::unlink(Ditch* ditch) { +void MMFilesDitches::unlink(MMFilesDitch* ditch) { // ditch is at the beginning of the chain if (ditch->_prev == nullptr) { _begin = ditch->_next; diff --git a/arangod/MMFiles/MMFilesDitch.h b/arangod/MMFiles/MMFilesDitch.h index 66230edfb6..4f249ed48d 100644 --- a/arangod/MMFiles/MMFilesDitch.h +++ b/arangod/MMFiles/MMFilesDitch.h @@ -32,19 +32,19 @@ struct MMFilesDatafile; namespace arangodb { class LogicalCollection; -class Ditches; +class MMFilesDitches; -class Ditch { - friend class Ditches; +class MMFilesDitch { + friend class MMFilesDitches; protected: - Ditch(Ditch const&) = delete; - Ditch& operator=(Ditch const&) = delete; + MMFilesDitch(MMFilesDitch const&) = delete; + MMFilesDitch& operator=(MMFilesDitch const&) = delete; - Ditch(Ditches*, char const*, int); + MMFilesDitch(MMFilesDitches*, char const*, int); public: - virtual ~Ditch(); + virtual ~MMFilesDitch(); public: /// @brief ditch type @@ -72,33 +72,33 @@ class Ditch { int line() const { return _line; } /// @brief return 
the next ditch in the linked list - inline Ditch* next() const { return _next; } + inline MMFilesDitch* next() const { return _next; } /// @brief return the link to all ditches - Ditches* ditches() { return _ditches; } + MMFilesDitches* ditches() { return _ditches; } /// @brief return the associated collection LogicalCollection* collection() const; protected: - Ditches* _ditches; + MMFilesDitches* _ditches; private: - Ditch* _prev; - Ditch* _next; + MMFilesDitch* _prev; + MMFilesDitch* _next; char const* _filename; int _line; }; /// @brief document ditch -class DocumentDitch final : public Ditch { - friend class Ditches; +class MMFilesDocumentDitch final : public MMFilesDitch { + friend class MMFilesDitches; public: - DocumentDitch(Ditches* ditches, bool usedByTransaction, char const* filename, + MMFilesDocumentDitch(MMFilesDitches* ditches, bool usedByTransaction, char const* filename, int line); - ~DocumentDitch(); + ~MMFilesDocumentDitch(); public: DitchType type() const override final { return TRI_DITCH_DOCUMENT; } @@ -112,11 +112,11 @@ class DocumentDitch final : public Ditch { }; /// @brief replication ditch -class ReplicationDitch final : public Ditch { +class MMFilesReplicationDitch final : public MMFilesDitch { public: - ReplicationDitch(Ditches* ditches, char const* filename, int line); + MMFilesReplicationDitch(MMFilesDitches* ditches, char const* filename, int line); - ~ReplicationDitch(); + ~MMFilesReplicationDitch(); public: DitchType type() const override final { return TRI_DITCH_REPLICATION; } @@ -125,11 +125,11 @@ class ReplicationDitch final : public Ditch { }; /// @brief compaction ditch -class CompactionDitch final : public Ditch { +class MMFilesCompactionDitch final : public MMFilesDitch { public: - CompactionDitch(Ditches* ditches, char const* filename, int line); + MMFilesCompactionDitch(MMFilesDitches* ditches, char const* filename, int line); - ~CompactionDitch(); + ~MMFilesCompactionDitch(); public: DitchType type() const override final { 
return TRI_DITCH_COMPACTION; } @@ -138,14 +138,14 @@ class CompactionDitch final : public Ditch { }; /// @brief datafile removal ditch -class DropDatafileDitch final : public Ditch { +class MMFilesDropDatafileDitch final : public MMFilesDitch { public: - DropDatafileDitch(Ditches* ditches, MMFilesDatafile* datafile, + MMFilesDropDatafileDitch(MMFilesDitches* ditches, MMFilesDatafile* datafile, LogicalCollection* collection, std::function const& callback, char const* filename, int line); - ~DropDatafileDitch(); + ~MMFilesDropDatafileDitch(); public: DitchType type() const override final { return TRI_DITCH_DATAFILE_DROP; } @@ -161,14 +161,14 @@ class DropDatafileDitch final : public Ditch { }; /// @brief datafile rename ditch -class RenameDatafileDitch final : public Ditch { +class MMFilesRenameDatafileDitch final : public MMFilesDitch { public: - RenameDatafileDitch(Ditches* ditches, MMFilesDatafile* datafile, + MMFilesRenameDatafileDitch(MMFilesDitches* ditches, MMFilesDatafile* datafile, MMFilesDatafile* compactor, LogicalCollection* collection, std::function const& callback, char const* filename, int line); - ~RenameDatafileDitch(); + ~MMFilesRenameDatafileDitch(); public: DitchType type() const override final { return TRI_DITCH_DATAFILE_RENAME; } @@ -185,14 +185,14 @@ class RenameDatafileDitch final : public Ditch { }; /// @brief collection unload ditch -class UnloadCollectionDitch final : public Ditch { +class MMFilesUnloadCollectionDitch final : public MMFilesDitch { public: - UnloadCollectionDitch( - Ditches* ditches, LogicalCollection* collection, + MMFilesUnloadCollectionDitch( + MMFilesDitches* ditches, LogicalCollection* collection, std::function const& callback, char const* filename, int line); - ~UnloadCollectionDitch(); + ~MMFilesUnloadCollectionDitch(); DitchType type() const override final { return TRI_DITCH_COLLECTION_UNLOAD; } @@ -206,14 +206,14 @@ class UnloadCollectionDitch final : public Ditch { }; /// @brief collection drop ditch -class 
DropCollectionDitch final : public Ditch { +class MMFilesDropCollectionDitch final : public MMFilesDitch { public: - DropCollectionDitch( - arangodb::Ditches* ditches, arangodb::LogicalCollection* collection, + MMFilesDropCollectionDitch( + arangodb::MMFilesDitches* ditches, arangodb::LogicalCollection* collection, std::function callback, char const* filename, int line); - ~DropCollectionDitch(); + ~MMFilesDropCollectionDitch(); DitchType type() const override final { return TRI_DITCH_COLLECTION_DROP; } @@ -227,14 +227,14 @@ class DropCollectionDitch final : public Ditch { }; /// @brief doubly linked list of ditches -class Ditches { +class MMFilesDitches { public: - Ditches(Ditches const&) = delete; - Ditches& operator=(Ditches const&) = delete; - Ditches() = delete; + MMFilesDitches(MMFilesDitches const&) = delete; + MMFilesDitches& operator=(MMFilesDitches const&) = delete; + MMFilesDitches() = delete; - explicit Ditches(LogicalCollection*); - ~Ditches(); + explicit MMFilesDitches(LogicalCollection*); + ~MMFilesDitches(); public: /// @brief destroy the ditches - to be called on shutdown only @@ -248,75 +248,75 @@ class Ditches { /// @brief process the first element from the list /// the list will remain unchanged if the first element is either a - /// DocumentDitch, a ReplicationDitch or a CompactionDitch, or if the list - /// contains any DocumentDitches. - Ditch* process(bool&, std::function); + /// MMFilesDocumentDitch, a MMFilesReplicationDitch or a MMFilesCompactionDitch, or if the list + /// contains any MMFilesDocumentMMFilesDitches. 
+ MMFilesDitch* process(bool&, std::function); /// @brief return the type name of the ditch at the head of the active ditches char const* head(); /// @brief return the number of document ditches active - uint64_t numDocumentDitches(); + uint64_t numMMFilesDocumentMMFilesDitches(); /// @brief check whether the ditches contain a ditch of a certain type - bool contains(Ditch::DitchType); + bool contains(MMFilesDitch::DitchType); /// @brief unlinks and frees a ditch - void freeDitch(Ditch*); + void freeDitch(MMFilesDitch*); /// @brief unlinks and frees a document ditch /// this is used for ditches used by transactions or by externals to protect /// the flags by the lock - void freeDocumentDitch(DocumentDitch*, bool fromTransaction); + void freeMMFilesDocumentDitch(MMFilesDocumentDitch*, bool fromTransaction); /// @brief creates a new document ditch and links it - DocumentDitch* createDocumentDitch(bool usedByTransaction, + MMFilesDocumentDitch* createMMFilesDocumentDitch(bool usedByTransaction, char const* filename, int line); /// @brief creates a new replication ditch and links it - ReplicationDitch* createReplicationDitch(char const* filename, int line); + MMFilesReplicationDitch* createMMFilesReplicationDitch(char const* filename, int line); /// @brief creates a new compaction ditch and links it - CompactionDitch* createCompactionDitch(char const* filename, int line); + MMFilesCompactionDitch* createMMFilesCompactionDitch(char const* filename, int line); /// @brief creates a new datafile deletion ditch - DropDatafileDitch* createDropDatafileDitch( + MMFilesDropDatafileDitch* createMMFilesDropDatafileDitch( MMFilesDatafile* datafile, LogicalCollection* collection, std::function const& callback, char const* filename, int line); /// @brief creates a new datafile rename ditch - RenameDatafileDitch* createRenameDatafileDitch( + MMFilesRenameDatafileDitch* createMMFilesRenameDatafileDitch( MMFilesDatafile* datafile, MMFilesDatafile* compactor, LogicalCollection* 
collection, std::function const& callback, char const* filename, int line); /// @brief creates a new collection unload ditch - UnloadCollectionDitch* createUnloadCollectionDitch( + MMFilesUnloadCollectionDitch* createMMFilesUnloadCollectionDitch( LogicalCollection* collection, std::function const& callback, char const* filename, int line); /// @brief creates a new collection drop ditch - DropCollectionDitch* createDropCollectionDitch( + MMFilesDropCollectionDitch* createMMFilesDropCollectionDitch( arangodb::LogicalCollection* collection, std::function callback, char const* filename, int line); private: /// @brief inserts the ditch into the linked list of ditches - void link(Ditch*); + void link(MMFilesDitch*); /// @brief unlinks the ditch from the linked list of ditches - void unlink(Ditch*); + void unlink(MMFilesDitch*); private: LogicalCollection* _collection; arangodb::Mutex _lock; - Ditch* _begin; - Ditch* _end; - uint64_t _numDocumentDitches; + MMFilesDitch* _begin; + MMFilesDitch* _end; + uint64_t _numMMFilesDocumentMMFilesDitches; }; } diff --git a/arangod/MMFiles/MMFilesEngine.cpp b/arangod/MMFiles/MMFilesEngine.cpp index 70b26fbb71..50108c2b8b 100644 --- a/arangod/MMFiles/MMFilesEngine.cpp +++ b/arangod/MMFiles/MMFilesEngine.cpp @@ -2220,7 +2220,7 @@ char* MMFilesEngine::nextFreeMarkerPosition( // the compactor will not run during recovery auto ditch = arangodb::MMFilesCollection::toMMFilesCollection(collection) ->ditches() - ->createDocumentDitch(false, __FILE__, __LINE__); + ->createMMFilesDocumentDitch(false, __FILE__, __LINE__); if (ditch == nullptr) { THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY); diff --git a/arangod/MMFiles/MMFilesLogfileManager.cpp b/arangod/MMFiles/MMFilesLogfileManager.cpp index 4783b6ce0d..b1f7cac313 100644 --- a/arangod/MMFiles/MMFilesLogfileManager.cpp +++ b/arangod/MMFiles/MMFilesLogfileManager.cpp @@ -535,7 +535,7 @@ void MMFilesLogfileManager::unprepare() { } // registers a transaction -int 
MMFilesLogfileManager::registerTransaction(TRI_voc_tid_t transactionId) { +int MMFilesLogfileManager::registerTransaction(TRI_voc_tid_t transactionId, bool isReadOnlyTransaction) { auto lastCollectedId = _lastCollectedId.load(); auto lastSealedId = _lastSealedId.load(); @@ -546,6 +546,16 @@ int MMFilesLogfileManager::registerTransaction(TRI_voc_tid_t transactionId) { TRI_ASSERT(lastCollectedId <= lastSealedId); + if (isReadOnlyTransaction) { + // in case this is a read-only transaction, we are sure that the transaction can + // only see committed data (as itself it will not write anything, and write transactions + // run exclusively). we thus can allow the WAL collector to already seal and collect + // logfiles. the only thing that needs to be ensured for read-only transactions is + // that a logfile does not get thrown away while the read-only transaction is + // ongoing + lastSealedId = 0; + } + try { auto data = std::make_unique(lastCollectedId, lastSealedId); TransactionManagerFeature::MANAGER->registerTransaction(transactionId, std::move(data)); @@ -1336,8 +1346,8 @@ MMFilesWalLogfile* MMFilesLogfileManager::getCollectableLogfile() { // iterate over all active readers and find their minimum used logfile id MMFilesWalLogfile::IdType minId = UINT64_MAX; - auto cb = [&minId](TRI_voc_tid_t, TransactionData* data) { - MMFilesWalLogfile::IdType lastWrittenId = static_cast(data)->lastSealedId; + auto cb = [&minId](TRI_voc_tid_t, TransactionData const* data) { + MMFilesWalLogfile::IdType lastWrittenId = static_cast(data)->lastSealedId; if (lastWrittenId < minId && lastWrittenId != 0) { minId = lastWrittenId; @@ -1383,8 +1393,8 @@ MMFilesWalLogfile* MMFilesLogfileManager::getRemovableLogfile() { MMFilesWalLogfile::IdType minId = UINT64_MAX; // iterate over all active transactions and find their minimum used logfile id - auto cb = [&minId](TRI_voc_tid_t, TransactionData* data) { - MMFilesWalLogfile::IdType lastCollectedId = static_cast(data)->lastCollectedId; + auto cb 
= [&minId](TRI_voc_tid_t, TransactionData const* data) { + MMFilesWalLogfile::IdType lastCollectedId = static_cast(data)->lastCollectedId; if (lastCollectedId < minId && lastCollectedId != 0) { minId = lastCollectedId; @@ -1553,15 +1563,15 @@ MMFilesLogfileManager::runningTransactions() { MMFilesWalLogfile::IdType lastCollectedId = UINT64_MAX; MMFilesWalLogfile::IdType lastSealedId = UINT64_MAX; - auto cb = [&count, &lastCollectedId, &lastSealedId](TRI_voc_tid_t, TransactionData* data) { + auto cb = [&count, &lastCollectedId, &lastSealedId](TRI_voc_tid_t, TransactionData const* data) { ++count; - MMFilesWalLogfile::IdType value = static_cast(data)->lastCollectedId; + MMFilesWalLogfile::IdType value = static_cast(data)->lastCollectedId; if (value < lastCollectedId && value != 0) { lastCollectedId = value; } - value = static_cast(data)->lastSealedId; + value = static_cast(data)->lastSealedId; if (value < lastSealedId && value != 0) { lastSealedId = value; } diff --git a/arangod/MMFiles/MMFilesLogfileManager.h b/arangod/MMFiles/MMFilesLogfileManager.h index 00995adc8b..1b96176ae3 100644 --- a/arangod/MMFiles/MMFilesLogfileManager.h +++ b/arangod/MMFiles/MMFilesLogfileManager.h @@ -68,8 +68,8 @@ struct MMFilesTransactionData final : public TransactionData { MMFilesTransactionData() = delete; MMFilesTransactionData(MMFilesWalLogfile::IdType lastCollectedId, MMFilesWalLogfile::IdType lastSealedId) : lastCollectedId(lastCollectedId), lastSealedId(lastSealedId) {} - MMFilesWalLogfile::IdType lastCollectedId; - MMFilesWalLogfile::IdType lastSealedId; + MMFilesWalLogfile::IdType const lastCollectedId; + MMFilesWalLogfile::IdType const lastSealedId; }; struct MMFilesLogfileManagerState { @@ -218,7 +218,7 @@ class MMFilesLogfileManager final : public application_features::ApplicationFeat } // registers a transaction - int registerTransaction(TRI_voc_tid_t); + int registerTransaction(TRI_voc_tid_t id, bool isReadOnlyTransaction); // return the set of dropped collections /// 
this is used during recovery and not used afterwards @@ -459,8 +459,8 @@ class MMFilesLogfileManager final : public application_features::ApplicationFeat bool _allowOversizeEntries = true; bool _useMLock = false; - std::string _directory = ""; - uint32_t _historicLogfiles = 10; + std::string _directory; + uint32_t _historicLogfiles = 10; bool _ignoreLogfileErrors = false; bool _ignoreRecoveryErrors = false; uint64_t _flushTimeout = 15000; diff --git a/arangod/MMFiles/MMFilesTransactionState.cpp b/arangod/MMFiles/MMFilesTransactionState.cpp index 32734ba438..ec16163c35 100644 --- a/arangod/MMFiles/MMFilesTransactionState.cpp +++ b/arangod/MMFiles/MMFilesTransactionState.cpp @@ -104,8 +104,8 @@ int MMFilesTransactionState::beginTransaction(transaction::Hints hints) { _id = TRI_NewTickServer(); // register a protector - int res = logfileManager->registerTransaction(_id); - + int res = logfileManager->registerTransaction(_id, isReadOnlyTransaction()); + if (res != TRI_ERROR_NO_ERROR) { return res; } @@ -195,6 +195,12 @@ int MMFilesTransactionState::abortTransaction(transaction::Methods* activeTrx) { updateStatus(transaction::Status::ABORTED); + if (_hasOperations) { + // must clean up the query cache because the transaction + // may have queried something via AQL that is now rolled back + clearQueryCache(); + } + freeOperations(activeTrx); } @@ -336,6 +342,9 @@ int MMFilesTransactionState::addOperation(TRI_voc_rid_t revisionId, copy.release(); operation.swapped(); _hasOperations = true; + + arangodb::aql::QueryCache::instance()->invalidate( + _vocbase, collection->name()); } physical->setRevision(revisionId, false); diff --git a/arangod/Replication/InitialSyncer.cpp b/arangod/Replication/InitialSyncer.cpp index 14b0fd86a3..213526b855 100644 --- a/arangod/Replication/InitialSyncer.cpp +++ b/arangod/Replication/InitialSyncer.cpp @@ -789,7 +789,7 @@ int InitialSyncer::handleCollectionDump( return res; } - trx.orderDitch(col->cid()); // will throw when it fails + 
trx.pinData(col->cid()); // will throw when it fails if (res == TRI_ERROR_NO_ERROR) { res = applyCollectionDump(trx, collectionName, response.get(), markersProcessed, errorMsg); @@ -1038,7 +1038,7 @@ int InitialSyncer::handleSyncKeys(arangodb::LogicalCollection* col, // fetch all local keys from primary index std::vector markers; - DocumentDitch* ditch = nullptr; + MMFilesDocumentDitch* ditch = nullptr; // acquire a replication ditch so no datafiles are thrown away from now on // note: the ditch also protects against unloading the collection @@ -1054,7 +1054,7 @@ int InitialSyncer::handleSyncKeys(arangodb::LogicalCollection* col, ditch = arangodb::MMFilesCollection::toMMFilesCollection(col) ->ditches() - ->createDocumentDitch(false, __FILE__, __LINE__); + ->createMMFilesDocumentDitch(false, __FILE__, __LINE__); if (ditch == nullptr) { return TRI_ERROR_OUT_OF_MEMORY; @@ -1288,7 +1288,7 @@ int InitialSyncer::handleSyncKeys(arangodb::LogicalCollection* col, return res; } - trx.orderDitch(col->cid()); // will throw when it fails + trx.pinData(col->cid()); // will throw when it fails // We do not take responsibility for the index. // The LogicalCollection is protected by trx. 
@@ -1871,7 +1871,7 @@ int InitialSyncer::handleCollection(VPackSlice const& parameters, return res; } - trx.orderDitch(col->cid()); // will throw when it fails + trx.pinData(col->cid()); // will throw when it fails LogicalCollection* document = trx.documentCollection(); TRI_ASSERT(document != nullptr); diff --git a/arangod/RestHandler/RestEdgesHandler.cpp b/arangod/RestHandler/RestEdgesHandler.cpp index d3cf4029e6..322cd6e5f5 100644 --- a/arangod/RestHandler/RestEdgesHandler.cpp +++ b/arangod/RestHandler/RestEdgesHandler.cpp @@ -96,7 +96,7 @@ bool RestEdgesHandler::getEdgesForVertex( std::string const& id, std::string const& collectionName, TRI_edge_direction_e direction, SingleCollectionTransaction& trx, std::function cb) { - trx.orderDitch(trx.cid()); // will throw when it fails + trx.pinData(trx.cid()); // will throw when it fails // Create a conditionBuilder that manages the AstNodes for querying aql::EdgeConditionBuilderContainer condBuilder; diff --git a/arangod/RestHandler/RestReplicationHandler.cpp b/arangod/RestHandler/RestReplicationHandler.cpp index 05ffb74a78..52d13ab80d 100644 --- a/arangod/RestHandler/RestReplicationHandler.cpp +++ b/arangod/RestHandler/RestReplicationHandler.cpp @@ -1199,7 +1199,7 @@ void RestReplicationHandler::handleCommandClusterInventory() { /// @brief creates a collection, based on the VelocyPack provided TODO: MOVE //////////////////////////////////////////////////////////////////////////////// -int RestReplicationHandler::createCollection(VPackSlice const& slice, +int RestReplicationHandler::createCollection(VPackSlice slice, arangodb::LogicalCollection** dst, bool reuseId) { if (dst != nullptr) { @@ -1242,17 +1242,20 @@ int RestReplicationHandler::createCollection(VPackSlice const& slice, return TRI_ERROR_NO_ERROR; } - int res = TRI_ERROR_NO_ERROR; - try { - col = _vocbase->createCollection(slice, cid, true); - } catch (basics::Exception const& ex) { - res = ex.code(); - } catch (...) 
{ - res = TRI_ERROR_INTERNAL; - } + // always use current version number when restoring a collection, + // because the collection is effectively NEW + VPackBuilder patch; + patch.openObject(); + patch.add("version", VPackValue(LogicalCollection::VERSION_31)); + patch.close(); + + VPackBuilder builder = VPackCollection::merge(slice, patch.slice(), false); + slice = builder.slice(); - if (res != TRI_ERROR_NO_ERROR) { - return res; + col = _vocbase->createCollection(slice, cid, true); + + if (col == nullptr) { + return TRI_ERROR_INTERNAL; } TRI_ASSERT(col != nullptr); @@ -1660,6 +1663,10 @@ int RestReplicationHandler::processRestoreCollectionCoordinator( TRI_ASSERT(replicationFactor > 0); toMerge.add("replicationFactor", VPackValue(replicationFactor)); } + + // always use current version number when restoring a collection, + // because the collection is effectively NEW + toMerge.add("version", VPackValue(LogicalCollection::VERSION_31)); toMerge.close(); // TopLevel VPackSlice const type = parameters.get("type"); @@ -1675,6 +1682,7 @@ int RestReplicationHandler::processRestoreCollectionCoordinator( VPackBuilder mergedBuilder = VPackCollection::merge(parameters, sliceToMerge, false); VPackSlice const merged = mergedBuilder.slice(); + try { auto col = ClusterMethods::createCollectionOnCoordinator(collectionType, _vocbase, merged); diff --git a/arangod/RestHandler/RestReplicationHandler.h b/arangod/RestHandler/RestReplicationHandler.h index 551a885086..336cee5214 100644 --- a/arangod/RestHandler/RestReplicationHandler.h +++ b/arangod/RestHandler/RestReplicationHandler.h @@ -163,7 +163,7 @@ class RestReplicationHandler : public RestVocbaseBaseHandler { /// @brief creates a collection, based on the VelocyPack provided TODO: MOVE ////////////////////////////////////////////////////////////////////////////// - int createCollection(VPackSlice const&, arangodb::LogicalCollection**, bool); + int createCollection(VPackSlice, arangodb::LogicalCollection**, bool); 
////////////////////////////////////////////////////////////////////////////// /// @brief handle a restore command for a specific collection diff --git a/arangod/RestServer/DatabaseFeature.cpp b/arangod/RestServer/DatabaseFeature.cpp index 2498969ad5..7b3bd2cd6b 100644 --- a/arangod/RestServer/DatabaseFeature.cpp +++ b/arangod/RestServer/DatabaseFeature.cpp @@ -25,6 +25,7 @@ #include "Agency/v8-agency.h" #include "ApplicationFeatures/ApplicationServer.h" +#include "Aql/PlanCache.h" #include "Aql/QueryCache.h" #include "Aql/QueryRegistry.h" #include "Basics/ArangoGlobalContext.h" @@ -90,7 +91,7 @@ void DatabaseManagerThread::run() { auto theLists = databaseFeature->_databasesLists.load(); for (TRI_vocbase_t* vocbase : theLists->_droppedDatabases) { - if (!vocbase->canBeDropped()) { + if (!vocbase->isDangling()) { continue; } @@ -133,10 +134,12 @@ void DatabaseManagerThread::run() { // not possible that another thread has seen this very database // and tries to free it at the same time! } - + if (database->type() != TRI_VOCBASE_TYPE_COORDINATOR) { // regular database // --------------------------- + + TRI_ASSERT(!database->isSystem()); LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "physically removing database directory '" << engine->databasePath(database) << "' of database '" @@ -606,7 +609,8 @@ int DatabaseFeature::createDatabase(TRI_voc_tick_t id, std::string const& name, } // increase reference counter - vocbase->use(); + bool result = vocbase->use(); + TRI_ASSERT(result); } { @@ -718,13 +722,6 @@ int DatabaseFeature::dropDatabase(std::string const& name, bool writeMarker, // mark as deleted TRI_ASSERT(vocbase->type() == TRI_VOCBASE_TYPE_NORMAL); - if (!vocbase->markAsDropped()) { - // deleted by someone else? 
- delete newLists; - events::DropDatabase(name, TRI_ERROR_ARANGO_DATABASE_NOT_FOUND); - return TRI_ERROR_ARANGO_DATABASE_NOT_FOUND; - } - newLists->_databases.erase(it); newLists->_droppedDatabases.insert(vocbase); } @@ -739,14 +736,21 @@ int DatabaseFeature::dropDatabase(std::string const& name, bool writeMarker, _databasesLists = newLists; _databasesProtector.scan(); delete oldLists; + + TRI_ASSERT(!vocbase->isSystem()); + bool result = vocbase->markAsDropped(); + TRI_ASSERT(result); vocbase->setIsOwnAppsDirectory(removeAppsDirectory); // invalidate all entries for the database + arangodb::aql::PlanCache::instance()->invalidate(vocbase); arangodb::aql::QueryCache::instance()->invalidate(vocbase); engine->prepareDropDatabase(vocbase, writeMarker, res); } + // must not use the database after here, as it may now be + // deleted by the DatabaseManagerThread! if (res == TRI_ERROR_NO_ERROR && waitForDeletion) { engine->waitUntilDeletion(id, true, res); @@ -812,6 +816,9 @@ std::vector DatabaseFeature::getDatabaseIds( for (auto& p : theLists->_databases) { TRI_vocbase_t* vocbase = p.second; TRI_ASSERT(vocbase != nullptr); + if (vocbase->isDropped()) { + continue; + } if (includeSystem || vocbase->name() != TRI_VOC_SYSTEM_DATABASE) { ids.emplace_back(vocbase->id()); } @@ -832,7 +839,9 @@ std::vector DatabaseFeature::getDatabaseNames() { for (auto& p : theLists->_databases) { TRI_vocbase_t* vocbase = p.second; TRI_ASSERT(vocbase != nullptr); - + if (vocbase->isDropped()) { + continue; + } names.emplace_back(vocbase->name()); } } @@ -856,6 +865,9 @@ std::vector DatabaseFeature::getDatabaseNamesForUser( for (auto& p : theLists->_databases) { TRI_vocbase_t* vocbase = p.second; TRI_ASSERT(vocbase != nullptr); + if (vocbase->isDropped()) { + continue; + } auto authentication = application_features::ApplicationServer::getFeature< AuthenticationFeature>("Authentication"); @@ -877,7 +889,8 @@ std::vector DatabaseFeature::getDatabaseNamesForUser( } void 
DatabaseFeature::useSystemDatabase() { - useDatabase(TRI_VOC_SYSTEM_DATABASE); + bool result = useDatabase(TRI_VOC_SYSTEM_DATABASE); + TRI_ASSERT(result); } /// @brief get a coordinator database by its id @@ -924,8 +937,9 @@ TRI_vocbase_t* DatabaseFeature::useDatabase(std::string const& name) { if (it != theLists->_databases.end()) { TRI_vocbase_t* vocbase = it->second; - vocbase->use(); - return vocbase; + if (vocbase->use()) { + return vocbase; + } } return nullptr; @@ -939,8 +953,10 @@ TRI_vocbase_t* DatabaseFeature::useDatabase(TRI_voc_tick_t id) { TRI_vocbase_t* vocbase = p.second; if (vocbase->id() == id) { - vocbase->use(); - return vocbase; + if (vocbase->use()) { + return vocbase; + } + break; } } diff --git a/arangod/RestServer/VocbaseContext.cpp b/arangod/RestServer/VocbaseContext.cpp index c16e9d7574..dd995eca6a 100644 --- a/arangod/RestServer/VocbaseContext.cpp +++ b/arangod/RestServer/VocbaseContext.cpp @@ -54,9 +54,15 @@ VocbaseContext::VocbaseContext(GeneralRequest* request, TRI_vocbase_t* vocbase) TRI_ASSERT(_vocbase != nullptr); _authentication = FeatureCacheFeature::instance()->authenticationFeature(); TRI_ASSERT(_authentication != nullptr); + + // _vocbase has already been refcounted for us + TRI_ASSERT(!_vocbase->isDangling()); } -VocbaseContext::~VocbaseContext() { _vocbase->release(); } +VocbaseContext::~VocbaseContext() { + TRI_ASSERT(!_vocbase->isDangling()); + _vocbase->release(); +} //////////////////////////////////////////////////////////////////////////////// /// @brief checks the authentication diff --git a/arangod/RestServer/VocbaseContext.h b/arangod/RestServer/VocbaseContext.h index 3fe388f501..e1865a892d 100644 --- a/arangod/RestServer/VocbaseContext.h +++ b/arangod/RestServer/VocbaseContext.h @@ -45,6 +45,9 @@ class VocbaseContext : public arangodb::RequestContext { static double ServerSessionTtl; public: + VocbaseContext(VocbaseContext const&) = delete; + VocbaseContext& operator=(VocbaseContext const&) = delete; + 
VocbaseContext(GeneralRequest*, TRI_vocbase_t*); ~VocbaseContext(); diff --git a/arangod/RestServer/arangod.cpp b/arangod/RestServer/arangod.cpp index de4789c5ba..60502e8e7c 100644 --- a/arangod/RestServer/arangod.cpp +++ b/arangod/RestServer/arangod.cpp @@ -28,7 +28,6 @@ #include "Actions/ActionFeature.h" #include "Agency/AgencyFeature.h" -#include "Aql/AqlFunctionFeature.h" #include "ApplicationFeatures/ConfigFeature.h" #include "ApplicationFeatures/DaemonFeature.h" #include "ApplicationFeatures/GreetingsFeature.h" @@ -41,7 +40,9 @@ #include "ApplicationFeatures/TempFeature.h" #include "ApplicationFeatures/V8PlatformFeature.h" #include "ApplicationFeatures/VersionFeature.h" +#include "Aql/AqlFunctionFeature.h" #include "Basics/ArangoGlobalContext.h" +#include "Cache/CacheManagerFeature.h" #include "Cluster/ClusterFeature.h" #include "GeneralServer/AuthenticationFeature.h" #include "GeneralServer/GeneralServerFeature.h" @@ -77,14 +78,14 @@ #include "StorageEngine/EngineSelectorFeature.h" // TODO - the following MMFiles includes should probably be removed -#include "MMFiles/MMFilesLogfileManager.h" +#include "MMFiles/MMFilesLogfileManager.h" #include "MMFiles/MMFilesPersistentIndexFeature.h" #include "MMFiles/MMFilesWalRecoveryFeature.h" // #include "StorageEngine/RocksDBEngine.h" // enable when adding Rocksdb Engine - // this include will be disabled until - // we begin to implement the RocksDB - // engine +// this include will be disabled until +// we begin to implement the RocksDB +// engine #include "MMFiles/MMFilesEngine.h" #include "V8Server/FoxxQueuesFeature.h" #include "V8Server/V8DealerFeature.h" @@ -130,7 +131,9 @@ static int runServer(int argc, char** argv) { server.addFeature(new AuthenticationFeature(&server)); server.addFeature(new AqlFeature(&server)); server.addFeature(new BootstrapFeature(&server)); - server.addFeature(new CheckVersionFeature(&server, &ret, nonServerFeatures)); + server.addFeature(new CacheManagerFeature(&server)); + 
server.addFeature( + new CheckVersionFeature(&server, &ret, nonServerFeatures)); server.addFeature(new ClusterFeature(&server)); server.addFeature(new ConfigFeature(&server, name)); server.addFeature(new ConsoleFeature(&server)); @@ -191,7 +194,8 @@ static int runServer(int argc, char** argv) { // storage engines server.addFeature(new MMFilesEngine(&server)); server.addFeature(new MMFilesWalRecoveryFeature(&server)); - //server.addFeature(new RocksDBEngine(&server)); //enable RocksDB storage here + // server.addFeature(new RocksDBEngine(&server)); //enable RocksDB storage + // here try { server.run(argc, argv); @@ -200,23 +204,27 @@ static int runServer(int argc, char** argv) { ret = EXIT_SUCCESS; } } catch (std::exception const& ex) { - LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "arangod terminated because of an unhandled exception: " - << ex.what(); + LOG_TOPIC(ERR, arangodb::Logger::FIXME) + << "arangod terminated because of an unhandled exception: " + << ex.what(); ret = EXIT_FAILURE; } catch (...) { - LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "arangod terminated because of an unhandled exception of " - "unknown type"; + LOG_TOPIC(ERR, arangodb::Logger::FIXME) + << "arangod terminated because of an unhandled exception of " + "unknown type"; ret = EXIT_FAILURE; } Logger::flush(); return context.exit(ret); } catch (std::exception const& ex) { - LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "arangod terminated because of an unhandled exception: " - << ex.what(); + LOG_TOPIC(ERR, arangodb::Logger::FIXME) + << "arangod terminated because of an unhandled exception: " + << ex.what(); } catch (...) 
{ - LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "arangod terminated because of an unhandled exception of " - "unknown type"; + LOG_TOPIC(ERR, arangodb::Logger::FIXME) + << "arangod terminated because of an unhandled exception of " + "unknown type"; } exit(EXIT_FAILURE); } diff --git a/arangod/Scheduler/JobQueue.cpp b/arangod/Scheduler/JobQueue.cpp index 6acb6c5dd4..fda1239562 100644 --- a/arangod/Scheduler/JobQueue.cpp +++ b/arangod/Scheduler/JobQueue.cpp @@ -76,7 +76,10 @@ class JobQueueThread final : public Thread { std::unique_ptr releaseGuard(job); - job->_callback(std::move(job->_handler)); + try { + job->_callback(std::move(job->_handler)); + } catch (...) { + } jobQueue->releaseActive(); jobQueue->wakeup(); }); diff --git a/arangod/Scheduler/Scheduler.cpp b/arangod/Scheduler/Scheduler.cpp index ca6e3313ce..1f3505bf5b 100644 --- a/arangod/Scheduler/Scheduler.cpp +++ b/arangod/Scheduler/Scheduler.cpp @@ -212,6 +212,10 @@ bool Scheduler::start(ConditionVariable* cv) { _nrRealMaximum = 4 * _nrMaximal; } + if (_nrRealMaximum <= 64) { + _nrRealMaximum = 64; + } + for (size_t i = 0; i < 2; ++i) { startNewThread(); } diff --git a/arangod/Scheduler/SocketTcp.cpp b/arangod/Scheduler/SocketTcp.cpp index 606af39252..f1d8dc9b84 100644 --- a/arangod/Scheduler/SocketTcp.cpp +++ b/arangod/Scheduler/SocketTcp.cpp @@ -24,7 +24,9 @@ using namespace arangodb; -size_t SocketTcp::write(basics::StringBuffer* buffer, boost::system::error_code& ec) { +size_t SocketTcp::write(basics::StringBuffer* buffer, + boost::system::error_code& ec) { + MUTEX_LOCKER(guard, _lock); if (_encrypted) { return socketcommon::doWrite(_sslSocket, buffer, ec); } else { @@ -32,7 +34,9 @@ size_t SocketTcp::write(basics::StringBuffer* buffer, boost::system::error_code& } } -void SocketTcp::asyncWrite(boost::asio::mutable_buffers_1 const& buffer, AsyncHandler const& handler) { +void SocketTcp::asyncWrite(boost::asio::mutable_buffers_1 const& buffer, + AsyncHandler const& handler) { + MUTEX_LOCKER(guard, 
_lock); if (_encrypted) { return socketcommon::doAsyncWrite(_sslSocket, buffer, handler); } else { @@ -40,7 +44,9 @@ void SocketTcp::asyncWrite(boost::asio::mutable_buffers_1 const& buffer, AsyncHa } } -size_t SocketTcp::read(boost::asio::mutable_buffers_1 const& buffer, boost::system::error_code& ec) { +size_t SocketTcp::read(boost::asio::mutable_buffers_1 const& buffer, + boost::system::error_code& ec) { + MUTEX_LOCKER(guard, _lock); if (_encrypted) { return socketcommon::doRead(_sslSocket, buffer, ec); } else { @@ -49,22 +55,28 @@ size_t SocketTcp::read(boost::asio::mutable_buffers_1 const& buffer, boost::syst } void SocketTcp::shutdownReceive() { + MUTEX_LOCKER(guard, _lock); _socket.shutdown(boost::asio::ip::tcp::socket::shutdown_receive); } void SocketTcp::shutdownReceive(boost::system::error_code& ec) { + MUTEX_LOCKER(guard, _lock); _socket.shutdown(boost::asio::ip::tcp::socket::shutdown_receive, ec); } void SocketTcp::shutdownSend(boost::system::error_code& ec) { + MUTEX_LOCKER(guard, _lock); _socket.shutdown(boost::asio::ip::tcp::socket::shutdown_send, ec); } int SocketTcp::available(boost::system::error_code& ec) { + MUTEX_LOCKER(guard, _lock); return static_cast(_socket.available(ec)); } -void SocketTcp::asyncRead(boost::asio::mutable_buffers_1 const& buffer, AsyncHandler const& handler) { +void SocketTcp::asyncRead(boost::asio::mutable_buffers_1 const& buffer, + AsyncHandler const& handler) { + MUTEX_LOCKER(guard, _lock); if (_encrypted) { return socketcommon::doAsyncRead(_sslSocket, buffer, handler); } else { diff --git a/arangod/Scheduler/SocketTcp.h b/arangod/Scheduler/SocketTcp.h index ae07db7df8..a83b8beaac 100644 --- a/arangod/Scheduler/SocketTcp.h +++ b/arangod/Scheduler/SocketTcp.h @@ -23,56 +23,81 @@ #ifndef ARANGOD_SCHEDULER_SOCKET_TCP_H #define ARANGOD_SCHEDULER_SOCKET_TCP_H 1 +#include "Basics/MutexLocker.h" #include "Scheduler/Socket.h" #include namespace arangodb { class SocketTcp final : public Socket { - public: - 
SocketTcp(boost::asio::io_service& ioService, - boost::asio::ssl::context&& context, bool encrypted) - : Socket(ioService, std::move(context), encrypted), - _sslSocket(ioService, _context), - _socket(_sslSocket.next_layer()), - _peerEndpoint() {} + public: + SocketTcp(boost::asio::io_service& ioService, + boost::asio::ssl::context&& context, bool encrypted) + : Socket(ioService, std::move(context), encrypted), + _sslSocket(ioService, _context), + _socket(_sslSocket.next_layer()), + _peerEndpoint() {} - SocketTcp(SocketTcp&& that) = default; + SocketTcp(SocketTcp&& that) = default; - void close() override { _socket.close(); } + void close() override { + MUTEX_LOCKER(guard, _lock); + _socket.close(); + } - void close(boost::system::error_code& ec) override { _socket.close(ec); } + void close(boost::system::error_code& ec) override { + MUTEX_LOCKER(guard, _lock); + _socket.close(ec); + } - void setNonBlocking(bool v) override { _socket.non_blocking(v); } + void setNonBlocking(bool v) override { + MUTEX_LOCKER(guard, _lock); + _socket.non_blocking(v); + } - std::string peerAddress() override { return _peerEndpoint.address().to_string(); } + std::string peerAddress() override { + return _peerEndpoint.address().to_string(); + } - int peerPort() override { return _peerEndpoint.port(); } + int peerPort() override { return _peerEndpoint.port(); } - bool sslHandshake() override { return socketcommon::doSslHandshake(_sslSocket); } + bool sslHandshake() override { + MUTEX_LOCKER(guard, _lock); + return socketcommon::doSslHandshake(_sslSocket); + } - size_t write(basics::StringBuffer* buffer, boost::system::error_code& ec) override; + size_t write(basics::StringBuffer* buffer, + boost::system::error_code& ec) override; - void asyncWrite(boost::asio::mutable_buffers_1 const& buffer, AsyncHandler const& handler) override; + void asyncWrite(boost::asio::mutable_buffers_1 const& buffer, + AsyncHandler const& handler) override; - size_t read(boost::asio::mutable_buffers_1 const& 
buffer, boost::system::error_code& ec) override; + size_t read(boost::asio::mutable_buffers_1 const& buffer, + boost::system::error_code& ec) override; - void asyncRead(boost::asio::mutable_buffers_1 const& buffer, AsyncHandler const& handler) override; + void asyncRead(boost::asio::mutable_buffers_1 const& buffer, + AsyncHandler const& handler) override; - // mop: these functions actually only access the underlying socket. The _sslSocket is - // actually just an additional layer around the socket. These low level functions - // access the _socket only and it is ok that they are not implemented for _sslSocket in - // the children - void shutdownReceive() override; - void shutdownReceive(boost::system::error_code& ec) override; - void shutdownSend(boost::system::error_code& ec) override; - int available(boost::system::error_code& ec) override; + // mop: these functions actually only access the underlying socket. The + // _sslSocket is + // actually just an additional layer around the socket. 
These low level + // functions + // access the _socket only and it is ok that they are not implemented for + // _sslSocket in + // the children - public: - boost::asio::ssl::stream _sslSocket; - boost::asio::ip::tcp::socket& _socket; + void shutdownReceive() override; + void shutdownReceive(boost::system::error_code& ec) override; + void shutdownSend(boost::system::error_code& ec) override; + int available(boost::system::error_code& ec) override; - boost::asio::ip::tcp::acceptor::endpoint_type _peerEndpoint; + public: + Mutex _lock; + + boost::asio::ssl::stream _sslSocket; + boost::asio::ip::tcp::socket& _socket; + + boost::asio::ip::tcp::acceptor::endpoint_type _peerEndpoint; }; } diff --git a/arangod/Statistics/ConnectionStatistics.cpp b/arangod/Statistics/ConnectionStatistics.cpp index 3157a5b68d..9079d56670 100644 --- a/arangod/Statistics/ConnectionStatistics.cpp +++ b/arangod/Statistics/ConnectionStatistics.cpp @@ -46,6 +46,14 @@ boost::lockfree::queue< // --SECTION-- static public methods // ----------------------------------------------------------------------------- +void ConnectionStatistics::SET_HTTP(ConnectionStatistics* stat) { + if (stat != nullptr) { + stat->_http = true; + + TRI_HttpConnectionsStatistics.incCounter(); + } +} + void ConnectionStatistics::initialize() { _statisticsBuffer.reset(new ConnectionStatistics[QUEUE_SIZE]()); diff --git a/arangod/Statistics/ConnectionStatistics.h b/arangod/Statistics/ConnectionStatistics.h index 74f855b932..c5128694b6 100644 --- a/arangod/Statistics/ConnectionStatistics.h +++ b/arangod/Statistics/ConnectionStatistics.h @@ -47,11 +47,7 @@ class ConnectionStatistics { } } - static void SET_HTTP(ConnectionStatistics* stat) { - if (stat != nullptr) { - stat->_http = true; - } - } + static void SET_HTTP(ConnectionStatistics* stat); static void fill(basics::StatisticsCounter& httpConnections, basics::StatisticsCounter& totalRequests, diff --git a/arangod/Transaction/Methods.cpp b/arangod/Transaction/Methods.cpp index 
00586240fd..868614ae38 100644 --- a/arangod/Transaction/Methods.cpp +++ b/arangod/Transaction/Methods.cpp @@ -595,15 +595,11 @@ TransactionCollection* transaction::Methods::trxCollection(TRI_voc_cid_t cid) co } /// @brief order a ditch for a collection -void transaction::Methods::orderDitch(TRI_voc_cid_t cid) { +void transaction::Methods::pinData(TRI_voc_cid_t cid) { TRI_ASSERT(_state != nullptr); TRI_ASSERT(_state->status() == transaction::Status::RUNNING || _state->status() == transaction::Status::CREATED); - if (_ditchCache.cid == cid) { - return; - } - TransactionCollection* trxCollection = _state->collection(cid, AccessMode::Type::READ); if (trxCollection == nullptr) { @@ -612,19 +608,12 @@ void transaction::Methods::orderDitch(TRI_voc_cid_t cid) { TRI_ASSERT(trxCollection->collection() != nullptr); - DocumentDitch* ditch = _transactionContextPtr->orderDitch(trxCollection->collection()); - - if (ditch == nullptr) { - THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY); - } - - _ditchCache.cid = cid; - _ditchCache.ditch = ditch; + _transactionContextPtr->pinData(trxCollection->collection()); } /// @brief whether or not a ditch has been created for the collection -bool transaction::Methods::hasDitch(TRI_voc_cid_t cid) const { - return (_transactionContextPtr->ditch(cid) != nullptr); +bool transaction::Methods::isPinned(TRI_voc_cid_t cid) const { + return _transactionContextPtr->isPinned(cid); } /// @brief extract the _id attribute from a slice, and convert it into a @@ -820,7 +809,7 @@ OperationResult transaction::Methods::anyLocal(std::string const& collectionName throwCollectionNotFound(collectionName.c_str()); } - orderDitch(cid); // will throw when it fails + pinData(cid); // will throw when it fails int res = lock(trxCollection(cid), AccessMode::Type::READ); @@ -939,7 +928,7 @@ void transaction::Methods::invokeOnAllElements(std::string const& collectionName TransactionCollection* trxCol = trxCollection(cid); LogicalCollection* logical = 
documentCollection(trxCol); - orderDitch(cid); // will throw when it fails + pinData(cid); // will throw when it fails int res = lock(trxCol, AccessMode::Type::READ); @@ -988,7 +977,7 @@ int transaction::Methods::documentFastPath(std::string const& collectionName, TRI_voc_cid_t cid = addCollectionAtRuntime(collectionName); LogicalCollection* collection = documentCollection(trxCollection(cid)); - orderDitch(cid); // will throw when it fails + pinData(cid); // will throw when it fails StringRef key(transaction::helpers::extractKeyPart(value)); if (key.empty()) { @@ -1010,7 +999,7 @@ int transaction::Methods::documentFastPath(std::string const& collectionName, return res; } - TRI_ASSERT(hasDitch(cid)); + TRI_ASSERT(isPinned(cid)); uint8_t const* vpack = mmdr->vpack(); TRI_ASSERT(vpack != nullptr); @@ -1032,7 +1021,7 @@ int transaction::Methods::documentFastPathLocal(std::string const& collectionNam TRI_voc_cid_t cid = addCollectionAtRuntime(collectionName); LogicalCollection* collection = documentCollection(trxCollection(cid)); - orderDitch(cid); // will throw when it fails + pinData(cid); // will throw when it fails if (key.empty()) { return TRI_ERROR_ARANGO_DOCUMENT_HANDLE_BAD; @@ -1044,7 +1033,7 @@ int transaction::Methods::documentFastPathLocal(std::string const& collectionNam return res; } - TRI_ASSERT(hasDitch(cid)); + TRI_ASSERT(isPinned(cid)); return TRI_ERROR_NO_ERROR; } @@ -1200,7 +1189,7 @@ OperationResult transaction::Methods::documentLocal(std::string const& collectio LogicalCollection* collection = documentCollection(trxCollection(cid)); if (!options.silent) { - orderDitch(cid); // will throw when it fails + pinData(cid); // will throw when it fails } VPackBuilder resultBuilder; @@ -1230,7 +1219,7 @@ OperationResult transaction::Methods::documentLocal(std::string const& collectio return res; } - TRI_ASSERT(hasDitch(cid)); + TRI_ASSERT(isPinned(cid)); uint8_t const* vpack = result.vpack(); @@ -1358,7 +1347,7 @@ OperationResult 
transaction::Methods::insertLocal(std::string const& collectionN LogicalCollection* collection = documentCollection(trxCollection(cid)); if (options.returnNew) { - orderDitch(cid); // will throw when it fails + pinData(cid); // will throw when it fails } VPackBuilder resultBuilder; @@ -1652,7 +1641,7 @@ OperationResult transaction::Methods::modifyLocal( LogicalCollection* collection = documentCollection(trxCollection(cid)); if (options.returnOld || options.returnNew) { - orderDitch(cid); // will throw when it fails + pinData(cid); // will throw when it fails } // Update/replace are a read and a write, let's get the write lock already @@ -1906,7 +1895,7 @@ OperationResult transaction::Methods::removeLocal(std::string const& collectionN LogicalCollection* collection = documentCollection(trxCollection(cid)); if (options.returnOld) { - orderDitch(cid); // will throw when it fails + pinData(cid); // will throw when it fails } VPackBuilder resultBuilder; @@ -2119,7 +2108,7 @@ OperationResult transaction::Methods::allLocal(std::string const& collectionName OperationOptions& options) { TRI_voc_cid_t cid = addCollectionAtRuntime(collectionName); - orderDitch(cid); // will throw when it fails + pinData(cid); // will throw when it fails int res = lock(trxCollection(cid), AccessMode::Type::READ); @@ -2197,7 +2186,7 @@ OperationResult transaction::Methods::truncateLocal(std::string const& collectio OperationOptions& options) { TRI_voc_cid_t cid = addCollectionAtRuntime(collectionName); - orderDitch(cid); // will throw when it fails + pinData(cid); // will throw when it fails int res = lock(trxCollection(cid), AccessMode::Type::WRITE); @@ -2557,7 +2546,7 @@ std::unique_ptr transaction::Methods::indexScan( TRI_voc_cid_t cid = addCollectionAtRuntime(collectionName); LogicalCollection* logical = documentCollection(trxCollection(cid)); - orderDitch(cid); // will throw when it fails + pinData(cid); // will throw when it fails std::unique_ptr iterator = nullptr; diff --git 
a/arangod/Transaction/Methods.h b/arangod/Transaction/Methods.h index 24d4cd0876..380a61601e 100644 --- a/arangod/Transaction/Methods.h +++ b/arangod/Transaction/Methods.h @@ -64,7 +64,6 @@ class BaseTraverserEngine; /// @brief forward declarations class CollectionNameResolver; -class DocumentDitch; struct DocumentIdentifierToken; class Index; class ManagedDocumentResult; @@ -181,10 +180,10 @@ class Methods { std::string name(TRI_voc_cid_t cid) const; /// @brief order a ditch for a collection - void orderDitch(TRI_voc_cid_t); + void pinData(TRI_voc_cid_t); /// @brief whether or not a ditch has been created for the collection - bool hasDitch(TRI_voc_cid_t cid) const; + bool isPinned(TRI_voc_cid_t cid) const; /// @brief extract the _id attribute from a slice, and convert it into a /// string @@ -553,13 +552,6 @@ class Methods { /// @brief transaction hints transaction::Hints _localHints; - /// @brief cache for last handed out DocumentDitch - struct { - TRI_voc_cid_t cid = 0; - DocumentDitch* ditch = nullptr; - } - _ditchCache; - struct { TRI_voc_cid_t cid = 0; std::string name; diff --git a/arangod/Utils/CollectionExport.cpp b/arangod/Utils/CollectionExport.cpp index b1695ec26d..7ed6e2a451 100644 --- a/arangod/Utils/CollectionExport.cpp +++ b/arangod/Utils/CollectionExport.cpp @@ -55,7 +55,7 @@ CollectionExport::CollectionExport(TRI_vocbase_t* vocbase, CollectionExport::~CollectionExport() { if (_ditch != nullptr) { - _ditch->ditches()->freeDocumentDitch(_ditch, false); + _ditch->ditches()->freeMMFilesDocumentDitch(_ditch, false); } } @@ -67,7 +67,7 @@ void CollectionExport::run(uint64_t maxWaitTime, size_t limit) { // create a ditch under the compaction lock _ditch = arangodb::MMFilesCollection::toMMFilesCollection(_collection) ->ditches() - ->createDocumentDitch(false, __FILE__, __LINE__); + ->createMMFilesDocumentDitch(false, __FILE__, __LINE__); }); // now we either have a ditch or not diff --git a/arangod/Utils/CollectionExport.h 
b/arangod/Utils/CollectionExport.h index 8064de7ac9..665e19d212 100644 --- a/arangod/Utils/CollectionExport.h +++ b/arangod/Utils/CollectionExport.h @@ -34,7 +34,7 @@ struct TRI_vocbase_t; namespace arangodb { class CollectionGuard; -class DocumentDitch; +class MMFilesDocumentDitch; class CollectionExport { friend class ExportCursor; @@ -63,7 +63,7 @@ class CollectionExport { private: std::unique_ptr _guard; LogicalCollection* _collection; - arangodb::DocumentDitch* _ditch; + arangodb::MMFilesDocumentDitch* _ditch; std::string const _name; arangodb::CollectionNameResolver _resolver; Restrictions _restrictions; diff --git a/arangod/Utils/CollectionKeys.cpp b/arangod/Utils/CollectionKeys.cpp index aa2ad7b4b2..b0ce480cc4 100644 --- a/arangod/Utils/CollectionKeys.cpp +++ b/arangod/Utils/CollectionKeys.cpp @@ -74,7 +74,7 @@ CollectionKeys::~CollectionKeys() { engine->removeCompactionBlocker(_vocbase, _blockerId); if (_ditch != nullptr) { - _ditch->ditches()->freeDocumentDitch(_ditch, false); + _ditch->ditches()->freeMMFilesDocumentDitch(_ditch, false); } } @@ -91,7 +91,7 @@ void CollectionKeys::create(TRI_voc_tick_t maxTick) { // create a ditch under the compaction lock _ditch = arangodb::MMFilesCollection::toMMFilesCollection(_collection) ->ditches() - ->createDocumentDitch(false, __FILE__, __LINE__); + ->createMMFilesDocumentDitch(false, __FILE__, __LINE__); }); // now we either have a ditch or not diff --git a/arangod/Utils/CollectionKeys.h b/arangod/Utils/CollectionKeys.h index eac9a7466c..69c71bd312 100644 --- a/arangod/Utils/CollectionKeys.h +++ b/arangod/Utils/CollectionKeys.h @@ -41,7 +41,7 @@ class Slice; } class CollectionGuard; -class DocumentDitch; +class MMFilesDocumentDitch; typedef TRI_voc_tick_t CollectionKeysId; @@ -115,7 +115,7 @@ class CollectionKeys { TRI_vocbase_t* _vocbase; std::unique_ptr _guard; arangodb::LogicalCollection* _collection; - arangodb::DocumentDitch* _ditch; + arangodb::MMFilesDocumentDitch* _ditch; std::string const _name; 
arangodb::CollectionNameResolver _resolver; TRI_voc_tick_t _blockerId; diff --git a/arangod/Utils/DatabaseGuard.h b/arangod/Utils/DatabaseGuard.h index bfcf21f97a..44d51c9a0b 100644 --- a/arangod/Utils/DatabaseGuard.h +++ b/arangod/Utils/DatabaseGuard.h @@ -53,6 +53,8 @@ class DatabaseGuard { if (_vocbase == nullptr) { THROW_ARANGO_EXCEPTION(TRI_ERROR_ARANGO_DATABASE_NOT_FOUND); } + + TRI_ASSERT(!_vocbase->isDangling()); } /// @brief create the guard, using a database name @@ -65,11 +67,14 @@ class DatabaseGuard { if (_vocbase == nullptr) { THROW_ARANGO_EXCEPTION(TRI_ERROR_ARANGO_DATABASE_NOT_FOUND); } + + TRI_ASSERT(!_vocbase->isDangling()); } /// @brief destroy the guard ~DatabaseGuard() { TRI_ASSERT(_vocbase != nullptr); + TRI_ASSERT(!_vocbase->isDangling()); _vocbase->release(); } diff --git a/arangod/Utils/SingleCollectionTransaction.h b/arangod/Utils/SingleCollectionTransaction.h index d802d5e3f0..1c19591184 100644 --- a/arangod/Utils/SingleCollectionTransaction.h +++ b/arangod/Utils/SingleCollectionTransaction.h @@ -30,7 +30,7 @@ #include "VocBase/voc-types.h" namespace arangodb { -class DocumentDitch; +class MMFilesDocumentDitch; class TransactionContext; class SingleCollectionTransaction final : public transaction::Methods { diff --git a/arangod/Utils/TransactionContext.cpp b/arangod/Utils/TransactionContext.cpp index 1d1071adf8..e78c71c95c 100644 --- a/arangod/Utils/TransactionContext.cpp +++ b/arangod/Utils/TransactionContext.cpp @@ -88,7 +88,7 @@ TransactionContext::~TransactionContext() { for (auto& it : _ditches) { // we're done with this ditch auto& ditch = it.second; - ditch->ditches()->freeDocumentDitch(ditch, true /* fromTransaction */); + ditch->ditches()->freeMMFilesDocumentDitch(ditch, true /* fromTransaction */); // If some external entity is still using the ditch, it is kept! 
} @@ -109,11 +109,8 @@ VPackCustomTypeHandler* TransactionContext::createCustomTypeHandler(TRI_vocbase_ return new CustomTypeHandler(vocbase, resolver); } -/// @brief order a document ditch for the collection -/// this will create one if none exists. if no ditch can be created, the -/// function will return a nullptr! -DocumentDitch* TransactionContext::orderDitch(LogicalCollection* collection) { - +/// @brief pin data for the collection +void TransactionContext::pinData(LogicalCollection* collection) { TRI_voc_cid_t cid = collection->cid(); auto it = _ditches.find(cid); @@ -122,35 +119,29 @@ DocumentDitch* TransactionContext::orderDitch(LogicalCollection* collection) { // tell everyone else this ditch is still in use, // at least until the transaction is over TRI_ASSERT((*it).second->usedByTransaction()); - // ditch already exists, return it - return (*it).second; + // ditch already exists + return; } // this method will not throw, but may return a nullptr - auto ditch = arangodb::MMFilesCollection::toMMFilesCollection(collection)->ditches()->createDocumentDitch(true, __FILE__, __LINE__); + auto ditch = arangodb::MMFilesCollection::toMMFilesCollection(collection)->ditches()->createMMFilesDocumentDitch(true, __FILE__, __LINE__); - if (ditch != nullptr) { - try { - _ditches.emplace(cid, ditch); - } - catch (...) { - ditch->ditches()->freeDocumentDitch(ditch, true); - ditch = nullptr; // return a nullptr - } + if (ditch == nullptr) { + THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY); } - return ditch; + try { + _ditches.emplace(cid, ditch); + } + catch (...) 
{ + ditch->ditches()->freeMMFilesDocumentDitch(ditch, true); + throw; + } } - -/// @brief return the ditch for a collection -/// this will return a nullptr if no ditch exists -DocumentDitch* TransactionContext::ditch(TRI_voc_cid_t cid) const { - auto it = _ditches.find(cid); - if (it == _ditches.end()) { - return nullptr; - } - return (*it).second; +/// @brief whether or not the data for the collection is pinned +bool TransactionContext::isPinned(TRI_voc_cid_t cid) const { + return (_ditches.find(cid) != _ditches.end()); } /// @brief temporarily lease a StringBuffer object diff --git a/arangod/Utils/TransactionContext.h b/arangod/Utils/TransactionContext.h index 19c00c9cff..40e52612a3 100644 --- a/arangod/Utils/TransactionContext.h +++ b/arangod/Utils/TransactionContext.h @@ -25,7 +25,6 @@ #define ARANGOD_UTILS_TRANSACTION_CONTEXT_H 1 #include "Basics/Common.h" -#include "Basics/Mutex.h" #include "Basics/SmallVector.h" #include "VocBase/voc-types.h" @@ -43,13 +42,14 @@ class Builder; struct CustomTypeHandler; } -class CollectionNameResolver; -class DocumentDitch; -class LogicalCollection; namespace transaction { class Methods; } -; + + +class CollectionNameResolver; +class MMFilesDocumentDitch; +class LogicalCollection; class TransactionState; class TransactionContext { @@ -75,15 +75,12 @@ class TransactionContext { /// @brief return the vocbase TRI_vocbase_t* vocbase() const { return _vocbase; } - /// @brief order a document ditch for the collection - /// this will create one if none exists. if no ditch can be created, the - /// function will return a nullptr! 
- DocumentDitch* orderDitch(arangodb::LogicalCollection*); - - /// @brief return the ditch for a collection - /// this will return a nullptr if no ditch exists - DocumentDitch* ditch(TRI_voc_cid_t) const; + /// @brief pin data for the collection + void pinData(arangodb::LogicalCollection*); + /// @brief whether or not the data for the collection is pinned + bool isPinned(TRI_voc_cid_t) const; + /// @brief temporarily lease a StringBuffer object basics::StringBuffer* leaseStringBuffer(size_t initialSize); @@ -138,7 +135,7 @@ class TransactionContext { std::shared_ptr _customTypeHandler; - std::unordered_map _ditches; + std::unordered_map _ditches; SmallVector::allocator_type::arena_type _arena; SmallVector _builders; diff --git a/arangod/V8Server/V8DealerFeature.cpp b/arangod/V8Server/V8DealerFeature.cpp index cbe438cfd2..dc86134207 100644 --- a/arangod/V8Server/V8DealerFeature.cpp +++ b/arangod/V8Server/V8DealerFeature.cpp @@ -452,6 +452,11 @@ V8Context* V8DealerFeature::enterContext(TRI_vocbase_t* vocbase, if (_stopping) { return nullptr; } + + if (!vocbase->use()) { + return nullptr; + } + V8Context* context = nullptr; @@ -506,6 +511,7 @@ V8Context* V8DealerFeature::enterContext(TRI_vocbase_t* vocbase, } if (context == nullptr) { + vocbase->release(); return nullptr; } } @@ -536,6 +542,7 @@ V8Context* V8DealerFeature::enterContext(TRI_vocbase_t* vocbase, // in case we are in the shutdown phase, do not enter a context! 
// the context might have been deleted by the shutdown if (_stopping) { + vocbase->release(); return nullptr; } @@ -550,7 +557,7 @@ V8Context* V8DealerFeature::enterContext(TRI_vocbase_t* vocbase, // should not fail because we reserved enough space beforehand _busyContexts.emplace(context); } - + // when we get here, we should have a context and an isolate TRI_ASSERT(context != nullptr); TRI_ASSERT(context->_isolate != nullptr); @@ -580,8 +587,6 @@ V8Context* V8DealerFeature::enterContext(TRI_vocbase_t* vocbase, v8g->_vocbase = vocbase; v8g->_allowUseDatabase = allowUseDatabase; - vocbase->use(); - try { LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "entering V8 context " << context->_id; context->handleGlobalContextMethods(); @@ -752,7 +757,7 @@ void V8DealerFeature::applyContextUpdates() { vocbase, true, static_cast(i)); if (context == nullptr) { - LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "could not updated V8 context #" << i; + LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "could not update V8 context #" << i; FATAL_ERROR_EXIT(); } diff --git a/arangod/V8Server/v8-actions.cpp b/arangod/V8Server/v8-actions.cpp index fb82b8e261..422cf0628f 100644 --- a/arangod/V8Server/v8-actions.cpp +++ b/arangod/V8Server/v8-actions.cpp @@ -44,7 +44,6 @@ #include "Rest/GeneralRequest.h" #include "Rest/HttpRequest.h" #include "Rest/HttpResponse.h" -#include "RestServer/VocbaseContext.h" #include "V8/v8-buffer.h" #include "V8/v8-conv.h" #include "V8/v8-utils.h" diff --git a/arangod/V8Server/v8-collection-util.cpp b/arangod/V8Server/v8-collection-util.cpp index 4226ac8308..889df43693 100644 --- a/arangod/V8Server/v8-collection-util.cpp +++ b/arangod/V8Server/v8-collection-util.cpp @@ -92,7 +92,7 @@ static void WeakCollectionCallback(const v8::WeakCallbackInfo< v8g->decreaseActiveExternals(); // decrease the reference-counter for the database - collection->vocbase()->release(); + TRI_ASSERT(!collection->vocbase()->isDangling()); // find the persistent handle #if 
ARANGODB_ENABLE_MAINTAINER_MODE @@ -103,9 +103,12 @@ static void WeakCollectionCallback(const v8::WeakCallbackInfo< // dispose and clear the persistent handle v8g->JSCollections[collection].Reset(); v8g->JSCollections.erase(collection); - + if (!collection->isLocal()) { + collection->vocbase()->release(); delete collection; + } else { + collection->vocbase()->release(); } } @@ -125,7 +128,6 @@ v8::Handle WrapCollection(v8::Isolate* isolate, v8::Handle result = VocbaseColTempl->NewInstance(); if (!result.IsEmpty()) { - LogicalCollection* nonconstCollection = const_cast(collection); @@ -138,7 +140,8 @@ v8::Handle WrapCollection(v8::Isolate* isolate, if (it == v8g->JSCollections.end()) { // increase the reference-counter for the database - nonconstCollection->vocbase()->use(); + TRI_ASSERT(!nonconstCollection->vocbase()->isDangling()); + nonconstCollection->vocbase()->forceUse(); try { auto externalCollection = v8::External::New(isolate, nonconstCollection); diff --git a/arangod/V8Server/v8-query.cpp b/arangod/V8Server/v8-query.cpp index fe9c75b0fd..2bc5c27b01 100644 --- a/arangod/V8Server/v8-query.cpp +++ b/arangod/V8Server/v8-query.cpp @@ -381,7 +381,7 @@ static void JS_ChecksumCollection( TRI_V8_THROW_EXCEPTION(res); } - trx.orderDitch(col->cid()); // will throw when it fails + trx.pinData(col->cid()); // will throw when it fails // get last tick LogicalCollection* collection = trx.documentCollection(); diff --git a/arangod/V8Server/v8-util.cpp b/arangod/V8Server/v8-util.cpp index 6290003918..a6e13664e2 100644 --- a/arangod/V8Server/v8-util.cpp +++ b/arangod/V8Server/v8-util.cpp @@ -38,6 +38,7 @@ TRI_vocbase_t* GetContextVocBase(v8::Isolate* isolate) { TRI_GET_GLOBALS(); TRI_ASSERT(v8g->_vocbase != nullptr); + TRI_ASSERT(!v8g->_vocbase->isDangling()); return static_cast(v8g->_vocbase); } diff --git a/arangod/V8Server/v8-vocbase.cpp b/arangod/V8Server/v8-vocbase.cpp index 1344005e3b..357b2a43cc 100644 --- a/arangod/V8Server/v8-vocbase.cpp +++ 
b/arangod/V8Server/v8-vocbase.cpp @@ -58,7 +58,6 @@ #include "Rest/Version.h" #include "RestServer/ConsoleThread.h" #include "RestServer/DatabaseFeature.h" -#include "RestServer/VocbaseContext.h" #include "Statistics/StatisticsFeature.h" #include "StorageEngine/EngineSelectorFeature.h" #include "StorageEngine/StorageEngine.h" @@ -2048,7 +2047,8 @@ static void JS_IsSystemDatabase( //////////////////////////////////////////////////////////////////////////////// /// @brief fake this method so the interface is similar to the client. //////////////////////////////////////////////////////////////////////////////// -static void JS_fakeFlushCache(v8::FunctionCallbackInfo const& args) { + +static void JS_FakeFlushCache(v8::FunctionCallbackInfo const& args) { TRI_V8_TRY_CATCH_BEGIN(isolate); TRI_V8_RETURN_UNDEFINED(); TRI_V8_TRY_CATCH_END; @@ -2057,6 +2057,7 @@ static void JS_fakeFlushCache(v8::FunctionCallbackInfo const& args) { //////////////////////////////////////////////////////////////////////////////// /// @brief was docuBlock databaseUseDatabase //////////////////////////////////////////////////////////////////////////////// + static void JS_UseDatabase(v8::FunctionCallbackInfo const& args) { TRI_V8_TRY_CATCH_BEGIN(isolate); v8::HandleScope scope(isolate); @@ -2097,6 +2098,8 @@ static void JS_UseDatabase(v8::FunctionCallbackInfo const& args) { TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_DATABASE_NOT_FOUND); } + TRI_ASSERT(!vocbase->isDangling()); + // switch databases void* orig = v8g->_vocbase; TRI_ASSERT(orig != nullptr); @@ -2125,7 +2128,7 @@ static void ListDatabasesCoordinator( std::vector list = ci->databases(true); v8::Handle result = v8::Array::New(isolate); for (size_t i = 0; i < list.size(); ++i) { - result->Set((uint32_t)i, TRI_V8_STD_STRING(list[i])); + result->Set(static_cast(i), TRI_V8_STD_STRING(list[i])); } TRI_V8_RETURN(result); } else { @@ -2366,6 +2369,8 @@ static void JS_CreateDatabase(v8::FunctionCallbackInfo const& args) { 
TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_DATABASE_NOT_FOUND); } + TRI_ASSERT(!vocbase->isDangling()); + if (TRI_GetOperationModeServer() == TRI_VOCBASE_MODE_NO_CREATE) { TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_READ_ONLY); } @@ -2406,6 +2411,7 @@ static void JS_CreateDatabase(v8::FunctionCallbackInfo const& args) { } TRI_ASSERT(database != nullptr); + TRI_ASSERT(!database->isDangling()); // copy users into context if (args.Length() >= 3 && args[2]->IsArray()) { @@ -2420,21 +2426,35 @@ static void JS_CreateDatabase(v8::FunctionCallbackInfo const& args) { } // switch databases - TRI_vocbase_t* orig = v8g->_vocbase; - TRI_ASSERT(orig != nullptr); + { + TRI_vocbase_t* orig = v8g->_vocbase; + TRI_ASSERT(orig != nullptr); - v8g->_vocbase = database; + v8g->_vocbase = database; - // initalize database - V8DealerFeature::DEALER->startupLoader()->executeGlobalScript( - isolate, isolate->GetCurrentContext(), - "server/bootstrap/local-database.js"); + // initalize database + try { + V8DealerFeature::DEALER->startupLoader()->executeGlobalScript( + isolate, isolate->GetCurrentContext(), + "server/bootstrap/local-database.js"); + if (v8g->_vocbase == database) { + // decrease the reference-counter only if we are coming back with the same database + database->release(); + } - // and switch back - v8g->_vocbase = orig; + // and switch back + v8g->_vocbase = orig; + } catch (...) 
{ + if (v8g->_vocbase == database) { + // decrease the reference-counter only if we are coming back with the same database + database->release(); + } - // finally decrease the reference-counter - database->release(); + // and switch back + v8g->_vocbase = orig; + throw; + } + } TRI_V8_RETURN_TRUE(); TRI_V8_TRY_CATCH_END @@ -2513,6 +2533,7 @@ static void JS_DropDatabase(v8::FunctionCallbackInfo const& args) { if (!vocbase->isSystem()) { TRI_V8_THROW_EXCEPTION(TRI_ERROR_ARANGO_USE_SYSTEM_DATABASE); } + // clear collections in cache object TRI_ClearObjectCacheV8(isolate); @@ -2883,9 +2904,8 @@ void TRI_InitV8VocBridge(v8::Isolate* isolate, v8::Handle context, JS_Databases); TRI_AddMethodVocbase(isolate, ArangoNS, TRI_V8_ASCII_STRING("_useDatabase"), JS_UseDatabase); - TRI_AddMethodVocbase(isolate, ArangoNS, TRI_V8_ASCII_STRING("_flushCache"), - JS_fakeFlushCache, true); + JS_FakeFlushCache, true); TRI_InitV8Statistics(isolate, context); diff --git a/arangod/VocBase/Graphs.cpp b/arangod/VocBase/Graphs.cpp index 04dbe53c3c..d862988371 100644 --- a/arangod/VocBase/Graphs.cpp +++ b/arangod/VocBase/Graphs.cpp @@ -39,10 +39,9 @@ std::string const GRAPHS = "_graphs"; /// @brief Load a graph from the _graphs collection; local and coordinator way //////////////////////////////////////////////////////////////////////////////// -arangodb::aql::Graph* arangodb::lookupGraphByName(TRI_vocbase_t* vocbase, +arangodb::aql::Graph* arangodb::lookupGraphByName(std::shared_ptr transactionContext, std::string const& name) { - SingleCollectionTransaction trx(StandaloneTransactionContext::Create(vocbase), - GRAPHS, AccessMode::Type::READ); + SingleCollectionTransaction trx(transactionContext, GRAPHS, AccessMode::Type::READ); int res = trx.begin(); diff --git a/arangod/VocBase/Graphs.h b/arangod/VocBase/Graphs.h index 7519c942fc..9ac6bcab74 100644 --- a/arangod/VocBase/Graphs.h +++ b/arangod/VocBase/Graphs.h @@ -31,13 +31,15 @@ namespace aql { class Graph; } +class TransactionContext; + 
//////////////////////////////////////////////////////////////////////////////// /// @brief get an instance of Graph by Name. /// returns nullptr if graph is not existing /// The caller has to take care for the memory. //////////////////////////////////////////////////////////////////////////////// -arangodb::aql::Graph* lookupGraphByName(TRI_vocbase_t*, std::string const&); +arangodb::aql::Graph* lookupGraphByName(std::shared_ptr, std::string const& name); } // namespace arangodb diff --git a/arangod/VocBase/LogicalCollection.cpp b/arangod/VocBase/LogicalCollection.cpp index b7d0ec6e89..e2dfe6aeb0 100644 --- a/arangod/VocBase/LogicalCollection.cpp +++ b/arangod/VocBase/LogicalCollection.cpp @@ -24,6 +24,7 @@ #include "LogicalCollection.h" +#include "Aql/PlanCache.h" #include "Aql/QueryCache.h" #include "Basics/LocalTaskQueue.h" #include "Basics/ReadLocker.h" @@ -717,9 +718,7 @@ int LogicalCollection::rename(std::string const& newName) { // Otherwise caching is destroyed. TRI_ASSERT(!ServerState::instance()->isCoordinator()); // NOT YET IMPLEMENTED - WRITE_LOCKER_EVENTUAL(locker, _lock); - - // Check for illeagal states. + // Check for illegal states. switch (_status) { case TRI_VOC_COL_STATUS_CORRUPTED: return TRI_ERROR_ARANGO_CORRUPTED_COLLECTION; @@ -730,12 +729,6 @@ int LogicalCollection::rename(std::string const& newName) { break; } - // Check for duplicate name - auto other = _vocbase->lookupCollection(newName); - if (other != nullptr) { - return TRI_ERROR_ARANGO_DUPLICATE_NAME; - } - switch (_status) { case TRI_VOC_COL_STATUS_UNLOADED: case TRI_VOC_COL_STATUS_LOADED: @@ -1165,6 +1158,7 @@ std::shared_ptr LogicalCollection::createIndex(transaction::Methods* trx, THROW_ARANGO_EXCEPTION(res); } + arangodb::aql::PlanCache::instance()->invalidate(_vocbase); // Until here no harm is done if sth fails. The shared ptr will clean up. 
if // left before @@ -1218,6 +1212,8 @@ bool LogicalCollection::removeIndex(TRI_idx_iid_t iid) { /// @brief drops an index, including index file removal and replication bool LogicalCollection::dropIndex(TRI_idx_iid_t iid) { TRI_ASSERT(!ServerState::instance()->isCoordinator()); + arangodb::aql::PlanCache::instance()->invalidate(_vocbase); + arangodb::aql::QueryCache::instance()->invalidate(_vocbase, name()); return _physical->dropIndex(iid); } diff --git a/arangod/VocBase/TransactionManager.cpp b/arangod/VocBase/TransactionManager.cpp index bd8be807f1..076640722d 100644 --- a/arangod/VocBase/TransactionManager.cpp +++ b/arangod/VocBase/TransactionManager.cpp @@ -103,7 +103,7 @@ std::unordered_set TransactionManager::getFailedTransactions() { return failedTransactions; } -void TransactionManager::iterateActiveTransactions(std::function const& callback) { +void TransactionManager::iterateActiveTransactions(std::function const& callback) { WRITE_LOCKER(allTransactionsLocker, _allTransactionsLock); // iterate over all active transactions diff --git a/arangod/VocBase/TransactionManager.h b/arangod/VocBase/TransactionManager.h index 1ece883eeb..20ce8baa36 100644 --- a/arangod/VocBase/TransactionManager.h +++ b/arangod/VocBase/TransactionManager.h @@ -31,8 +31,7 @@ namespace arangodb { // to be derived by storage engines -struct TransactionData { -}; +struct TransactionData {}; class TransactionManager { static constexpr size_t numBuckets = 16; @@ -59,7 +58,7 @@ class TransactionManager { void unregisterTransaction(TRI_voc_tid_t transactionId, bool markAsFailed); // iterate all the active transactions - void iterateActiveTransactions(std::function const& callback); + void iterateActiveTransactions(std::function const& callback); private: // hashes the transaction id into a bucket diff --git a/arangod/VocBase/TraverserOptions.cpp b/arangod/VocBase/TraverserOptions.cpp index 6450af518c..08ab993143 100644 --- a/arangod/VocBase/TraverserOptions.cpp +++ 
b/arangod/VocBase/TraverserOptions.cpp @@ -94,7 +94,6 @@ arangodb::traverser::TraverserOptions::LookupInfo::LookupInfo( "Each lookup requires expression to be an object"); } - expression = new aql::Expression(query->ast(), read); read = info.get("condition"); @@ -113,7 +112,7 @@ arangodb::traverser::TraverserOptions::LookupInfo::LookupInfo( indexCondition(other.indexCondition), conditionNeedUpdate(other.conditionNeedUpdate), conditionMemberToUpdate(other.conditionMemberToUpdate) { - expression = other.expression->clone(); + expression = other.expression->clone(nullptr); } void arangodb::traverser::TraverserOptions::LookupInfo::buildEngineInfo( diff --git a/arangod/VocBase/replication-dump.cpp b/arangod/VocBase/replication-dump.cpp index d19f6209c1..70f34b7eb7 100644 --- a/arangod/VocBase/replication-dump.cpp +++ b/arangod/VocBase/replication-dump.cpp @@ -608,7 +608,7 @@ int TRI_DumpCollectionReplication(TRI_replication_dump_t* dump, auto mmfiles = arangodb::MMFilesCollection::toMMFilesCollection(collection); // create a barrier so the underlying collection is not unloaded - auto b = mmfiles->ditches()->createReplicationDitch(__FILE__, __LINE__); + auto b = mmfiles->ditches()->createMMFilesReplicationDitch(__FILE__, __LINE__); if (b == nullptr) { return TRI_ERROR_OUT_OF_MEMORY; @@ -717,6 +717,10 @@ int TRI_DumpLogReplication( dump->_collectionNames[collectionId] = name.copyString(); } } + } else if (type == TRI_DF_MARKER_VPACK_RENAME_COLLECTION) { + // invalidate collection name cache because this is a + // rename operation + dump->_collectionNames.clear(); } ptr += MMFilesDatafileHelper::AlignedMarkerSize(marker); diff --git a/arangod/VocBase/vocbase.cpp b/arangod/VocBase/vocbase.cpp index df15af1c65..b8c2cfd792 100644 --- a/arangod/VocBase/vocbase.cpp +++ b/arangod/VocBase/vocbase.cpp @@ -31,6 +31,7 @@ #include "ApplicationFeatures/ApplicationServer.h" #include "Aql/QueryCache.h" #include "Aql/QueryList.h" +#include "Aql/PlanCache.h" #include 
"Basics/ConditionLocker.h" #include "Basics/Exceptions.h" #include "Basics/FileUtils.h" @@ -66,6 +67,70 @@ using namespace arangodb; using namespace arangodb::basics; + +/// @brief increase the reference counter for a database +bool TRI_vocbase_t::use() { + auto expected = _refCount.load(std::memory_order_relaxed); + while (true) { + if ((expected & 1) != 0) { + // deleted bit is set + return false; + } + // increase the reference counter by 2. + // this is because we use odd values to indicate that the database has been + // marked as deleted + auto updated = expected + 2; + TRI_ASSERT((updated & 1) == 0); + if (_refCount.compare_exchange_weak(expected, updated, std::memory_order_release, std::memory_order_relaxed)) { + // compare-exchange worked. we're done + return true; + } + // compare-exchange failed. try again! + expected = _refCount.load(std::memory_order_relaxed); + } +} + +void TRI_vocbase_t::forceUse() { + _refCount += 2; +} + +/// @brief decrease the reference counter for a database +void TRI_vocbase_t::release() { + // decrease the reference counter by 2. 
+ // this is because we use odd values to indicate that the database has been + // marked as deleted + auto oldValue = _refCount.fetch_sub(2); + TRI_ASSERT(oldValue >= 2); +} + +/// @brief returns whether the database can be dropped +bool TRI_vocbase_t::isDangling() const { + if (isSystem()) { + return false; + } + auto refCount = _refCount.load(); + // we are intentionally comparing with exactly 1 here, because a 1 means + // that noone else references the database but it has been marked as deleted + return (refCount == 1); +} + +/// @brief whether or not the vocbase has been marked as deleted +bool TRI_vocbase_t::isDropped() const { + auto refCount = _refCount.load(); + // if the stored value is odd, it means the database has been marked as + // deleted + return (refCount % 2 == 1); +} + +/// @brief marks a database as deleted +bool TRI_vocbase_t::markAsDropped() { + TRI_ASSERT(!isSystem()); + + auto oldValue = _refCount.fetch_or(1); + // if the previously stored value is odd, it means the database has already + // been marked as deleted + return (oldValue % 2 == 0); +} /// @brief signal the cleanup thread to wake up void TRI_vocbase_t::signalCleanup() { @@ -204,9 +269,9 @@ bool TRI_vocbase_t::UnloadCollectionCallback(LogicalCollection* collection) { auto ditches = arangodb::MMFilesCollection::toMMFilesCollection(collection)->ditches(); - if (ditches->contains(arangodb::Ditch::TRI_DITCH_DOCUMENT) || - ditches->contains(arangodb::Ditch::TRI_DITCH_REPLICATION) || - ditches->contains(arangodb::Ditch::TRI_DITCH_COMPACTION)) { + if (ditches->contains(arangodb::MMFilesDitch::TRI_DITCH_DOCUMENT) || + ditches->contains(arangodb::MMFilesDitch::TRI_DITCH_REPLICATION) || + ditches->contains(arangodb::MMFilesDitch::TRI_DITCH_COMPACTION)) { locker.unlock(); // still some ditches left... 
@@ -363,7 +428,7 @@ int TRI_vocbase_t::loadCollection(arangodb::LogicalCollection* collection, // check if there is a deferred drop action going on for this collection if (arangodb::MMFilesCollection::toMMFilesCollection(collection) ->ditches() - ->contains(arangodb::Ditch::TRI_DITCH_COLLECTION_DROP)) { + ->contains(arangodb::MMFilesDitch::TRI_DITCH_COLLECTION_DROP)) { // drop call going on, we must abort locker.unlock(); @@ -508,6 +573,7 @@ int TRI_vocbase_t::dropCollectionWorker(arangodb::LogicalCollection* collection, TRI_ASSERT(writeLocker.isLocked()); TRI_ASSERT(locker.isLocked()); + arangodb::aql::PlanCache::instance()->invalidate(this); arangodb::aql::QueryCache::instance()->invalidate(this); // collection already deleted @@ -769,6 +835,34 @@ LogicalCollection* TRI_vocbase_t::lookupCollection(std::string const& name) { return (*it).second; } +/// @brief looks up a collection by name +LogicalCollection* TRI_vocbase_t::lookupCollectionNoLock(std::string const& name) { + if (name.empty()) { + return nullptr; + } + + // if collection name is passed as a stringified id, we'll use the lookupbyid + // function + // this is safe because collection names must not start with a digit + if (name[0] >= '0' && name[0] <= '9') { + TRI_voc_cid_t id = StringUtils::uint64(name); + auto it = _collectionsById.find(id); + + if (it == _collectionsById.end()) { + return nullptr; + } + return (*it).second; + } + + // otherwise we'll look up the collection by name + auto it = _collectionsByName.find(name); + + if (it == _collectionsByName.end()) { + return nullptr; + } + return (*it).second; +} + /// @brief looks up a collection by identifier LogicalCollection* TRI_vocbase_t::lookupCollection(TRI_voc_cid_t id) { READ_LOCKER(readLocker, _collectionsLock); @@ -902,7 +996,7 @@ int TRI_vocbase_t::unloadCollection(arangodb::LogicalCollection* collection, boo // add callback for unload arangodb::MMFilesCollection::toMMFilesCollection(collection) ->ditches() - 
->createUnloadCollectionDitch(collection, UnloadCollectionCallback, + ->createMMFilesUnloadCollectionDitch(collection, UnloadCollectionCallback, __FILE__, __LINE__); } // release locks @@ -942,7 +1036,7 @@ int TRI_vocbase_t::dropCollection(arangodb::LogicalCollection* collection, bool // add callback for dropping arangodb::MMFilesCollection::toMMFilesCollection(collection) ->ditches() - ->createDropCollectionDitch(collection, DropCollectionCallback, + ->createMMFilesDropCollectionDitch(collection, DropCollectionCallback, __FILE__, __LINE__); // wake up the cleanup thread @@ -999,6 +1093,44 @@ int TRI_vocbase_t::renameCollection(arangodb::LogicalCollection* collection, } READ_LOCKER(readLocker, _inventoryLock); + + CONDITIONAL_WRITE_LOCKER(writeLocker, _collectionsLock, false); + CONDITIONAL_WRITE_LOCKER(locker, collection->_lock, false); + + while (true) { + TRI_ASSERT(!writeLocker.isLocked()); + TRI_ASSERT(!locker.isLocked()); + + // block until we have acquired this lock + writeLocker.lock(); + // we now have the one lock + + TRI_ASSERT(writeLocker.isLocked()); + + if (locker.tryLock()) { + // we now have both locks and can continue outside of this loop + break; + } + + // unlock the write locker so we don't block other operations + writeLocker.unlock(); + + TRI_ASSERT(!writeLocker.isLocked()); + TRI_ASSERT(!locker.isLocked()); + + // sleep for a while + std::this_thread::yield(); + } + + TRI_ASSERT(writeLocker.isLocked()); + TRI_ASSERT(locker.isLocked()); + + // Check for duplicate name + auto other = lookupCollectionNoLock(newName); + + if (other != nullptr) { + return TRI_ERROR_ARANGO_DUPLICATE_NAME; + } int res = collection->rename(newName); @@ -1007,17 +1139,23 @@ int TRI_vocbase_t::renameCollection(arangodb::LogicalCollection* collection, return res; } - { - WRITE_LOCKER(writeLocker, _collectionsLock); // The collection is renamed. Now swap cache entries. 
- auto it2 = _collectionsByName.emplace(newName, collection); - TRI_ASSERT(it2.second); + auto it2 = _collectionsByName.emplace(newName, collection); + TRI_ASSERT(it2.second); + try { _collectionsByName.erase(oldName); - TRI_ASSERT(_collectionsByName.size() == _collectionsById.size()); + } catch (...) { + _collectionsByName.erase(newName); + throw; } + TRI_ASSERT(_collectionsByName.size() == _collectionsById.size()); + + locker.unlock(); + writeLocker.unlock(); // invalidate all entries for the two collections + arangodb::aql::PlanCache::instance()->invalidate(this); arangodb::aql::QueryCache::instance()->invalidate( this, std::vector{oldName, newName}); diff --git a/arangod/VocBase/vocbase.h b/arangod/VocBase/vocbase.h index 2dc2bd43f5..e913f2678a 100644 --- a/arangod/VocBase/vocbase.h +++ b/arangod/VocBase/vocbase.h @@ -208,51 +208,24 @@ struct TRI_vocbase_t { /// @brief signal the cleanup thread to wake up void signalCleanup(); - /// @brief whether or not the vocbase has been marked as deleted - inline bool isDropped() const { - auto refCount = _refCount.load(); - // if the stored value is odd, it means the database has been marked as - // deleted - return (refCount % 2 == 1); - } + /// @brief increase the reference counter for a database. + /// will return true if the refeence counter was increased, false otherwise + /// in case false is returned, the database must not be used + bool use(); - /// @brief increase the reference counter for a database - bool use() { - // increase the reference counter by 2. - // this is because we use odd values to indicate that the database has been - // marked as deleted - auto oldValue = _refCount.fetch_add(2, std::memory_order_release); - // check if the deleted bit is set - return (oldValue % 2 != 1); - } + void forceUse(); /// @brief decrease the reference counter for a database - void release() { - // decrease the reference counter by 2. 
- // this is because we use odd values to indicate that the database has been - // marked as deleted - auto oldValue = _refCount.fetch_sub(2, std::memory_order_release); - TRI_ASSERT(oldValue >= 2); - } + void release(); + + /// @brief returns whether the database is dangling + bool isDangling() const; - /// @brief returns whether the database can be dropped - bool canBeDropped() const { - if (isSystem()) { - return false; - } - auto refCount = _refCount.load(); - // we are intentionally comparing with exactly 1 here, because a 1 means - // that noone else references the database but it has been marked as deleted - return (refCount == 1); - } + /// @brief whether or not the vocbase has been marked as deleted + bool isDropped() const; /// @brief marks a database as deleted - bool markAsDropped() { - auto oldValue = _refCount.fetch_or(1, std::memory_order_release); - // if the previously stored value is odd, it means the database has already - // been marked as deleted - return (oldValue % 2 == 0); - } + bool markAsDropped(); /// @brief returns whether the database is the system database bool isSystem() const { return name() == TRI_VOC_SYSTEM_DATABASE; } @@ -329,6 +302,9 @@ struct TRI_vocbase_t { void releaseCollection(arangodb::LogicalCollection* collection); private: + /// @brief looks up a collection by name, without acquiring a lock + arangodb::LogicalCollection* lookupCollectionNoLock(std::string const& name); + int loadCollection(arangodb::LogicalCollection* collection, TRI_vocbase_col_status_e& status, bool setStatus = true); diff --git a/arangod/dbg.cmake b/arangod/dbg.cmake index fcc3ff0dfe..0915feef68 100644 --- a/arangod/dbg.cmake +++ b/arangod/dbg.cmake @@ -7,6 +7,6 @@ message( "CMAKE_PROJECT_NAME ${CMAKE_PROJECT_NAME}/ CMAKE_INSTALL_SBINDIR ${CMAK install_debinfo( "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip" "${CMAKE_PROJECT_NAME}/${CMAKE_INSTALL_SBINDIR}" - "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOD}${CMAKE_EXECUTABLE_SUFFIX}" - 
"${BIN_ARANGOD}${CMAKE_EXECUTABLE_SUFFIX}" + "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}" + "${BIN_ARANGOD}" ) diff --git a/arangosh/CMakeLists.txt b/arangosh/CMakeLists.txt index 362ffafc08..6c05718de6 100644 --- a/arangosh/CMakeLists.txt +++ b/arangosh/CMakeLists.txt @@ -31,7 +31,6 @@ add_executable(${BIN_ARANGOBENCH} target_link_libraries(${BIN_ARANGOBENCH} ${LIB_ARANGO} - ${LINENOISE_LIBS} ${MSVC_LIBS} ${SYSTEM_LIBRARIES} boost_system @@ -78,7 +77,6 @@ add_executable(${BIN_ARANGODUMP} target_link_libraries(${BIN_ARANGODUMP} ${LIB_ARANGO} - ${LINENOISE_LIBS} ${MSVC_LIBS} ${SYSTEM_LIBRARIES} boost_system @@ -172,7 +170,6 @@ add_executable(${BIN_ARANGOIMP} target_link_libraries(${BIN_ARANGOIMP} ${LIB_ARANGO} - ${LINENOISE_LIBS} ${MSVC_LIBS} ${SYSTEM_LIBRARIES} boost_system @@ -219,7 +216,6 @@ add_executable(${BIN_ARANGORESTORE} target_link_libraries(${BIN_ARANGORESTORE} ${LIB_ARANGO} - ${LINENOISE_LIBS} ${MSVC_LIBS} ${SYSTEM_LIBRARIES} boost_system diff --git a/arangosh/dbg.cmake b/arangosh/dbg.cmake index 32dc3cff5f..3a78fe94c7 100644 --- a/arangosh/dbg.cmake +++ b/arangosh/dbg.cmake @@ -2,35 +2,38 @@ # these are the install targets for the client package. # we can't use RUNTIME DESTINATION here. 
+install_debinfo( + "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip" + "${CMAKE_PROJECT_NAME}" + "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/" + "${BIN_ARANGOBENCH}") + +install_debinfo( + "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip" + "${CMAKE_PROJECT_NAME}" + "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}" + "${BIN_ARANGODUMP}") install_debinfo( "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip" "${CMAKE_PROJECT_NAME}/${CMAKE_INSTALL_BINDIR}" - "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOBENCH}${CMAKE_EXECUTABLE_SUFFIX}" - "${BIN_ARANGOBENCH}${CMAKE_EXECUTABLE_SUFFIX}") + "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}" + "${BIN_ARANGOIMP}") install_debinfo( "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip" "${CMAKE_PROJECT_NAME}/${CMAKE_INSTALL_BINDIR}" - "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGODUMP}${CMAKE_EXECUTABLE_SUFFIX}" - "${BIN_ARANGODUMP}${CMAKE_EXECUTABLE_SUFFIX}") + "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}" + "${BIN_ARANGORESTORE}") + install_debinfo( "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip" "${CMAKE_PROJECT_NAME}/${CMAKE_INSTALL_BINDIR}" - "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOIMP}${CMAKE_EXECUTABLE_SUFFIX}" - "${BIN_ARANGOIMP}${CMAKE_EXECUTABLE_SUFFIX}") + "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}" + "${BIN_ARANGOEXPORT}") + install_debinfo( "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip" "${CMAKE_PROJECT_NAME}/${CMAKE_INSTALL_BINDIR}" - "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGORESTORE}${CMAKE_EXECUTABLE_SUFFIX}" - "${BIN_ARANGORESTORE}${CMAKE_EXECUTABLE_SUFFIX}") -install_debinfo( - "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip" - "${CMAKE_PROJECT_NAME}/${CMAKE_INSTALL_BINDIR}" - "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOEXPORT}${CMAKE_EXECUTABLE_SUFFIX}" - "${BIN_ARANGOEXPORT}${CMAKE_EXECUTABLE_SUFFIX}") -install_debinfo( - "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip" - "${CMAKE_PROJECT_NAME}/${CMAKE_INSTALL_BINDIR}" - "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOSH}${CMAKE_EXECUTABLE_SUFFIX}" - "${BIN_ARANGOSH}${CMAKE_EXECUTABLE_SUFFIX}") + 
"${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}" + "${BIN_ARANGOSH}") diff --git a/arangosh/install.cmake b/arangosh/install.cmake index a690f498f1..49bac6e192 100644 --- a/arangosh/install.cmake +++ b/arangosh/install.cmake @@ -2,104 +2,15 @@ # these are the install targets for the client package. # we can't use RUNTIME DESTINATION here. -set(STRIP_DIR "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/strip") -execute_process(COMMAND mkdir -p ${STRIP_DIR}) - -set(FILE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOBENCH}${CMAKE_EXECUTABLE_SUFFIX}) -set(STRIP_FILE ${STRIP_DIR}/${BIN_ARANGOBENCH}${CMAKE_EXECUTABLE_SUFFIX}) -if (NOT MSVC AND CMAKE_STRIP) - execute_process(COMMAND "rm" -f ${STRIP_FILE}) - execute_process(COMMAND "cp" ${FILE} ${STRIP_DIR}) - execute_process(COMMAND "${CMAKE_STRIP}" ${STRIP_FILE}) - set(FILE ${STRIP_FILE}) -endif() -install( - PROGRAMS ${FILE} - DESTINATION ${CMAKE_INSTALL_BINDIR}) -install_config(arangobench) - - -set(FILE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGODUMP}${CMAKE_EXECUTABLE_SUFFIX}) -set(STRIP_FILE ${STRIP_DIR}/${BIN_ARANGODUMP}${CMAKE_EXECUTABLE_SUFFIX}) -if (NOT MSVC AND CMAKE_STRIP) - execute_process(COMMAND "rm" -f ${STRIP_FILE}) - execute_process(COMMAND "cp" ${FILE} ${STRIP_DIR}) - execute_process(COMMAND "${CMAKE_STRIP}" ${STRIP_FILE}) - set(FILE ${STRIP_FILE}) -endif() -install( - PROGRAMS ${FILE} - DESTINATION ${CMAKE_INSTALL_BINDIR}) -install_config(arangodump) - - -set(FILE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOIMP}${CMAKE_EXECUTABLE_SUFFIX}) -set(STRIP_FILE ${STRIP_DIR}/${BIN_ARANGOIMP}${CMAKE_EXECUTABLE_SUFFIX}) -if (NOT MSVC AND CMAKE_STRIP) - execute_process(COMMAND "rm" -f ${STRIP_FILE}) - execute_process(COMMAND "cp" ${FILE} ${STRIP_DIR}) - execute_process(COMMAND "${CMAKE_STRIP}" ${STRIP_FILE}) - set(FILE ${STRIP_FILE}) -endif() -install( - PROGRAMS ${FILE} - DESTINATION ${CMAKE_INSTALL_BINDIR}) -install_config(arangoimp) - - -set(FILE 
${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGORESTORE}${CMAKE_EXECUTABLE_SUFFIX}) -set(STRIP_FILE ${STRIP_DIR}/${BIN_ARANGORESTORE}${CMAKE_EXECUTABLE_SUFFIX}) -if (NOT MSVC AND CMAKE_STRIP) - execute_process(COMMAND "rm" -f ${STRIP_FILE}) - execute_process(COMMAND "cp" ${FILE} ${STRIP_DIR}) - execute_process(COMMAND "${CMAKE_STRIP}" ${STRIP_FILE}) - set(FILE ${STRIP_FILE}) -endif() -install( - PROGRAMS ${FILE} - DESTINATION ${CMAKE_INSTALL_BINDIR}) -install_config(arangorestore) - - -set(FILE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOEXPORT}${CMAKE_EXECUTABLE_SUFFIX}) -set(STRIP_FILE ${STRIP_DIR}/${BIN_ARANGOEXPORT}${CMAKE_EXECUTABLE_SUFFIX}) -if (NOT MSVC AND CMAKE_STRIP) - execute_process(COMMAND "rm" -f ${STRIP_FILE}) - execute_process(COMMAND "cp" ${FILE} ${STRIP_DIR}) - execute_process(COMMAND "${CMAKE_STRIP}" ${STRIP_FILE}) - set(FILE ${STRIP_FILE}) -endif() -install( - PROGRAMS ${FILE} - DESTINATION ${CMAKE_INSTALL_BINDIR}) -install_config(arangoexport) - - -set(FILE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOSH}${CMAKE_EXECUTABLE_SUFFIX}) -set(STRIP_FILE ${STRIP_DIR}/${BIN_ARANGOSH}${CMAKE_EXECUTABLE_SUFFIX}) -if (NOT MSVC AND CMAKE_STRIP) - execute_process(COMMAND "rm" -f ${STRIP_FILE}) - execute_process(COMMAND "cp" ${FILE} ${STRIP_DIR}) - execute_process(COMMAND "${CMAKE_STRIP}" ${STRIP_FILE}) - set(FILE ${STRIP_FILE}) -endif() -install( - PROGRAMS ${FILE} - DESTINATION ${CMAKE_INSTALL_BINDIR}) -install_config(arangosh) - - -set(FILE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${BIN_ARANGOVPACK}${CMAKE_EXECUTABLE_SUFFIX}) -set(STRIP_FILE ${STRIP_DIR}/${BIN_ARANGOVPACK}${CMAKE_EXECUTABLE_SUFFIX}) -if (NOT MSVC AND CMAKE_STRIP) - execute_process(COMMAND "rm" -f ${STRIP_FILE}) - execute_process(COMMAND "cp" ${FILE} ${STRIP_DIR}) - execute_process(COMMAND "${CMAKE_STRIP}" ${STRIP_FILE}) - set(FILE ${STRIP_FILE}) -endif() -install( - PROGRAMS ${FILE} - DESTINATION ${CMAKE_INSTALL_BINDIR}) +set(STRIP_DIR "${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/cstrip") 
+add_custom_target(strip_install_client ALL) +strip_install_bin_and_config(arangobench ${STRIP_DIR} ${CMAKE_INSTALL_BINDIR} strip_install_client) +strip_install_bin_and_config(arangodump ${STRIP_DIR} ${CMAKE_INSTALL_BINDIR} strip_install_client) +strip_install_bin_and_config(arangoimp ${STRIP_DIR} ${CMAKE_INSTALL_BINDIR} strip_install_client) +strip_install_bin_and_config(arangorestore ${STRIP_DIR} ${CMAKE_INSTALL_BINDIR} strip_install_client) +strip_install_bin_and_config(arangoexport ${STRIP_DIR} ${CMAKE_INSTALL_BINDIR} strip_install_client) +strip_install_bin_and_config(arangosh ${STRIP_DIR} ${CMAKE_INSTALL_BINDIR} strip_install_client) +strip_install_bin_and_config(arangovpack ${STRIP_DIR} ${CMAKE_INSTALL_BINDIR} strip_install_client) install_command_alias(${BIN_ARANGOSH} ${CMAKE_INSTALL_BINDIR} diff --git a/cmake/InstallMacros.cmake b/cmake/InstallMacros.cmake index a9ad21de67..dc2889549b 100644 --- a/cmake/InstallMacros.cmake +++ b/cmake/InstallMacros.cmake @@ -11,6 +11,7 @@ endif() # Global macros ---------------------------------------------------------------- macro (generate_root_config name) + message(INFO "reading configuration file ${PROJECT_SOURCE_DIR}/etc/arangodb3/${name}.conf.in") FILE(READ ${PROJECT_SOURCE_DIR}/etc/arangodb3/${name}.conf.in FileContent) STRING(REPLACE "@PKGDATADIR@" "@ROOTDIR@/${CMAKE_INSTALL_DATAROOTDIR_ARANGO}" diff --git a/cmake/debugInformation.cmake b/cmake/debugInformation.cmake index d5a9bd9f7a..072cb9fef8 100644 --- a/cmake/debugInformation.cmake +++ b/cmake/debugInformation.cmake @@ -5,16 +5,21 @@ macro(install_debinfo STRIP_DIR USER_SUB_DEBINFO_DIR - USER_FILE - USER_STRIP_FILE) + USER_OUTPUT_DIRECTORY + USER_TARGET) + string(LENGTH "${USER_TARGET}" TLEN) + if (TLEN EQUAL 0) + message(FATAL_ERROR "empty target specified for creating debug file") + endif() + set(SUB_DEBINFO_DIR ${USER_SUB_DEBINFO_DIR}) - set(FILE ${USER_FILE}) - set(STRIP_FILE ${STRIP_DIR}/${USER_STRIP_FILE}) + set(FILE 
${USER_OUTPUT_DIRECTORY}/${USER_TARGET}${CMAKE_EXECUTABLE_SUFFIX}) + set(STRIP_FILE ${STRIP_DIR}/${USER_TARGET}${CMAKE_EXECUTABLE_SUFFIX}) - execute_process(COMMAND mkdir -p ${STRIP_DIR}) - if (NOT MSVC AND CMAKE_STRIP AND FILE_EXECUTABLE) - execute_process(COMMAND "rm" -f ${STRIP_FILE}) + if (NOT MSVC AND CMAKE_STRIP AND FILE_EXECUTABLE AND STRIP_FILE) + execute_process(COMMAND ${CMAKE_COMMAND} -E make_directory ${STRIP_DIR}) + execute_process(COMMAND ${CMAKE_COMMAND} -E remove ${STRIP_FILE}) execute_process( COMMAND ${FILE_EXECUTABLE} ${FILE} @@ -33,7 +38,7 @@ macro(install_debinfo set(SUB_DEBINFO_DIR .build-id/${SUB_DIR}) set(STRIP_FILE "${STRIP_FILE}.debug") else () - set(STRIP_FILE ${USER_STRIP_FILE}) + set(STRIP_FILE ${USER_TARGET}${CMAKE_EXECUTABLE_SUFFIX}) endif() execute_process(COMMAND ${CMAKE_OBJCOPY} --only-keep-debug ${FILE} ${STRIP_DIR}/${STRIP_FILE}) set(FILE ${STRIP_DIR}/${STRIP_FILE}) @@ -43,7 +48,6 @@ macro(install_debinfo endif() endmacro() - # Detect whether this system has SHA checksums macro(detect_binary_id_type sourceVar) set(${sourceVar} false) @@ -63,3 +67,39 @@ macro(detect_binary_id_type sourceVar) endif() endif() endmacro() + +macro(strip_install_bin_and_config + TARGET + INTERMEDIATE_STRIP_DIR + TARGET_DIR + BIND_TARGET) + + string(LENGTH "${TARGET}" TLEN) + if (TLEN EQUAL 0) + message(FATAL_ERROR "empty target specified for creating stripped file") + endif() + set(FILE ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X}/${TARGET}${CMAKE_EXECUTABLE_SUFFIX}) + set(STRIP_FILE ${INTERMEDIATE_STRIP_DIR}/${TARGET}${CMAKE_EXECUTABLE_SUFFIX}) + if (NOT MSVC AND CMAKE_STRIP) + set(TARGET_NAME "${BIND_TARGET}_${TARGET}") + ExternalProject_Add("${TARGET_NAME}" + DEPENDS ${BIND_TARGET} + SOURCE_DIR ${CMAKE_RUNTIME_OUTPUT_DIRECTORY_X} + + CONFIGURE_COMMAND ${CMAKE_COMMAND} -E make_directory ${INTERMEDIATE_STRIP_DIR} + COMMENT "creating strip directory" + + BUILD_COMMAND ${CMAKE_STRIP} ${FILE} -o ${STRIP_FILE} + COMMENT "stripping binary" + INSTALL_COMMAND "" + ) 
+ install(PROGRAMS ${STRIP_FILE} + DESTINATION ${TARGET_DIR}) + else () + install( + PROGRAMS ${FILE} + DESTINATION ${TARGET_DIR}) + endif() + install_config(${TARGET}) + +endmacro() diff --git a/cmake/packages/bundle.cmake b/cmake/packages/bundle.cmake index 439ceb44ce..bc1ddd83db 100644 --- a/cmake/packages/bundle.cmake +++ b/cmake/packages/bundle.cmake @@ -44,13 +44,13 @@ add_custom_target(package-arongodb-server-bundle list(APPEND PACKAGES_LIST package-arongodb-server-bundle) add_custom_target(copy_bundle_packages - COMMAND cp *.dmg ${PACKAGE_TARGET_DIR}) + COMMAND ${CMAKE_COMMAND} -E copy ${CPACK_PACKAGE_FILE_NAME}.dmg ${PACKAGE_TARGET_DIR}) list(APPEND COPY_PACKAGES_LIST copy_bundle_packages) add_custom_target(remove_packages - COMMAND rm -f *.dmg - COMMAND rm -rf _CPack_Packages + COMMAND ${CMAKE_COMMAND} -E remove ${CPACK_PACKAGE_FILE_NAME}.dmg + COMMAND ${CMAKE_COMMAND} -E remove_directory _CPack_Packages ) list(APPEND CLEAN_PACKAGES_LIST remove_packages) diff --git a/cmake/packages/client/deb.txt b/cmake/packages/client/deb.txt index b6032e6224..702942ba48 100644 --- a/cmake/packages/client/deb.txt +++ b/cmake/packages/client/deb.txt @@ -29,12 +29,13 @@ set(CMAKE_INSTALL_FULL_SYSCONFDIR_ARANGO @CMAKE_INSTALL_FULL_SYSCONFDIR_ARANGO@) set(INSTALL_CONFIGFILES_LIST) set(PACKAGING_HANDLE_CONFIG_FILES false) - +# set(CPACK_DEBIAN_PACKAGE_DEBUG TRUE) ################################################################################ # Substitute the install binaries: ################################################################################ set(BIN_ARANGOBENCH @BIN_ARANGOBENCH@) set(BIN_ARANGODUMP @BIN_ARANGODUMP@) +set(BIN_ARANGOEXPORT @BIN_ARANGOEXPORT@) set(BIN_ARANGOIMP @BIN_ARANGOIMP@) set(BIN_ARANGORESTORE @BIN_ARANGORESTORE@) set(BIN_ARANGOSH @BIN_ARANGOSH@) @@ -50,10 +51,12 @@ set(ARANGODB_PACKAGE_CONTACT @ARANGODB_PACKAGE_CONTACT@) set(ARANGODB_PACKAGE_REVISION @ARANGODB_PACKAGE_REVISION@) set(ARANGODB_PACKAGE_VENDOR @ARANGODB_PACKAGE_VENDOR@) 
set(CMAKE_TARGET_ARCHITECTURES @CMAKE_TARGET_ARCHITECTURES@) +set(ARANGODB_PACKAGE_ARCHITECTURE @ARANGODB_PACKAGE_ARCHITECTURE@) +set(CPACK_PACKAGE_NAME @CPACK_CLIENT_PACKAGE_NAME@) +set(CPACK_PACKAGE_FILE_NAME @ARANGODB_CLIENT_PACKAGE_FILE_NAME@) set(ORIGINAL_SOURCE_DIR @PROJECT_SOURCE_DIR@) set(PROJECT_SOURCE_DIR @PROJECT_SOURCE_DIR@) -set(CPACK_PACKAGE_NAME @CPACK_PACKAGE_NAME@) set(CPACKG_PACKAGE_CONFLICTS @CPACKG_PACKAGE_CONFLICTS@) set(ICU_DT "@ICU_DT@") @@ -64,7 +67,6 @@ set(INSTALL_ICU_DT_DEST "@INSTALL_ICU_DT_DEST@") # Get the final values for cpack: ################################################################################ set(CPACK_PACKAGE_VERSION "${ARANGODB_VERSION}") -set(CPACK_PACKAGE_NAME "${CPACK_PACKAGE_NAME}-client") set(CPACK_DEBIAN_PACKAGE_SECTION "shell") set(CPACK_PACKAGE_VENDOR ${ARANGODB_PACKAGE_VENDOR}) set(CPACK_PACKAGE_CONTACT ${ARANGODB_PACKAGE_CONTACT}) @@ -87,22 +89,7 @@ endif () file(READ "${PROJECT_SOURCE_DIR}/Installation/debian/client_packagedesc.txt" CPACK_DEBIAN_PACKAGE_DESCRIPTION) -################################################################################ -# specify which target archcitecture the package is going to be: -################################################################################ - -if(CMAKE_TARGET_ARCHITECTURES MATCHES ".*x86_64.*") - set(ARANGODB_PACKAGE_ARCHITECTURE "amd64") -elseif(CMAKE_TARGET_ARCHITECTURES MATCHES "aarch64") - set(ARANGODB_PACKAGE_ARCHITECTURE "arm64") -elseif(CMAKE_TARGET_ARCHITECTURES MATCHES "armv7") - set(ARANGODB_PACKAGE_ARCHITECTURE "armhf") -else() - set(ARANGODB_PACKAGE_ARCHITECTURE "i386") -endif() - set(CPACK_DEBIAN_PACKAGE_ARCHITECTURE ${ARANGODB_PACKAGE_ARCHITECTURE}) -set(CPACK_PACKAGE_FILE_NAME "${CPACK_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION}-${ARANGODB_PACKAGE_REVISION}_${ARANGODB_PACKAGE_ARCHITECTURE}") ################################################################################ # Install the external files into the package directory: @@ -110,6 +97,8 @@ 
set(CPACK_PACKAGE_FILE_NAME "${CPACK_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION}-${AR set(INSTALL_MACROS_NO_TARGET_INSTALL TRUE) +include(${CMAKE_ROOT}/Modules/ExternalProject.cmake) +include(${ORIGINAL_SOURCE_DIR}/cmake/debugInformation.cmake) include(${ORIGINAL_SOURCE_DIR}/cmake/InstallMacros.cmake) include(${ORIGINAL_SOURCE_DIR}/cmake/InstallArangoDBJSClient.cmake) include(${ORIGINAL_SOURCE_DIR}/arangosh/install.cmake) diff --git a/cmake/packages/client/nsis.txt b/cmake/packages/client/nsis.txt index 4617a7a1dc..ed893ad2b6 100644 --- a/cmake/packages/client/nsis.txt +++ b/cmake/packages/client/nsis.txt @@ -20,6 +20,7 @@ set(CMAKE_BUILD_TYPE @CMAKE_BUILD_TYPE@) set(BIN_ARANGOBENCH @BIN_ARANGOBENCH@) set(BIN_ARANGODUMP @BIN_ARANGODUMP@) +set(BIN_ARANGOEXPORT @BIN_ARANGOEXPORT@) set(BIN_ARANGOIMP @BIN_ARANGOIMP@) set(BIN_ARANGORESTORE @BIN_ARANGORESTORE@) set(BIN_ARANGOSH @BIN_ARANGOSH@) @@ -39,10 +40,11 @@ set(ARANGODB_PACKAGE_REVISION "@ARANGODB_PACKAGE_REVISION@") set(CMAKE_INSTALL_FULL_BINDIR "@CMAKE_INSTALL_FULL_BINDIR@") set(CMAKE_TARGET_ARCHITECTURES "@CMAKE_TARGET_ARCHITECTURES@") +set(CPACK_PACKAGE_FILE_NAME @ARANGODB_CLIENT_PACKAGE_FILE_NAME@) set(CMAKE_INSTALL_SYSCONFDIR_ARANGO "@CMAKE_INSTALL_SYSCONFDIR_ARANGO@") set(CMAKE_INSTALL_FULL_SYSCONFDIR_ARANGO "@CMAKE_INSTALL_FULL_SYSCONFDIR_ARANGO@") -set(CPACK_PACKAGE_NAME "@CPACK_PACKAGE_NAME@") +set(CPACK_PACKAGE_NAME "@CPACK_CLIENT_PACKAGE_NAME@") set(CMAKE_CL_64 @CMAKE_CL_64@) if (CMAKE_CL_64) @@ -68,7 +70,6 @@ set(INSTALL_ICU_DT_DEST "@INSTALL_ICU_DT_DEST@") ################################################################################ set(CPACK_PACKAGE_VERSION "${ARANGODB_VERSION}") -set(CPACK_PACKAGE_NAME "${CPACK_PACKAGE_NAME}-client") set(CPACK_PACKAGE_CONTACT ${ARANGODB_PACKAGE_CONTACT}) if (USE_ENTERPRISE) @@ -77,7 +78,6 @@ else () set(CPACK_RESOURCE_FILE_LICENSE "${PROJECT_SOURCE_DIR}/LICENSE") endif () -set(CPACK_PACKAGE_FILE_NAME 
"${CPACK_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION}-${ARANGODB_PACKAGE_REVISION}_${ARANGODB_PACKAGE_ARCHITECTURE}") ################################################################################ # ------------------------------------------------------------------------------ @@ -88,7 +88,6 @@ set(CMAKE_INSTALL_FULL_SBINDIR "${CMAKE_INSTALL_FULL_BINDIR}") set(W_INSTALL_FILES "${PROJECT_SOURCE_DIR}/Installation/Windows/") set(CPACK_MODULE_PATH "${W_INSTALL_FILES}/client/Templates") set(CPACK_PLUGIN_PATH "${W_INSTALL_FILES}/client/Plugins") -set(CPACK_PACKAGE_NAME "ArangoSH") set(CPACK_NSIS_DISPLAY_NAME, ${ARANGODB_DISPLAY_NAME}) set(CPACK_NSIS_HELP_LINK ${ARANGODB_HELP_LINK}) set(CPACK_NSIS_URL_INFO_ABOUT ${ARANGODB_URL_INFO_ABOUT}) @@ -168,6 +167,8 @@ set(CPACK_ARANGODB_NSIS_DEFINES " ################################################################################ # Install the external files into the package directory: ################################################################################ +include(${CMAKE_ROOT}/Modules/ExternalProject.cmake) +include(${ORIGINAL_SOURCE_DIR}/cmake/debugInformation.cmake) include(${ORIGINAL_SOURCE_DIR}/cmake/InstallArangoDBJSClient.cmake) include(${ORIGINAL_SOURCE_DIR}/arangosh/install.cmake) diff --git a/cmake/packages/dbg/deb.txt b/cmake/packages/dbg/deb.txt index 31d01c0794..ff25c43538 100644 --- a/cmake/packages/dbg/deb.txt +++ b/cmake/packages/dbg/deb.txt @@ -1,7 +1,7 @@ ################################################################################ -# the client package is a complete cmake sub package. +# the debug package is a complete cmake sub package. 
################################################################################ -project(@CMAKE_PROJECT_NAME@) +project(PACKAGE-DBG) cmake_minimum_required(VERSION 2.8) ################################################################################ @@ -45,11 +45,12 @@ set(BIN_ARANGOD @BIN_ARANGOD@) set(BIN_ARANGOBENCH @BIN_ARANGOBENCH@) set(BIN_ARANGODUMP @BIN_ARANGODUMP@) +set(BIN_ARANGOEXPORT @BIN_ARANGOEXPORT@) set(BIN_ARANGOIMP @BIN_ARANGOIMP@) set(BIN_ARANGORESTORE @BIN_ARANGORESTORE@) set(BIN_ARANGOSH @BIN_ARANGOSH@) set(BIN_ARANGOVPACK @BIN_ARANGOVPACK@) - +# set(CPACK_DEBIAN_PACKAGE_DEBUG TRUE) ################################################################################ # build specific variables: ################################################################################ @@ -75,7 +76,7 @@ set(CPACK_DEBIAN_PACKAGE_SECTION "devel") set(CPACK_PACKAGE_VENDOR "${ARANGODB_PACKAGE_VENDOR}") set(CPACK_PACKAGE_CONTACT ${ARANGODB_PACKAGE_CONTACT}) set(CPACK_DEBIAN_PACKAGE_HOMEPAGE ${ARANGODB_URL_INFO_ABOUT}) -set(CPACK_DEBIAN_PACKAGE_SHLIBDEPS ON) +set(CPACK_DEBIAN_PACKAGE_SHLIBDEPS OFF) set(CPACK_DEBIAN_COMPRESSION_TYPE "xz") set(CPACK_COMPONENTS_ALL debian-extras) set(CPACK_GENERATOR "DEB") @@ -107,7 +108,7 @@ else() endif() set(CPACK_DEBIAN_PACKAGE_ARCHITECTURE ${ARANGODB_PACKAGE_ARCHITECTURE}) -set(CPACK_PACKAGE_FILE_NAME "${CPACK_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION}-${ARANGODB_PACKAGE_REVISION}_${ARANGODB_PACKAGE_ARCHITECTURE}") +set(CPACK_PACKAGE_FILE_NAME @ARANGODB_DBG_PACKAGE_FILE_NAME@) ################################################################################ # Install the external files into the package directory: diff --git a/cmake/packages/deb.cmake b/cmake/packages/deb.cmake index 64561b065b..7b27ba3bef 100644 --- a/cmake/packages/deb.cmake +++ b/cmake/packages/deb.cmake @@ -42,17 +42,13 @@ list(APPEND CPACK_DEBIAN_PACKAGE_CONTROL_EXTRA "${PROJECT_SOURCE_DIR}/Installation/debian/postrm" 
"${PROJECT_SOURCE_DIR}/Installation/debian/prerm") -if(CMAKE_TARGET_ARCHITECTURES MATCHES ".*x86_64.*") - set(ARANGODB_PACKAGE_ARCHITECTURE "amd64") -elseif(CMAKE_TARGET_ARCHITECTURES MATCHES "aarch64") - set(ARANGODB_PACKAGE_ARCHITECTURE "arm64") -elseif(CMAKE_TARGET_ARCHITECTURES MATCHES "armv7") - set(ARANGODB_PACKAGE_ARCHITECTURE "armhf") -else() - set(ARANGODB_PACKAGE_ARCHITECTURE "i386") -endif() +################################################################################ +# specify which target archcitecture the package is going to be: +################################################################################ + set(CPACK_DEBIAN_PACKAGE_ARCHITECTURE ${ARANGODB_PACKAGE_ARCHITECTURE}) -set(CPACK_PACKAGE_FILE_NAME "${CPACK_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION}-${ARANGODB_PACKAGE_REVISION}_${ARANGODB_PACKAGE_ARCHITECTURE}") + +set(ARANGODB_DBG_PACKAGE_FILE_NAME "${CPACK_PACKAGE_NAME}-dbg-${CPACK_PACKAGE_VERSION}-${ARANGODB_PACKAGE_REVISION}_${ARANGODB_PACKAGE_ARCHITECTURE}") set(conffiles_list "") if ("${INSTALL_CONFIGFILES_LIST}" STREQUAL "") @@ -91,26 +87,41 @@ list(APPEND PACKAGES_LIST package-arongodb-server) ################################################################################ # hook to build the client package ################################################################################ +set(CPACK_CLIENT_PACKAGE_NAME "${CPACK_PACKAGE_NAME}-client") + +set(ARANGODB_CLIENT_PACKAGE_FILE_NAME "${CPACK_CLIENT_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION}-${ARANGODB_PACKAGE_REVISION}_${ARANGODB_PACKAGE_ARCHITECTURE}") + set(CLIENT_BUILD_DIR ${CMAKE_CURRENT_BINARY_DIR}/packages/arangodb-client) configure_file(cmake/packages/client/deb.txt ${CLIENT_BUILD_DIR}/CMakeLists.txt @ONLY) add_custom_target(package-arongodb-client COMMAND ${CMAKE_COMMAND} . 
+ COMMENT "configuring client package environment" COMMAND ${CMAKE_CPACK_COMMAND} -G DEB - COMMAND cp *.deb ${PROJECT_BINARY_DIR} + COMMENT "building client packages" + COMMAND ${CMAKE_COMMAND} -E copy ${CLIENT_BUILD_DIR}/${ARANGODB_CLIENT_PACKAGE_FILE_NAME}.deb ${PROJECT_BINARY_DIR} + COMMENT "uploading client packages" WORKING_DIRECTORY ${CLIENT_BUILD_DIR}) list(APPEND PACKAGES_LIST package-arongodb-client) add_custom_target(copy_deb_packages - COMMAND cp *.deb ${PACKAGE_TARGET_DIR}) + COMMAND ${CMAKE_COMMAND} -E copy ${ARANGODB_CLIENT_PACKAGE_FILE_NAME}.deb ${PACKAGE_TARGET_DIR} + COMMAND ${CMAKE_COMMAND} -E copy ${CPACK_PACKAGE_FILE_NAME}.deb ${PACKAGE_TARGET_DIR} + COMMAND ${CMAKE_COMMAND} -E copy ${ARANGODB_DBG_PACKAGE_FILE_NAME}.deb ${PACKAGE_TARGET_DIR} + COMMENT "copying packages to ${PACKAGE_TARGET_DIR}") list(APPEND COPY_PACKAGES_LIST copy_deb_packages) add_custom_target(remove_packages - COMMAND rm -f *.deb - COMMAND rm -rf _CPack_Packages - COMMAND rm -rf packages + COMMAND ${CMAKE_COMMAND} -E remove_directory _CPack_Packages + COMMENT Removing server packaging build directory + COMMAND ${CMAKE_COMMAND} -E remove_directory packages + COMMENT Removing client packaging build directory + COMMAND ${CMAKE_COMMAND} -E remove ${ARANGODB_CLIENT_PACKAGE_FILE_NAME}.deb + COMMAND ${CMAKE_COMMAND} -E remove ${CPACK_PACKAGE_FILE_NAME}.deb + COMMAND ${CMAKE_COMMAND} -E remove ${ARANGODB_DBG_PACKAGE_FILE_NAME}.deb + COMMENT Removing local target packages ) list(APPEND CLEAN_PACKAGES_LIST remove_packages) @@ -123,9 +134,9 @@ set(DEBUG_BUILD_DIR ${CMAKE_CURRENT_BINARY_DIR}/packages/arangodb3-dbg) configure_file(cmake/packages/dbg/deb.txt ${DEBUG_BUILD_DIR}/CMakeLists.txt @ONLY) add_custom_target(package-arongodb-dbg - COMMAND ${CMAKE_COMMAND} . -DCMAKE_OBJCOPY=${CMAKE_OBJCOPY} + COMMAND ${CMAKE_COMMAND} . 
-DCMAKE_OBJCOPY=${CMAKE_OBJCOPY} COMMAND ${CMAKE_CPACK_COMMAND} -G DEB - COMMAND cp *.deb ${PROJECT_BINARY_DIR} + COMMAND ${CMAKE_COMMAND} -E copy ${ARANGODB_DBG_PACKAGE_FILE_NAME}.deb ${PROJECT_BINARY_DIR} WORKING_DIRECTORY ${DEBUG_BUILD_DIR}) list(APPEND PACKAGES_LIST package-arongodb-dbg) diff --git a/cmake/packages/nsis.cmake b/cmake/packages/nsis.cmake index b43a8b43ec..608ec09c3a 100644 --- a/cmake/packages/nsis.cmake +++ b/cmake/packages/nsis.cmake @@ -13,15 +13,13 @@ set(CPACK_NSIS_MODIFY_PATH ON) set(CPACK_NSIS_ENABLE_UNINSTALL_BEFORE_INSTALL 1) set(CPACK_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/Installation/Windows/Templates") set(CPACK_PLUGIN_PATH "${CMAKE_CURRENT_SOURCE_DIR}/Installation/Windows/Plugins") -set(BITS 64) + if (CMAKE_CL_64) # this needs to remain a $string for the template: SET(CPACK_NSIS_INSTALL_ROOT "$PROGRAMFILES64") - SET(ARANGODB_PACKAGE_ARCHITECTURE "win64") SET(BITS 64) else () SET(CPACK_NSIS_INSTALL_ROOT "$PROGRAMFILES") - SET(ARANGODB_PACKAGE_ARCHITECTURE "win32") SET(BITS 32) endif () @@ -59,7 +57,6 @@ set(CPACK_ARANGODB_NSIS_DEFINES " !define BIN_DIR '${W_BIN_DIR}' ") -set(CPACK_PACKAGE_FILE_NAME "${CPACK_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION}-${ARANGODB_PACKAGE_REVISION}_${ARANGODB_PACKAGE_ARCHITECTURE}") ################################################################################ # hook to build the server package @@ -82,6 +79,10 @@ list(APPEND PACKAGES_LIST package-arongodb-server-zip) ################################################################################ # hook to build the client package ################################################################################ +set(CPACK_CLIENT_PACKAGE_NAME "${CPACK_PACKAGE_NAME}-client") + +set(ARANGODB_CLIENT_PACKAGE_FILE_NAME "${CPACK_CLIENT_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION}-${ARANGODB_PACKAGE_REVISION}_${ARANGODB_PACKAGE_ARCHITECTURE}") + string(LENGTH "${CLIENT_BUILD_DIR}" CLIENT_BUILD_DIR_LEN) if (${CLIENT_BUILD_DIR_LEN} EQUAL 0) set(CLIENT_BUILD_DIR 
${CMAKE_CURRENT_BINARY_DIR}/../p) @@ -90,27 +91,35 @@ endif() configure_file(cmake/packages/client/nsis.txt ${CLIENT_BUILD_DIR}/CMakeLists.txt @ONLY) add_custom_target(package-arongodb-client-nsis COMMAND ${CMAKE_COMMAND} . + COMMENT "configuring client package environment" COMMAND ${CMAKE_CPACK_COMMAND} -G NSIS -C ${CMAKE_BUILD_TYPE} - COMMAND cp *.exe ${PROJECT_BINARY_DIR} + COMMENT "building client packages" + COMMAND ${CMAKE_COMMAND} -E copy ${CLIENT_BUILD_DIR}/${ARANGODB_CLIENT_PACKAGE_FILE_NAME}.exe ${PROJECT_BINARY_DIR} + COMMENT "uploading client packages" WORKING_DIRECTORY ${CLIENT_BUILD_DIR}) list(APPEND PACKAGES_LIST package-arongodb-client-nsis) add_custom_target(copy_nsis_packages - COMMAND cp *.exe ${PACKAGE_TARGET_DIR}) + COMMAND ${CMAKE_COMMAND} -E copy ${CPACK_PACKAGE_FILE_NAME}.exe ${PACKAGE_TARGET_DIR}) list(APPEND COPY_PACKAGES_LIST copy_nsis_packages) add_custom_target(copy_zip_packages - COMMAND cp *.zip ${PACKAGE_TARGET_DIR}) + COMMAND ${CMAKE_COMMAND} -E copy ${CPACK_PACKAGE_FILE_NAME}.zip ${PACKAGE_TARGET_DIR}) list(APPEND COPY_PACKAGES_LIST copy_zip_packages) add_custom_target(remove_packages - COMMAND rm -f *.zip - COMMAND rm -f *.exe - COMMAND rm -rf _CPack_Packages + COMMAND ${CMAKE_COMMAND} -E remove_directory _CPack_Packages + COMMENT Removing server packaging build directory + COMMAND ${CMAKE_COMMAND} -E remove ${CPACK_PACKAGE_FILE_NAME}.zip + COMMENT Removing local target zip packages + COMMAND ${CMAKE_COMMAND} -E remove ${CPACK_PACKAGE_FILE_NAME}.exe + COMMENT Removing local target nsis packages + COMMAND ${CMAKE_COMMAND} -E remove ${ARANGODB_CLIENT_PACKAGE_FILE_NAME}.exe + COMMENT Removing local target nsis client packages ) list(APPEND CLEAN_PACKAGES_LIST remove_packages) diff --git a/cmake/packages/packages.cmake b/cmake/packages/packages.cmake index 2e6a293bcc..df61c5a2f9 100644 --- a/cmake/packages/packages.cmake +++ b/cmake/packages/packages.cmake @@ -25,39 +25,52 @@ else () endif () set(ARANGODB_PACKAGE_ARCHITECTURE 
${CMAKE_SYSTEM_PROCESSOR}) # eventually the package string will be modified later on: -set(CPACK_PACKAGE_FILE_NAME "${CPACK_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION}-${ARANGODB_PACKAGE_REVISION}.${ARANGODB_PACKAGE_ARCHITECTURE}") if ("${PACKAGING}" STREQUAL "DEB") - include(packages/deb) -elseif ("${PACKAGING}" STREQUAL "RPM") - include(packages/rpm) -elseif ("${PACKAGING}" STREQUAL "Bundle") - include(packages/bundle) -elseif (MSVC) - include(packages/nsis) -endif () - -################################################################################ -## generic tarball -################################################################################ -set(CPACK_PACKAGE_TGZ "${CMAKE_BINARY_DIR}/${CPACK_PACKAGE_FILE_NAME}.tar.gz") -add_custom_target(TGZ_package - COMMENT "create TGZ-package" - COMMAND ${CMAKE_CPACK_COMMAND} -G TGZ -C ${CMAKE_BUILD_TYPE} - ) - - -################################################################################ -## SNAPCRAFT PACKAGE -################################################################################ - -if (USE_SNAPCRAFT) - if(NOT DEFINED SNAP_PORT) - set(SNAP_PORT 8529) + if(CMAKE_TARGET_ARCHITECTURES MATCHES ".*x86_64.*") + set(ARANGODB_PACKAGE_ARCHITECTURE "amd64") + elseif(CMAKE_TARGET_ARCHITECTURES MATCHES "aarch64") + set(ARANGODB_PACKAGE_ARCHITECTURE "arm64") + elseif(CMAKE_TARGET_ARCHITECTURES MATCHES "armv7") + set(ARANGODB_PACKAGE_ARCHITECTURE "armhf") + else() + set(ARANGODB_PACKAGE_ARCHITECTURE "i386") endif() - include(packages/snap) + + set(CPACK_PACKAGE_FILE_NAME "${CPACK_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION}-${ARANGODB_PACKAGE_REVISION}_${ARANGODB_PACKAGE_ARCHITECTURE}") + + include(packages/deb) + include(packages/tar) + if (USE_SNAPCRAFT) + if(NOT DEFINED SNAP_PORT) + set(SNAP_PORT 8529) + endif() + include(packages/snap) + endif () +elseif ("${PACKAGING}" STREQUAL "RPM") + set(PACKAGE_VERSION "-${CPACK_PACKAGE_VERSION}-${ARANGODB_PACKAGE_REVISION}.${ARANGODB_PACKAGE_ARCHITECTURE}") + 
set(CPACK_PACKAGE_FILE_NAME "${CPACK_PACKAGE_NAME}${PACKAGE_VERSION}") + include(packages/rpm) + include(packages/tar) +elseif ("${PACKAGING}" STREQUAL "Bundle") + set(CPACK_PACKAGE_FILE_NAME "${CPACK_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION}-${ARANGODB_PACKAGE_REVISION}_x86_64") + include(packages/bundle) + include(packages/tar) +elseif (MSVC) + if (CMAKE_CL_64) + SET(ARANGODB_PACKAGE_ARCHITECTURE "win64") + else () + SET(ARANGODB_PACKAGE_ARCHITECTURE "win32") + endif () + set(CPACK_PACKAGE_FILE_NAME "${CPACK_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION}-${ARANGODB_PACKAGE_REVISION}_${ARANGODB_PACKAGE_ARCHITECTURE}") + include(packages/nsis) + include(packages/tar) +else () + set(CPACK_PACKAGE_FILE_NAME "${CPACK_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION}-${ARANGODB_PACKAGE_REVISION}_${ARANGODB_PACKAGE_ARCHITECTURE}") + include(packages/tar) endif () + configure_file( "${CMAKE_SOURCE_DIR}/Installation/cmake/CMakeCPackOptions.cmake.in" "${CMAKE_BINARY_DIR}/CMakeCPackOptions.cmake" @ONLY) diff --git a/cmake/packages/rpm.cmake b/cmake/packages/rpm.cmake index 70f0ce723a..08f71aeb42 100644 --- a/cmake/packages/rpm.cmake +++ b/cmake/packages/rpm.cmake @@ -2,7 +2,6 @@ set(CPACK_GENERATOR "RPM") if (CMAKE_DEBUG_FILENAMES_SHA_SUM) - message("IFFF!") set(CPACK_DEBUG_DIRECTORY_PATTERN "/usr/lib*/debug/.build-id/*") else() set(CPACK_DEBUG_DIRECTORY_PATTERN "/usr/lib*/debug/*") @@ -35,10 +34,9 @@ install( RENAME ${RPM_INIT_SCRIPT_TARGET_NAME} ) -# -set(PACKAGE_VERSION "-${CPACK_PACKAGE_VERSION}-${ARANGODB_PACKAGE_REVISION}.${ARANGODB_PACKAGE_ARCHITECTURE}") -set(CPACK_PACKAGE_FILE_NAME "${CPACK_PACKAGE_NAME}${PACKAGE_VERSION}") +# set(CPACK_CLIENT_PACKAGE_FILE_NAME "${CPACK_PACKAGE_NAME}-client${PACKAGE_VERSION}") +set(CPACK_DBG_PACKAGE_FILE_NAME "${CPACK_PACKAGE_NAME}-debuginfo${PACKAGE_VERSION}") set(CPACK_RPM_PACKAGE_RELOCATABLE FALSE) # set(CPACK_RPM_PACKAGE_DEBUG TRUE) @@ -53,31 +51,30 @@ include(arangod/dbg.cmake) add_custom_target(package-arongodb-server COMMAND ${CMAKE_COMMAND} . 
COMMAND ${CMAKE_CPACK_COMMAND} -G RPM - COMMAND cp "${CPACK_TEMPORARY_DIRECTORY}/*.rpm" "${PROJECT_BINARY_DIR}" + COMMAND ${CMAKE_COMMAND} -E copy ${CPACK_TEMPORARY_DIRECTORY}/${CPACK_PACKAGE_FILE_NAME}.rpm ${PROJECT_BINARY_DIR} + COMMAND ${CMAKE_COMMAND} -E copy ${CPACK_TEMPORARY_DIRECTORY}/${CPACK_CLIENT_PACKAGE_FILE_NAME}.rpm ${PROJECT_BINARY_DIR} + COMMAND ${CMAKE_COMMAND} -E copy ${CPACK_TEMPORARY_DIRECTORY}/${CPACK_DBG_PACKAGE_FILE_NAME}.rpm ${PROJECT_BINARY_DIR} WORKING_DIRECTORY ${PROJECT_BINARY_DIR}) list(APPEND PACKAGES_LIST package-arongodb-server) ################################################################################# ## hook to build the client package ################################################################################# -#set(CLIENT_BUILD_DIR ${CMAKE_CURRENT_BINARY_DIR}/packages/arangodb-client) -#configure_file(cmake/packages/client/rpm.txt ${CLIENT_BUILD_DIR}/CMakeLists.txt @ONLY) -#add_custom_target(package-arongodb-client -# COMMAND ${CMAKE_COMMAND} . 
-# COMMAND ${CMAKE_CPACK_COMMAND} -G RPM -# COMMAND cp *.rpm ${PROJECT_BINARY_DIR} -# WORKING_DIRECTORY ${CLIENT_BUILD_DIR}) -# -# -#list(APPEND PACKAGES_LIST package-arongodb-client) add_custom_target(copy_rpm_packages - COMMAND cp *.rpm ${PACKAGE_TARGET_DIR}) + COMMAND ${CMAKE_COMMAND} -E copy ${CPACK_PACKAGE_FILE_NAME}.rpm ${PACKAGE_TARGET_DIR} + COMMAND ${CMAKE_COMMAND} -E copy ${CPACK_CLIENT_PACKAGE_FILE_NAME}.rpm ${PACKAGE_TARGET_DIR} + COMMAND ${CMAKE_COMMAND} -E copy ${CPACK_DBG_PACKAGE_FILE_NAME}.rpm ${PACKAGE_TARGET_DIR}) list(APPEND COPY_PACKAGES_LIST copy_rpm_packages) add_custom_target(remove_packages - COMMAND rm -f *.rpm - COMMAND rm -rf _CPack_Packages + COMMAND ${CMAKE_COMMAND} -E remove_directory _CPack_Packages + COMMAND ${CMAKE_COMMAND} -E remove ${CPACK_PACKAGE_FILE_NAME}.rpm + COMMAND ${CMAKE_COMMAND} -E remove ${CPACK_CLIENT_PACKAGE_FILE_NAME}.rpm + COMMAND ${CMAKE_COMMAND} -E remove ${CPACK_DBG_PACKAGE_FILE_NAME}.rpm ) list(APPEND CLEAN_PACKAGES_LIST remove_packages) + + + diff --git a/cmake/packages/snap.cmake b/cmake/packages/snap.cmake index ea7dcf0cd1..9b9230abb3 100644 --- a/cmake/packages/snap.cmake +++ b/cmake/packages/snap.cmake @@ -5,16 +5,15 @@ if(EXISTS ${SNAP_EXE}) endif() if(SNAPCRAFT_FOUND) - set(CPACK_PACKAGE_FILE_NAME "${CPACK_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION}-${ARANGODB_PACKAGE_REVISION}_${ARANGODB_PACKAGE_ARCHITECTURE}") set(SNAPCRAFT_TEMPLATE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/Installation/Ubuntu") set(SNAPCRAFT_SOURCE_DIR "${CMAKE_BINARY_DIR}/_CPack_Packages/SNAP") + set(CPACK_SNAP_PACKAGE_FILE_NAME "${CPACK_PACKAGE_NAME}_${CPACK_PACKAGE_VERSION}-${ARANGODB_PACKAGE_REVISION}_${ARANGODB_PACKAGE_ARCHITECTURE}.snap") - message(STATUS "Create snap package") + message(STATUS "Creating snap package") if (VERBOSE) message(STATUS "CPACK_PACKAGE_FILE_NAME :" ${CPACK_PACKAGE_FILE_NAME}) message(STATUS "SNAPCRAFT_TEMPLATE_DIR: " ${SNAPCRAFT_TEMPLATE_DIR}) - message(STATUS "CPACK_PACKAGE_TGZ: " ${CPACK_PACKAGE_TGZ}) message(STATUS 
"SNAPCRAFT_SOURCE_DIR: " ${SNAPCRAFT_SOURCE_DIR}) endif () @@ -33,17 +32,16 @@ if(SNAPCRAFT_FOUND) COPY "${SNAPCRAFT_TEMPLATE_DIR}/arangodb.png" DESTINATION "${SNAPCRAFT_SOURCE_DIR}/" ) - add_custom_target(snap COMMENT "create snap-package" COMMAND ${SNAP_EXE} snap - COMMAND cp *.snap ${PROJECT_BINARY_DIR} + COMMAND ${CMAKE_COMMAND} -E copy ${SNAPCRAFT_SOURCE_DIR}/${CPACK_SNAP_PACKAGE_FILE_NAME} ${PROJECT_BINARY_DIR} DEPENDS TGZ_package WORKING_DIRECTORY ${SNAPCRAFT_SOURCE_DIR} ) add_custom_target(copy_snap_packages - COMMAND cp *.snap ${PACKAGE_TARGET_DIR}) + COMMAND ${CMAKE_COMMAND} -E copy ${CPACK_SNAP_PACKAGE_FILE_NAME} ${PACKAGE_TARGET_DIR}) list(APPEND COPY_PACKAGES_LIST copy_snap_packages) diff --git a/cmake/packages/tar.cmake b/cmake/packages/tar.cmake new file mode 100644 index 0000000000..636f9f1bad --- /dev/null +++ b/cmake/packages/tar.cmake @@ -0,0 +1,19 @@ + +################################################################################ +## generic tarball +################################################################################ +set(CPACK_PACKAGE_TGZ "${CMAKE_BINARY_DIR}/${CPACK_PACKAGE_FILE_NAME}.tar.gz") +add_custom_target(TGZ_package + COMMENT "create TGZ-package" + COMMAND ${CMAKE_CPACK_COMMAND} -G TGZ -C ${CMAKE_BUILD_TYPE} + ) + +add_custom_target(remove_tgz_packages + COMMAND ${CMAKE_COMMAND} -E remove_directory _CPack_Packages + COMMENT Removing server packaging build directory + COMMAND ${CMAKE_COMMAND} -E remove ${CPACK_PACKAGE_TGZ} + COMMENT Removing local tgz packages + ) + +list(APPEND CLEAN_PACKAGES_LIST remove_tgz_packages) + diff --git a/etc/arangodb3/arangovpack.conf.in b/etc/arangodb3/arangovpack.conf.in new file mode 100644 index 0000000000..e69de29bb2 diff --git a/js/client/modules/@arangodb/testing.js b/js/client/modules/@arangodb/testing.js index f25f359a6c..8b05bc49dd 100644 --- a/js/client/modules/@arangodb/testing.js +++ b/js/client/modules/@arangodb/testing.js @@ -79,6 +79,7 @@ const optionsDocumentation = [ ' - 
`skipArangoBench`: if set to true benchmark tests are skipped', ' - `skipAuthentication : testing authentication and authentication_paramaters will be skipped.', ' - `skipBoost`: if set to true the boost unittests are skipped', + ' - `skipCache`: if set to true, the hash cache unittests are skipped', ' - `skipConfig`: omit the noisy configuration tests', ' - `skipFoxxQueues`: omit the test for the foxx queues', ' - `skipEndpoints`: if set to true endpoints tests are skipped', @@ -178,6 +179,7 @@ const optionsDefaults = { 'skipArangoBenchNonConnKeepAlive': true, 'skipAuthentication': false, 'skipBoost': false, + 'skipCache': true, 'skipEndpoints': false, 'skipGeo': false, 'skipLogAnalysis': true, @@ -424,7 +426,7 @@ function readImportantLogLines (logPath) { // / echo 1 > /proc/sys/kernel/core_uses_pid // / echo /var/tmp/core-%e-%p-%t > /proc/sys/kernel/core_pattern // / -// / or at system startup by altering /etc/sysctl.d/corepattern.conf : +// / or at system startup by altering /etc/sysctl.d/corepattern.conf : // / # We want core files to be located in a central location // / # and know the PID plus the process name for later use. // / kernel.core_uses_pid = 1 @@ -458,7 +460,7 @@ function analyzeCoreDump (instanceInfo, options, storeArangodPath, pid) { executeExternalAndWait('/bin/bash', args); GDB_OUTPUT = fs.read(gdbOutputFile); print(GDB_OUTPUT); - + } // ////////////////////////////////////////////////////////////////////////////// @@ -512,7 +514,7 @@ function analyzeServerCrash (arangod, options, checkStr) { print(RED + "apport handles corefiles on your system. 
Uninstall it if you want us to get corefiles for analysis."); return; } - + if (matchSystemdCoredump.exec(cp) == null) { options.coreDirectory = "/var/lib/systemd/coredump"; } @@ -1202,7 +1204,7 @@ function runArangoBenchmark (options, instanceInfo, cmds) { 'server.username': options.username, 'server.password': options.password, 'server.endpoint': instanceInfo.endpoint, - // "server.request-timeout": 1200 // default now. + // "server.request-timeout": 1200 // default now. 'server.connection-timeout': 10 // 5s default }; @@ -1229,7 +1231,7 @@ function shutdownArangod (arangod, options) { arangod.exitStatus.status === 'RUNNING') { const requestOptions = makeAuthorizationHeaders(options); requestOptions.method = 'DELETE'; - + print(arangod.url + '/_admin/shutdown'); if (options.useKillExternal) { killExternal(arangod.pid); @@ -1304,7 +1306,7 @@ function shutdownInstance (instanceInfo, options) { fs.join(instanceInfo.rootDir, 'core.dmp') ]; } - + killExternal(arangod.pid, 11); analyzeServerCrash(arangod, options, 'instance forcefully KILLED after 60s - ' + arangod.exitStatus.signal); @@ -1436,7 +1438,7 @@ function startArango (protocol, options, addArgs, rootDir, role) { let args = makeArgsArangod(options, appDir, role); let endpoint; let port; - + if (!addArgs['server.endpoint']) { port = findFreePort(options.minPort, options.maxPort); endpoint = protocol + '://127.0.0.1:' + port; @@ -1472,7 +1474,7 @@ function startArango (protocol, options, addArgs, rootDir, role) { instanceInfo.pid = executeArangod(ARANGOD_BIN, toArgv(args), options).pid; } catch (x) { print('failed to run arangod - ' + JSON.stringify(x)); - throw(x); + throw(x); } instanceInfo.role = role; @@ -1520,7 +1522,7 @@ function startInstanceAgency (instanceInfo, protocol, options, addArgs, rootDir) instanceArgs['server.endpoint'] = protocol + '://127.0.0.1:' + port; instanceArgs['agency.my-address'] = protocol + '://127.0.0.1:' + port; instanceArgs['agency.supervision-grace-period'] = '10.0'; - 
instanceArgs['agency.supervision-frequency'] = '1.0'; + instanceArgs['agency.supervision-frequency'] = '1.0'; if (i === N - 1) { let l = []; @@ -1837,7 +1839,7 @@ function findTests () { } testsCases.common = doOnePath('js/common/tests/shell'); - + testsCases.server_only = doOnePath('js/server/tests/shell'); testsCases.client_only = doOnePath('js/client/tests/shell'); @@ -2069,7 +2071,7 @@ testFuncs.fail = function (options) { success: { status: true, message: "this testcase will always be successfull", - duration: 1 + duration: 1 } } }; @@ -2081,15 +2083,15 @@ testFuncs.fail = function (options) { testFuncs.arangosh = function (options) { let ret = {}; - [ - 'testArangoshExitCodeNoConnect', - 'testArangoshExitCodeFail', - 'testArangoshExitCodeFailButCaught', - 'testArangoshExitCodeEmpty', - 'testArangoshExitCodeSuccess', - 'testArangoshExitCodeStatements', - 'testArangoshExitCodeStatements2', - 'testArangoshExitCodeNewlines', + [ + 'testArangoshExitCodeNoConnect', + 'testArangoshExitCodeFail', + 'testArangoshExitCodeFailButCaught', + 'testArangoshExitCodeEmpty', + 'testArangoshExitCodeSuccess', + 'testArangoshExitCodeStatements', + 'testArangoshExitCodeStatements2', + 'testArangoshExitCodeNewlines', 'testArangoshExitCodeEcho', 'testArangoshShebang', ].forEach(function(what) { @@ -2120,37 +2122,37 @@ testFuncs.arangosh = function (options) { "didn't get expected return code (" + expectedReturnCode + "): \n" + yaml.safeDump(rc); } - + ++ret[section]['total']; ret[section]['status'] = failSuccess; ret[section]['duration'] = deltaTime; print((failSuccess ? GREEN : RED) + 'Status: ' + (failSuccess ? 
'SUCCESS' : 'FAIL') + RESET); } - + runTest('testArangoshExitCodeNoConnect', 'Starting arangosh with failing connect:', "db._databases();", 1, { 'server.endpoint' : 'tcp://127.0.0.1:0' }); print(); runTest('testArangoshExitCodeFail', 'Starting arangosh with exception throwing script:', "throw('foo')", 1, {}); print(); - + runTest('testArangoshExitCodeFailButCaught', 'Starting arangosh with a caught exception:', "try { throw('foo'); } catch (err) {}", 0, {}); print(); - + runTest('testArangoshExitCodeEmpty', 'Starting arangosh with empty script:', "", 0, {}); print(); - + runTest('testArangoshExitCodeSuccess', 'Starting arangosh with regular terminating script:', ";", 0, {}); print(); - + runTest('testArangoshExitCodeStatements', 'Starting arangosh with multiple statements:', "var a = 1; if (a !== 1) throw('boom!');", 0, {}); print(); - + runTest('testArangoshExitCodeStatements2', 'Starting arangosh with multiple statements:', "var a = 1;\nif (a !== 1) throw('boom!');\nif (a === 1) print('success');", 0, {}); print(); - + runTest('testArangoshExitCodeNewlines', 'Starting arangosh with newlines:', "q = `FOR i\nIN [1,2,3]\nRETURN i`;\nq += 'abc'\n", 0, {}); print(); - + if (platform.substr(0, 3) !== 'win') { var echoSuccess = true; var deltaTime2 = 0; @@ -2159,7 +2161,7 @@ testFuncs.arangosh = function (options) { print('\n--------------------------------------------------------------------------------'); print('Starting arangosh via echo'); print('--------------------------------------------------------------------------------'); - + fs.write(execFile, 'echo "db._databases();" | ' + fs.makeAbsolute(ARANGOSH_BIN) + ' --server.endpoint tcp://127.0.0.1:0'); @@ -2176,7 +2178,7 @@ testFuncs.arangosh = function (options) { "didn't get expected return code (1): \n" + yaml.safeDump(rc); } - + fs.remove(execFile); ++ret.testArangoshExitCodeEcho['total']; @@ -2184,7 +2186,7 @@ testFuncs.arangosh = function (options) { ret.testArangoshExitCodeEcho['duration'] = deltaTime2; 
print((echoSuccess ? GREEN : RED) + 'Status: ' + (echoSuccess ? 'SUCCESS' : 'FAIL') + RESET); } - + // test shebang execution with arangosh if (!options.skipShebang && platform.substr(0, 3) !== 'win') { var shebangSuccess = true; @@ -2660,6 +2662,20 @@ testFuncs.boost = function (options) { } } + if (!options.skipCache) { + const run = locateBoostTest('cache_suite'); + + if (run !== '') { + results.cache_suite = executeAndWait(run, args, options, + 'cache_suite'); + } else { + results.cache_suite = { + status: false, + message: "binary 'cache_suite' not found" + }; + } + } + if (!options.skipGeo) { const run = locateBoostTest('geo_suite'); @@ -3529,7 +3545,7 @@ testFuncs.replication_static = function (options) { let master = startInstance('tcp', options, { 'server.authentication': 'true' }, 'master_static'); - + const mr = makeResults('replication', master); if (master === false) { @@ -3818,7 +3834,7 @@ testFuncs.endpoints = function(options) { } let result = runInArangosh(options, instanceInfo, 'js/client/tests/endpoint-spec.js'); - + print(CYAN + 'Shutting down...' 
+ RESET); // mop: mehhh...when launched with a socket we can't use download :S shutdownInstance(instanceInfo, Object.assign(options, {useKillExternal: true})); @@ -4014,15 +4030,15 @@ testFuncs.upgrade = function (options) { ++result.upgrade.total; result.upgrade.second = executeAndWait(ARANGOD_BIN, argv, options, 'upgrade'); - + if (result.upgrade.second.status !== true) { print('not removing ' + tmpDataDir); return result.upgrade; } cleanupDirectories.push(tmpDataDir); - - result.upgrade.status = true; + + result.upgrade.status = true; return result; }; diff --git a/js/common/modules/@arangodb/aql/explainer.js b/js/common/modules/@arangodb/aql/explainer.js index f2aa6e9cf0..9fefe16678 100644 --- a/js/common/modules/@arangodb/aql/explainer.js +++ b/js/common/modules/@arangodb/aql/explainer.js @@ -987,7 +987,7 @@ function processQuery (query, explain) { } translate = ['ANY', 'INBOUND', 'OUTBOUND']; var defaultDirection = node.directions[0]; - rc = `${keyword("FOR")} ${parts.join(", ")} ${keyword("IN") } ${keyword(translate[defaultDirection])} `; + rc = `${keyword("FOR")} ${parts.join(", ")} ${keyword("IN") } ${keyword(translate[defaultDirection])} ${keyword("SHORTEST_PATH") } `; if (node.hasOwnProperty('startVertexId')) { rc += `'${value(node.startVertexId)}'`; } else { diff --git a/js/server/tests/aql/aql-failures-noncluster.js b/js/server/tests/aql/aql-failures-noncluster.js index e554b3d525..36b8ef4dca 100644 --- a/js/server/tests/aql/aql-failures-noncluster.js +++ b/js/server/tests/aql/aql-failures-noncluster.js @@ -34,7 +34,6 @@ var arangodb = require("@arangodb"); var db = arangodb.db; var internal = require("internal"); - //////////////////////////////////////////////////////////////////////////////// /// @brief test suite //////////////////////////////////////////////////////////////////////////////// @@ -246,6 +245,7 @@ function ahuacatlFailureSuite () { //////////////////////////////////////////////////////////////////////////////// /// @brief test 
failure //////////////////////////////////////////////////////////////////////////////// + testSortBlock4 : function () { internal.debugSetFailAt("SortBlock::doSortingNext1"); c.ensureSkiplist("value"); diff --git a/js/server/tests/aql/aql-graph-traverser.js b/js/server/tests/aql/aql-graph-traverser.js index d215367322..f8578e189c 100644 --- a/js/server/tests/aql/aql-graph-traverser.js +++ b/js/server/tests/aql/aql-graph-traverser.js @@ -1579,14 +1579,15 @@ function optimizeInSuite () { // if the rule is disabled we expect to do way more filtering var noOpt = { optimizer: { rules: [ "-all" ] } }; result = db._query(vertexQuery, bindVars, {}, noOpt); + extra = result.getExtra(); - // For each vertex not in the list we filter once for every conncted edge + // For each vertex not in the list we filter once for every connected edge assertEqual(extra.stats.filtered, 90 * 100); assertEqual(result.count(), 1000); result = db._query(edgeQuery, bindVars, {}, noOpt); extra = result.getExtra(); - // For each vertex not in the list we filter once for every conncted edge + // For each vertex not in the list we filter once for every connected edge assertEqual(extra.stats.filtered, 90 * 100); assertEqual(result.count(), 1000); }, @@ -1660,25 +1661,25 @@ function optimizeInSuite () { var noOpt = { optimizer: { rules: [ "-all" ] } }; result = db._query(vertexQuery, bindVars, {}, noOpt); extra = result.getExtra(); - // For each vertex not in the list we filter once for every conncted edge + // For each vertex not in the list we filter once for every connected edge assertEqual(extra.stats.filtered, 90 * 100); assertEqual(result.count(), 1000); result = db._query(edgeQuery, bindVars, {}, noOpt); extra = result.getExtra(); - // For each vertex not in the list we filter once for every conncted edge + // For each vertex not in the list we filter once for every connected edge assertEqual(extra.stats.filtered, 90 * 100); assertEqual(result.count(), 1000); result = db._query(mixedQuery1, 
bindVars, {}, noOpt); extra = result.getExtra(); - // For each vertex not in the list we filter once for every conncted edge + // For each vertex not in the list we filter once for every connected edge assertEqual(extra.stats.filtered, 90 * 100); assertEqual(result.count(), 1000); result = db._query(mixedQuery2, bindVars, {}, noOpt); extra = result.getExtra(); - // For each vertex not in the list we filter once for every conncted edge + // For each vertex not in the list we filter once for every connected edge assertEqual(extra.stats.filtered, 90 * 100); assertEqual(result.count(), 1000); }, diff --git a/js/server/tests/aql/aql-optimizer-rule-use-index-for-sort.js b/js/server/tests/aql/aql-optimizer-rule-use-index-for-sort.js index e37eccdd98..93890b123d 100644 --- a/js/server/tests/aql/aql-optimizer-rule-use-index-for-sort.js +++ b/js/server/tests/aql/aql-optimizer-rule-use-index-for-sort.js @@ -1133,6 +1133,72 @@ function optimizerRuleTestSuite() { } }); assertEqual(2, seen); + }, + + testSortOnSubAttributeAsc : function () { + skiplist.ensureIndex({ type: "skiplist", fields: [ "foo.bar" ], unique: false }); + var query = "FOR v IN " + colName + " SORT v.foo.bar ASC RETURN v"; + var rules = AQL_EXPLAIN(query).plan.rules; + assertNotEqual(-1, rules.indexOf(ruleName)); + + var nodes = AQL_EXPLAIN(query).plan.nodes; + var seen = false; + nodes.forEach(function(node) { + assertNotEqual("SortNode", node.type); + if (node.type === "IndexNode") { + assertEqual(1, node.indexes.length); + assertEqual(["foo.bar"], node.indexes[0].fields); + seen = true; + assertFalse(node.reverse); + } + }); + assertTrue(seen); + }, + + testSortOnSubAttributeDesc : function () { + skiplist.ensureIndex({ type: "skiplist", fields: [ "foo.bar" ], unique: false }); + var query = "FOR v IN " + colName + " SORT v.foo.bar DESC RETURN v"; + var rules = AQL_EXPLAIN(query).plan.rules; + assertNotEqual(-1, rules.indexOf(ruleName)); + + var nodes = AQL_EXPLAIN(query).plan.nodes; + var seen = false; + 
nodes.forEach(function(node) { + assertNotEqual("SortNode", node.type); + if (node.type === "IndexNode") { + assertEqual(1, node.indexes.length); + assertEqual(["foo.bar"], node.indexes[0].fields); + seen = true; + assertTrue(node.reverse); + } + }); + assertTrue(seen); + }, + + testSortOnNestedSubAttributeAsc : function () { + skiplist.ensureIndex({ type: "skiplist", fields: [ "foo.bar.baz" ], unique: false }); + var query = "FOR v IN " + colName + " SORT v.foo.bar.baz ASC RETURN v"; + var rules = AQL_EXPLAIN(query).plan.rules; + assertNotEqual(-1, rules.indexOf(ruleName)); + + var nodes = AQL_EXPLAIN(query).plan.nodes; + var seen = false; + nodes.forEach(function(node) { + assertNotEqual("SortNode", node.type); + if (node.type === "IndexNode") { + assertEqual(1, node.indexes.length); + assertEqual(["foo.bar.baz"], node.indexes[0].fields); + seen = true; + assertFalse(node.reverse); + } + }); + assertTrue(seen); + }, + + testSortOnNonIndexedSubAttributeAsc : function () { + var query = "FOR v IN " + colName + " SORT v.foo.bar ASC RETURN v"; + var rules = AQL_EXPLAIN(query).plan.rules; + assertEqual(-1, rules.indexOf(ruleName)); } }; diff --git a/js/server/tests/aql/aql-query-cache-noncluster.js b/js/server/tests/aql/aql-query-cache-noncluster.js index a4b77de809..5b087558c2 100644 --- a/js/server/tests/aql/aql-query-cache-noncluster.js +++ b/js/server/tests/aql/aql-query-cache-noncluster.js @@ -881,6 +881,82 @@ function ahuacatlQueryCacheTestSuite () { result = AQL_EXECUTE(query2, { "@collection": c1.name() }); assertTrue(result.cached); assertEqual([ 6, 5, 4, 3, 2, 1 ], result.json); + }, + +//////////////////////////////////////////////////////////////////////////////// +/// @brief test transaction commit +//////////////////////////////////////////////////////////////////////////////// + + testTransactionCommit : function () { + AQL_QUERY_CACHE_PROPERTIES({ mode: "on" }); + + var query = "FOR doc IN @@collection RETURN doc.value"; + var result = 
AQL_EXECUTE(query, { "@collection": c1.name() }); + assertFalse(result.cached); + assertEqual([ ], result.json); + + db._executeTransaction({ + collections: { write: c1.name() }, + action: function(params) { + var db = require("@arangodb").db; + db._collection(params.c1).insert({ value: "foo" }); + }, + params: { c1: c1.name() } + }); + + result = AQL_EXECUTE(query, { "@collection": c1.name() }); + assertFalse(result.cached); + assertEqual([ "foo" ], result.json); + + result = AQL_EXECUTE(query, { "@collection": c1.name() }); + assertTrue(result.cached); + assertEqual([ "foo" ], result.json); + }, + +//////////////////////////////////////////////////////////////////////////////// +/// @brief test transaction rollback +//////////////////////////////////////////////////////////////////////////////// + + testTransactionRollback : function () { + AQL_QUERY_CACHE_PROPERTIES({ mode: "on" }); + + var query = "FOR doc IN @@collection RETURN doc.value"; + + try { + db._executeTransaction({ + collections: { write: c1.name() }, + action: function(params) { + var result = AQL_EXECUTE(query, { "@collection": c1.name() }); + assertFalse(result.cached); + assertEqual([ ], result.json); + + result = AQL_EXECUTE(query, { "@collection": c1.name() }); + assertTrue(result.cached); + assertEqual([ ], result.json); + + var db = require("@arangodb").db; + db._collection(params.c1).insert({ value: "foo" }); + + result = AQL_EXECUTE(query, { "@collection": c1.name() }); + assertFalse(result.cached); + assertEqual([ "foo" ], result.json); + + result = AQL_EXECUTE(query, { "@collection": c1.name() }); + assertTrue(result.cached); + assertEqual([ "foo" ], result.json); + + throw "peng!"; + }, + params: { c1: c1.name() } + }); + fail(); + } catch (err) { + assertEqual("peng!", String(err)); + } + + var result = AQL_EXECUTE(query, { "@collection": c1.name() }); + assertFalse(result.cached); + assertEqual([ ], result.json); } }; diff --git a/js/server/tests/resilience/moving-shards-cluster.js 
b/js/server/tests/resilience/moving-shards-cluster.js index 9614d7bb79..914734ecf0 100644 --- a/js/server/tests/resilience/moving-shards-cluster.js +++ b/js/server/tests/resilience/moving-shards-cluster.js @@ -87,7 +87,7 @@ function MovingShardsSuite () { database, c[i].name(), s) ); let replicas = ccinfo.map(s => s.servers.length); - if (_.every(replicas, x => x === replFactor)) { + if (_.every(replicas, x => x >= replFactor)) { console.info("Replication up and running!"); break; } diff --git a/js/server/tests/resilience/resilience-synchronous-repl-cluster.js b/js/server/tests/resilience/resilience-synchronous-repl-cluster.js index 5875573fb0..8190e0bb20 100644 --- a/js/server/tests/resilience/resilience-synchronous-repl-cluster.js +++ b/js/server/tests/resilience/resilience-synchronous-repl-cluster.js @@ -77,7 +77,7 @@ function SynchronousReplicationSuite () { s => global.ArangoClusterInfo.getCollectionInfoCurrent(database, cn, s) ); let replicas = ccinfo.map(s => s.servers.length); - if (_.every(replicas, x => x === 2)) { + if (_.every(replicas, x => x > 1)) { console.info("Replication up and running!"); return true; } diff --git a/lib/ApplicationFeatures/ConfigFeature.cpp b/lib/ApplicationFeatures/ConfigFeature.cpp index 390d081486..535b718c50 100644 --- a/lib/ApplicationFeatures/ConfigFeature.cpp +++ b/lib/ApplicationFeatures/ConfigFeature.cpp @@ -190,6 +190,18 @@ void ConfigFeature::loadConfigFile(std::shared_ptr options, LOG_TOPIC(DEBUG, Logger::CONFIG) << "loading '" << filename << "'"; if (!parser.parse(filename)) { + if (filename.empty()) { + size_t i = 0; + std::string locationMsg = "(tried locations: "; + for (auto const& it : locations) { + if (i++ > 0) { + locationMsg += ", "; + } + locationMsg += "'" + FileUtils::buildFilename(it, basename) + "'"; + } + locationMsg += ")"; + options->failNotice(locationMsg); + } exit(EXIT_FAILURE); } } diff --git a/lib/Basics/ReadLocker.h b/lib/Basics/ReadLocker.h index 83efc71e16..526160ec83 100644 --- 
a/lib/Basics/ReadLocker.h +++ b/lib/Basics/ReadLocker.h @@ -20,45 +20,34 @@ /// /// @author Frank Celler /// @author Achim Brandt +/// @author Jan Steemann //////////////////////////////////////////////////////////////////////////////// #ifndef ARANGODB_BASICS_READ_LOCKER_H #define ARANGODB_BASICS_READ_LOCKER_H 1 #include "Basics/Common.h" +#include "Basics/Locking.h" #include "Basics/ReadWriteLock.h" #ifdef TRI_SHOW_LOCK_TIME #include "Logger/Logger.h" #endif -//////////////////////////////////////////////////////////////////////////////// +#include + /// @brief construct locker with file and line information -/// -/// Ones needs to use macros twice to get a unique variable based on the line -/// number. -//////////////////////////////////////////////////////////////////////////////// - -#ifdef TRI_SHOW_LOCK_TIME - #define READ_LOCKER(obj, lock) \ - arangodb::basics::ReadLocker::type> obj(&lock, __FILE__, __LINE__) + arangodb::basics::ReadLocker::type> obj(&lock, arangodb::basics::LockerType::BLOCKING, true, __FILE__, __LINE__) #define READ_LOCKER_EVENTUAL(obj, lock, t) \ - arangodb::basics::ReadLocker::type> obj(&lock, t, __FILE__, __LINE__) + arangodb::basics::ReadLocker::type> obj(&lock, arangodb::basics::LockerType::EVENTUAL, true, __FILE__, __LINE__) -#else +#define TRY_READ_LOCKER(obj, lock) \ + arangodb::basics::ReadLocker::type> obj(&lock, arangodb::basics::LockerType::TRY, true, __FILE__, __LINE__) -#define READ_LOCKER(obj, lock) arangodb::basics::ReadLocker::type> obj(&lock) - -#define READ_LOCKER_EVENTUAL(obj, lock, t) \ - arangodb::basics::ReadLocker::type> obj(&lock, t) - -#endif - -#define TRY_READ_LOCKER(obj, lock) arangodb::basics::TryReadLocker::type> obj(&lock) - -#define CONDITIONAL_READ_LOCKER(obj, lock, condition) arangodb::basics::ConditionalReadLocker::type> obj(&lock, (condition)) +#define CONDITIONAL_READ_LOCKER(obj, lock, condition) \ + arangodb::basics::ReadLocker::type> obj(&lock, arangodb::basics::LockerType::BLOCKING, (condition), 
__FILE__, __LINE__) namespace arangodb { namespace basics { @@ -72,45 +61,39 @@ class ReadLocker { ReadLocker& operator=(ReadLocker const&) = delete; public: + /// @brief aquires a read-lock + /// The constructors acquire a read lock, the destructor unlocks the lock. + ReadLocker(LockType* readWriteLock, LockerType type, bool condition, char const* file, int line) + : _readWriteLock(readWriteLock), _file(file), _line(line), #ifdef TRI_SHOW_LOCK_TIME - - /// @brief acquires a read-lock - /// The constructors read-locks the lock, the destructors unlocks the lock. - ReadLocker(LockType* readWriteLock, char const* file, int line) - : _readWriteLock(readWriteLock), _file(file), _line(line), _isLocked(false) { - double t = TRI_microtime(); - lock(); - _time = TRI_microtime() - t; - } - - /// @brief acquires a read-lock, with periodic sleeps while not acquired - /// sleep time is specified in nanoseconds - ReadLocker(LockType* readWriteLock, uint64_t sleepTime, char const* file, - int line) - : _readWriteLock(readWriteLock), _file(file), _line(line), _isLocked(false) { - double t = TRI_microtime(); - lockEventual(sleepTime); - _time = TRI_microtime() - t; - } - + _isLocked(false), _time(0.0) { #else - - /// @brief acquires a read-lock - /// The constructors read-locks the lock, the destructors unlocks the lock. 
- explicit ReadLocker(LockType* readWriteLock) - : _readWriteLock(readWriteLock), _isLocked(false) { - lock(); - } - - /// @brief acquires a read-lock, with periodic sleeps while not acquired - /// sleep time is specified in nanoseconds - ReadLocker(LockType* readWriteLock, uint64_t sleepTime) - : _readWriteLock(readWriteLock), _isLocked(false) { - lockEventual(sleepTime); - } - + _isLocked(false) { #endif +#ifdef TRI_SHOW_LOCK_TIME + // fetch current time + double t = TRI_microtime(); +#endif + + if (condition) { + if (type == LockerType::BLOCKING) { + lock(); + TRI_ASSERT(_isLocked); + } else if (type == LockerType::EVENTUAL) { + lockEventual(); + TRI_ASSERT(_isLocked); + } else if (type == LockerType::TRY) { + _isLocked = tryLock(); + } + } + +#ifdef TRI_SHOW_LOCK_TIME + // add elapsed time to time tracker + _time = TRI_microtime() - t; +#endif + } + /// @brief releases the read-lock ~ReadLocker() { if (_isLocked) { @@ -119,7 +102,7 @@ class ReadLocker { #ifdef TRI_SHOW_LOCK_TIME if (_time > TRI_SHOW_LOCK_THRESHOLD) { - LOG_TOPIC(WARN, arangodb::Logger::FIXME) << "ReadLocker " << _file << ":" << _line << " took " << _time << " s"; + LOG_TOPIC(WARN, arangodb::Logger::PERFORMANCE) << "ReadLocker " << _file << ":" << _line << " took " << _time << " s"; } #endif } @@ -128,17 +111,19 @@ class ReadLocker { bool isLocked() const { return _isLocked; } /// @brief eventually acquire the read lock - void lockEventual(uint64_t sleepTime) { - TRI_ASSERT(!_isLocked); - - while (!_readWriteLock->tryReadLock()) { -#ifdef _WIN32 - usleep((unsigned long)sleepTime); -#else - usleep((useconds_t)sleepTime); -#endif + void lockEventual() { + while (!tryLock()) { + std::this_thread::yield(); } - _isLocked = true; + TRI_ASSERT(_isLocked); + } + + bool tryLock() { + TRI_ASSERT(!_isLocked); + if (_readWriteLock->tryReadLock()) { + _isLocked = true; + } + return _isLocked; } /// @brief acquire the read lock, blocking @@ -171,168 +156,21 @@ class ReadLocker { /// @brief the read-write lock 
LockType* _readWriteLock; -#ifdef TRI_SHOW_LOCK_TIME - /// @brief file char const* _file; /// @brief line number int _line; + /// @brief whether or not we acquired the lock + bool _isLocked; + +#ifdef TRI_SHOW_LOCK_TIME /// @brief lock time double _time; #endif - - /// @brief whether or not we acquired the lock - bool _isLocked; }; -template -class TryReadLocker { - TryReadLocker(TryReadLocker const&) = delete; - TryReadLocker& operator=(TryReadLocker const&) = delete; - - public: - /// @brief tries to acquire a read-lock - /// The constructor tries to read-lock the lock, the destructors unlocks the - /// lock if it was acquired in the constructor - explicit TryReadLocker(LockType* readWriteLock) - : _readWriteLock(readWriteLock), _isLocked(false) { - _isLocked = _readWriteLock->tryReadLock(); - } - - /// @brief releases the read-lock - ~TryReadLocker() { - if (_isLocked) { - _readWriteLock->unlock(); - } - } - - /// @brief whether or not we acquired the lock - bool isLocked() const { return _isLocked; } - - /// @brief eventually acquire the read lock - void lockEventual(uint64_t sleepTime) { - TRI_ASSERT(!_isLocked); - - while (!_readWriteLock->tryReadLock()) { -#ifdef _WIN32 - usleep((unsigned long)sleepTime); -#else - usleep((useconds_t)sleepTime); -#endif - } - _isLocked = true; - } - - /// @brief acquire the read lock, blocking - void lock() { - TRI_ASSERT(!_isLocked); - _readWriteLock->readLock(); - _isLocked = true; - } - - /// @brief unlocks the read-write lock - bool unlock() { - if (_isLocked) { - _readWriteLock->unlock(); - _isLocked = false; - return true; - } - return false; - } - - /// @brief steals the lock, but does not unlock it - bool steal() { - if (_isLocked) { - _isLocked = false; - return true; - } - return false; - } - - private: - /// @brief the read-write lock - LockType* _readWriteLock; - - /// @brief whether or not we acquired the lock - bool _isLocked; -}; - -template -class ConditionalReadLocker { - 
ConditionalReadLocker(ConditionalReadLocker const&) = delete; - ConditionalReadLocker& operator=(ConditionalReadLocker const&) = delete; - - public: - /// @brief acquire a read-lock - /// The constructor tries to read-lock the lock, the destructors unlocks the - /// lock if it was acquired in the constructor - ConditionalReadLocker(LockType* readWriteLock, bool condition) - : _readWriteLock(readWriteLock), _isLocked(false) { - if (condition) { - _readWriteLock->readLock(); - _isLocked = true; - } - } - - /// @brief releases the read-lock - ~ConditionalReadLocker() { - if (_isLocked) { - _readWriteLock->unlock(); - } - } - - /// @brief whether or not we acquired the lock - bool isLocked() const { return _isLocked; } - - /// @brief eventually acquire the read lock - void lockEventual(uint64_t sleepTime) { - TRI_ASSERT(!_isLocked); - - while (!_readWriteLock->tryReadLock()) { -#ifdef _WIN32 - usleep((unsigned long)sleepTime); -#else - usleep((useconds_t)sleepTime); -#endif - } - _isLocked = true; - } - - /// @brief acquire the read lock, blocking - void lock() { - TRI_ASSERT(!_isLocked); - _readWriteLock->readLock(); - _isLocked = true; - } - - /// @brief unlocks the read-write lock - bool unlock() { - if (_isLocked) { - _readWriteLock->unlock(); - _isLocked = false; - return true; - } - return false; - } - - /// @brief steals the lock, but does not unlock it - bool steal() { - if (_isLocked) { - _isLocked = false; - return true; - } - return false; - } - - private: - /// @brief the read-write lock - LockType* _readWriteLock; - - /// @brief whether or not we acquired the lock - bool _isLocked; -}; } } diff --git a/lib/Basics/VelocyPackHelper.cpp b/lib/Basics/VelocyPackHelper.cpp index 5f7502673b..ebc79e7adf 100644 --- a/lib/Basics/VelocyPackHelper.cpp +++ b/lib/Basics/VelocyPackHelper.cpp @@ -731,10 +731,6 @@ int VelocyPackHelper::compare(VPackSlice lhs, VPackSlice rhs, bool useUTF8, auto lhsType = lhs.type(); switch (lhsType) { - case VPackValueType::Illegal: - case 
VPackValueType::MinKey: - case VPackValueType::MaxKey: - case VPackValueType::None: case VPackValueType::Null: return 0; case VPackValueType::Bool: { @@ -811,29 +807,61 @@ int VelocyPackHelper::compare(VPackSlice lhs, VPackSlice rhs, bool useUTF8, return 0; } case VPackValueType::Object: { - std::set keys; - VPackCollection::keys(lhs, keys); - VPackCollection::keys(rhs, keys); - for (auto const& key : keys) { - VPackSlice lhsValue = lhs.get(key).resolveExternal(); - if (lhsValue.isNone()) { - // not present => null - lhsValue = VPackSlice::nullSlice(); - } - VPackSlice rhsValue = rhs.get(key).resolveExternal(); - if (rhsValue.isNone()) { - // not present => null - rhsValue = VPackSlice::nullSlice(); - } + if (useUTF8) { + // must sort attributes by proper UTF8 values + // this is expensive + std::set keys; + VPackCollection::keys(lhs, keys); + VPackCollection::keys(rhs, keys); + for (auto const& key : keys) { + VPackSlice lhsValue = lhs.get(key).resolveExternal(); + if (lhsValue.isNone()) { + // not present => null + lhsValue = VPackSlice::nullSlice(); + } + VPackSlice rhsValue = rhs.get(key).resolveExternal(); + if (rhsValue.isNone()) { + // not present => null + rhsValue = VPackSlice::nullSlice(); + } - int result = compare(lhsValue, rhsValue, useUTF8, options, &lhs, &rhs); - if (result != 0) { - return result; + int result = compare(lhsValue, rhsValue, useUTF8, options, &lhs, &rhs); + if (result != 0) { + return result; + } + } + } else { + // no UTF8-awareness is required here. 
do a quick and dirty comparison + std::set keys; + VPackCollection::unorderedKeys(lhs, keys); + VPackCollection::unorderedKeys(rhs, keys); + for (auto const& key : keys) { + VPackSlice lhsValue = lhs.get(key).resolveExternal(); + if (lhsValue.isNone()) { + // not present => null + lhsValue = VPackSlice::nullSlice(); + } + VPackSlice rhsValue = rhs.get(key).resolveExternal(); + if (rhsValue.isNone()) { + // not present => null + rhsValue = VPackSlice::nullSlice(); + } + + int result = compare(lhsValue, rhsValue, useUTF8, options, &lhs, &rhs); + if (result != 0) { + return result; + } } } return 0; } + case VPackValueType::Illegal: + case VPackValueType::MinKey: + case VPackValueType::MaxKey: + case VPackValueType::None: + // uncommon cases are compared at the end + return 0; default: // Contains all other ValueTypes of VelocyPack. // They are not used in ArangoDB so this cannot occur @@ -970,41 +998,20 @@ uint64_t VelocyPackHelper::hashByAttributes( } #endif -void VelocyPackHelper::SanitizeExternals(VPackSlice const input, - VPackBuilder& output) { +bool VelocyPackHelper::hasNonClientTypes(VPackSlice input, bool checkExternals, bool checkCustom) { if (input.isExternal()) { - // recursively resolve externals - SanitizeExternals(input.resolveExternal(), output); + return checkExternals; + } else if (input.isCustom()) { + return checkCustom; } else if (input.isObject()) { - output.openObject(); - for (auto const& it : VPackObjectIterator(input)) { - output.add(VPackValue(it.key.copyString())); - SanitizeExternals(it.value, output); - } - output.close(); - } else if (input.isArray()) { - output.openArray(); - for (auto const& it : VPackArrayIterator(input)) { - SanitizeExternals(it, output); - } - output.close(); - } else { - output.add(input); - } -} - -bool VelocyPackHelper::hasExternals(VPackSlice input) { - if (input.isExternal()) { - return true; - } else if (input.isObject()) { - for (auto const& it : VPackObjectIterator(input)) { - if (hasExternals(it.value)) { + 
for (auto const& it : VPackObjectIterator(input, true)) { + if (hasNonClientTypes(it.value, checkExternals, checkCustom)) { return true; } } } else if (input.isArray()) { for (auto const& it : VPackArrayIterator(input)) { - if (hasExternals(it)) { + if (hasNonClientTypes(it, checkExternals, checkCustom)) { return true; } } @@ -1012,16 +1019,50 @@ bool VelocyPackHelper::hasExternals(VPackSlice input) { return false; } -VPackBuffer VelocyPackHelper::sanitizeExternalsChecked( - VPackSlice input, VPackOptions const* options, bool checkExternals) { +void VelocyPackHelper::sanitizeNonClientTypes(VPackSlice input, + VPackSlice base, + VPackBuilder& output, + VPackOptions const* options, + bool sanitizeExternals, + bool sanitizeCustom) { + if (sanitizeExternals && input.isExternal()) { + // recursively resolve externals + sanitizeNonClientTypes(input.resolveExternal(), base, output, options, sanitizeExternals, sanitizeCustom); + } else if (sanitizeCustom && input.isCustom()) { + if (options == nullptr || options->customTypeHandler == nullptr) { + THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "cannot sanitize vpack without custom type handler"); + } + std::string custom = options->customTypeHandler->toString(input, options, base); + output.add(VPackValue(custom)); + } else if (input.isObject()) { + output.openObject(); + for (auto const& it : VPackObjectIterator(input)) { + output.add(VPackValue(it.key.copyString())); + sanitizeNonClientTypes(it.value, input, output, options, sanitizeExternals, sanitizeCustom); + } + output.close(); + } else if (input.isArray()) { + output.openArray(); + for (auto const& it : VPackArrayIterator(input)) { + sanitizeNonClientTypes(it, input, output, options, sanitizeExternals, sanitizeCustom); + } + output.close(); + } else { + output.add(input); + } +} + +VPackBuffer VelocyPackHelper::sanitizeNonClientTypesChecked( + VPackSlice input, VPackOptions const* options, bool sanitizeExternals, bool sanitizeCustom) { VPackBuffer buffer; 
VPackBuilder builder(buffer, options); bool resolveExt = true; - if (checkExternals) { - resolveExt = hasExternals(input); + if (sanitizeExternals) { + resolveExt = hasNonClientTypes(input, sanitizeExternals, sanitizeCustom); } if (resolveExt) { // resolve - SanitizeExternals(input, builder); + buffer.reserve(input.byteSize()); // reserve space space already + sanitizeNonClientTypes(input, VPackSlice::noneSlice(), builder, options, sanitizeExternals, sanitizeCustom); } else { builder.add(input); } diff --git a/lib/Basics/VelocyPackHelper.h b/lib/Basics/VelocyPackHelper.h index 60c4caacba..69703594fd 100644 --- a/lib/Basics/VelocyPackHelper.h +++ b/lib/Basics/VelocyPackHelper.h @@ -427,16 +427,20 @@ class VelocyPackHelper { static constexpr arangodb::velocypack::Slice IllegalValue() { return arangodb::velocypack::Slice::illegalSlice(); } + + static bool hasNonClientTypes(arangodb::velocypack::Slice, bool checkExternals, bool checkCustom); - static void SanitizeExternals(arangodb::velocypack::Slice const, - arangodb::velocypack::Builder&); + static void sanitizeNonClientTypes(arangodb::velocypack::Slice input, + arangodb::velocypack::Slice base, + arangodb::velocypack::Builder& output, + arangodb::velocypack::Options const*, + bool sanitizeExternals, bool sanitizeCustom); - static bool hasExternals(arangodb::velocypack::Slice const); - - static VPackBuffer sanitizeExternalsChecked( - arangodb::velocypack::Slice const, + static VPackBuffer sanitizeNonClientTypesChecked( + arangodb::velocypack::Slice, VPackOptions const* options = &VPackOptions::Options::Defaults, - bool checkExternals = true); + bool sanitizeExternals = true, + bool sanitizeCustom = true); static uint64_t extractIdValue(VPackSlice const& slice); diff --git a/lib/Basics/WriteLocker.h b/lib/Basics/WriteLocker.h index ad76c49c59..13b974e49d 100644 --- a/lib/Basics/WriteLocker.h +++ b/lib/Basics/WriteLocker.h @@ -20,6 +20,7 @@ /// /// @author Frank Celler /// @author Achim Brandt +/// @author Jan Steemann 
//////////////////////////////////////////////////////////////////////////////// #ifndef ARANGODB_BASICS_WRITE_LOCKER_H @@ -35,10 +36,7 @@ #include -//////////////////////////////////////////////////////////////////////////////// /// @brief construct locker with file and line information -//////////////////////////////////////////////////////////////////////////////// - #define WRITE_LOCKER(obj, lock) \ arangodb::basics::WriteLocker::type> obj(&lock, arangodb::basics::LockerType::BLOCKING, true, __FILE__, __LINE__) @@ -115,10 +113,10 @@ class WriteLocker { /// @brief eventually acquire the write lock void lockEventual() { - while (!_readWriteLock->tryWriteLock()) { + while (!tryLock()) { std::this_thread::yield(); } - _isLocked = true; + TRI_ASSERT(_isLocked); } bool tryLock() { diff --git a/lib/Basics/shell-colors.h b/lib/Basics/shell-colors.h index f043376814..e1c4f2a183 100644 --- a/lib/Basics/shell-colors.h +++ b/lib/Basics/shell-colors.h @@ -24,118 +24,92 @@ #ifndef ARANGODB_BASICS_SHELL__COLORS_H #define ARANGODB_BASICS_SHELL__COLORS_H 1 -//////////////////////////////////////////////////////////////////////////////// -/// @brief color red -//////////////////////////////////////////////////////////////////////////////// +#ifdef _WIN32 +/// @brief disable escape sequences on Windows +/// because MS-DOS does not support them +/// the proper fix for this would be to define or +/// not define the color codes depending on the +/// type of terminal used +#define TRI_SHELL_COLOR_RED "" +#define TRI_SHELL_COLOR_BOLD_RED "" +#define TRI_SHELL_COLOR_GREEN "" +#define TRI_SHELL_COLOR_BOLD_GREEN "" +#define TRI_SHELL_COLOR_BLUE "" +#define TRI_SHELL_COLOR_BOLD_BLUE "" +#define TRI_SHELL_COLOR_YELLOW "" +#define TRI_SHELL_COLOR_BOLD_YELLOW "" +#define TRI_SHELL_COLOR_WHITE "" +#define TRI_SHELL_COLOR_BOLD_WHITE "" +#define TRI_SHELL_COLOR_BLACK "" +#define TRI_SHELL_COLOR_BOLD_BLACK "" +#define TRI_SHELL_COLOR_CYAN "" +#define TRI_SHELL_COLOR_BOLD_CYAN "" +#define 
TRI_SHELL_COLOR_MAGENTA "" +#define TRI_SHELL_COLOR_BOLD_MAGENTA "" +#define TRI_SHELL_COLOR_BLINK "" +#define TRI_SHELL_COLOR_BRIGHT "" +#define TRI_SHELL_COLOR_RESET "" + +#else + +/// @brief color red #define TRI_SHELL_COLOR_RED "\x1b[31m" -//////////////////////////////////////////////////////////////////////////////// /// @brief color bold red -//////////////////////////////////////////////////////////////////////////////// - #define TRI_SHELL_COLOR_BOLD_RED "\x1b[1;31m" -//////////////////////////////////////////////////////////////////////////////// /// @brief color green -//////////////////////////////////////////////////////////////////////////////// - #define TRI_SHELL_COLOR_GREEN "\x1b[32m" -//////////////////////////////////////////////////////////////////////////////// /// @brief color bold green -//////////////////////////////////////////////////////////////////////////////// - #define TRI_SHELL_COLOR_BOLD_GREEN "\x1b[1;32m" -//////////////////////////////////////////////////////////////////////////////// /// @brief color blue -//////////////////////////////////////////////////////////////////////////////// - #define TRI_SHELL_COLOR_BLUE "\x1b[34m" -//////////////////////////////////////////////////////////////////////////////// /// @brief color bold blue -//////////////////////////////////////////////////////////////////////////////// - #define TRI_SHELL_COLOR_BOLD_BLUE "\x1b[1;34m" -//////////////////////////////////////////////////////////////////////////////// /// @brief color yellow -//////////////////////////////////////////////////////////////////////////////// - #define TRI_SHELL_COLOR_YELLOW "\x1b[33m" -//////////////////////////////////////////////////////////////////////////////// /// @brief color yellow -//////////////////////////////////////////////////////////////////////////////// - #define TRI_SHELL_COLOR_BOLD_YELLOW "\x1b[1;33m" -//////////////////////////////////////////////////////////////////////////////// /// @brief color white 
-//////////////////////////////////////////////////////////////////////////////// - #define TRI_SHELL_COLOR_WHITE "\x1b[37m" -//////////////////////////////////////////////////////////////////////////////// /// @brief color bold white -//////////////////////////////////////////////////////////////////////////////// - #define TRI_SHELL_COLOR_BOLD_WHITE "\x1b[1;37m" -//////////////////////////////////////////////////////////////////////////////// /// @brief color black -//////////////////////////////////////////////////////////////////////////////// - #define TRI_SHELL_COLOR_BLACK "\x1b[30m" -//////////////////////////////////////////////////////////////////////////////// /// @brief color bold black -//////////////////////////////////////////////////////////////////////////////// - #define TRI_SHELL_COLOR_BOLD_BLACK "\x1b[1;30m" -//////////////////////////////////////////////////////////////////////////////// /// @brief color cyan -//////////////////////////////////////////////////////////////////////////////// - #define TRI_SHELL_COLOR_CYAN "\x1b[36m" -//////////////////////////////////////////////////////////////////////////////// /// @brief color bold cyan -//////////////////////////////////////////////////////////////////////////////// - #define TRI_SHELL_COLOR_BOLD_CYAN "\x1b[1;36m" -//////////////////////////////////////////////////////////////////////////////// /// @brief color magenta -//////////////////////////////////////////////////////////////////////////////// - #define TRI_SHELL_COLOR_MAGENTA "\x1b[35m" -//////////////////////////////////////////////////////////////////////////////// /// @brief color bold magenta -//////////////////////////////////////////////////////////////////////////////// - #define TRI_SHELL_COLOR_BOLD_MAGENTA "\x1b[1;35m" -//////////////////////////////////////////////////////////////////////////////// /// @brief color blink -//////////////////////////////////////////////////////////////////////////////// - #define 
TRI_SHELL_COLOR_BLINK "\x1b[5m" -//////////////////////////////////////////////////////////////////////////////// /// @brief color bright -//////////////////////////////////////////////////////////////////////////////// - #define TRI_SHELL_COLOR_BRIGHT "\x1b[1m" -//////////////////////////////////////////////////////////////////////////////// /// @brief color reset -//////////////////////////////////////////////////////////////////////////////// - #define TRI_SHELL_COLOR_RESET "\x1b[0m" #endif + +#endif diff --git a/lib/Logger/Logger.h b/lib/Logger/Logger.h index 0480be31a1..94ce62f408 100644 --- a/lib/Logger/Logger.h +++ b/lib/Logger/Logger.h @@ -175,8 +175,8 @@ class Logger { }; struct LINE { - explicit LINE(long int line) : _line(line){} - long int _line; + explicit LINE(int line) : _line(line){} + int _line; }; struct FILE { diff --git a/lib/Logger/LoggerStream.h b/lib/Logger/LoggerStream.h index 68b42d1a8d..7dab9306c8 100644 --- a/lib/Logger/LoggerStream.h +++ b/lib/Logger/LoggerStream.h @@ -81,6 +81,12 @@ class LoggerStream { _out << obj; return *this; } + + template + LoggerStream& operator<<(std::pair const& obj) { + _out << '(' << obj.first << ", " << obj.second << ')'; + return *this; + } template LoggerStream& operator<<(std::vector const& obj) { @@ -132,7 +138,7 @@ class LoggerStream { std::stringstream _out; size_t _topicId; LogLevel _level; - long int _line; + int _line; char const* _file; char const* _function; }; diff --git a/lib/ProgramOptions/IniFileParser.h b/lib/ProgramOptions/IniFileParser.h index 57430d77ac..acd09bc964 100644 --- a/lib/ProgramOptions/IniFileParser.h +++ b/lib/ProgramOptions/IniFileParser.h @@ -23,7 +23,7 @@ #ifndef ARANGODB_PROGRAM_OPTIONS_INI_FILE_PARSER_H #define ARANGODB_PROGRAM_OPTIONS_INI_FILE_PARSER_H 1 -#include "Basics/Common.h" +#include "Basics/FileUtils.h" #include #include @@ -64,6 +64,10 @@ class IniFileParser { // parse a config file. 
returns true if all is well, false otherwise // errors that occur during parse are reported to _options bool parse(std::string const& filename) { + if (filename.empty()) { + return _options->fail("unable to open configuration file: no configuration file specified"); + } + std::ifstream ifs(filename, std::ifstream::in); if (!ifs.is_open()) { @@ -114,7 +118,6 @@ class IniFileParser { if (!basics::StringUtils::isSuffix(include, ".conf")) { include += ".conf"; } - if (_seen.find(include) != _seen.end()) { LOG_TOPIC(FATAL, Logger::CONFIG) << "recursive include of file '" << include << "'"; @@ -123,6 +126,11 @@ class IniFileParser { _seen.insert(include); + if (!basics::FileUtils::isRegularFile(include)) { + auto dn = basics::FileUtils::dirname(filename); + include = basics::FileUtils::buildFilename(dn, include); + } + LOG_TOPIC(DEBUG, Logger::CONFIG) << "reading include file '" << include << "'"; diff --git a/lib/ProgramOptions/ProgramOptions.h b/lib/ProgramOptions/ProgramOptions.h index 33e4f95770..5b3dd09b82 100644 --- a/lib/ProgramOptions/ProgramOptions.h +++ b/lib/ProgramOptions/ProgramOptions.h @@ -507,11 +507,18 @@ class ProgramOptions { // report an error (callback from parser) bool fail(std::string const& message) { - std::cerr << "Error while processing " << _context << ":" << std::endl; - std::cerr << " " << message << std::endl << std::endl; _processingResult.failed(true); + std::cerr << "Error while processing " << _context << ":" << std::endl; + failNotice(message); + std::cerr << std::endl; return false; } + + void failNotice(std::string const& message) { + // only allowed to call if we already failed + TRI_ASSERT(_processingResult.failed()); + std::cerr << " " << message << std::endl; + } // add a positional argument (callback from parser) void addPositional(std::string const& value) { diff --git a/lib/Rest/GeneralResponse.cpp b/lib/Rest/GeneralResponse.cpp index abbf65e92c..bdf211734a 100644 --- a/lib/Rest/GeneralResponse.cpp +++ 
b/lib/Rest/GeneralResponse.cpp @@ -52,7 +52,7 @@ void GeneralResponse::addPayload(VPackSlice const& slice, if (!skipBody) { if (resolveExternals) { auto tmpBuffer = - basics::VelocyPackHelper::sanitizeExternalsChecked(slice, options); + basics::VelocyPackHelper::sanitizeNonClientTypesChecked(slice, options); _vpackPayloads.push_back(std::move(tmpBuffer)); } else { // just copy @@ -76,7 +76,7 @@ void GeneralResponse::addPayload(VPackBuffer&& buffer, addPayloadPreHook(true, resolveExternals, skipBody); if (!skipBody) { if (resolveExternals) { - auto tmpBuffer = basics::VelocyPackHelper::sanitizeExternalsChecked( + auto tmpBuffer = basics::VelocyPackHelper::sanitizeNonClientTypesChecked( VPackSlice(buffer.data()), options); _vpackPayloads.push_back(std::move(tmpBuffer)); } else {