1
0
Fork 0

Merge branch 'engine-api' of github.com:arangodb/arangodb into engine-api

This commit is contained in:
Michael Hackstein 2017-02-27 13:01:12 +01:00
commit ae14ae99a5
226 changed files with 9501 additions and 1822 deletions

7
.gitignore vendored
View File

@ -30,6 +30,7 @@ compile_commands.json
instanceinfo.json
testresult.json
testsStarted
soc-pokec-*
build.sh
build*/
@ -99,12 +100,6 @@ js/apps/system/_admin/aardvark/APP/frontend/build/scripts.html.part
js/common/tests/shell/shell-database.js
3rdParty/boost/1.61.0/b2
3rdParty/boost/1.61.0/bin.v2/
3rdParty/boost/1.61.0/bjam
3rdParty/boost/1.61.0/project-config.jam
3rdParty/boost/1.61.0/stage/
.gdb-history
npm-debug.log

View File

@ -11,6 +11,7 @@ branches:
- "2.8"
- "3.0"
- "3.1"
- "3.2"
language: cpp
cache: ccache

View File

@ -578,14 +578,24 @@ set(ICU_DT ${ICU_DT} PARENT_SCOPE)
set(ICU_DT_DEST "icudtl.dat" )
set(ICU_DT_DEST ${ICU_DT_DEST} PARENT_SCOPE)
configure_file(
"${ICU_DT}"
"${CMAKE_BINARY_DIR}/bin/${CONFIGURATION}/${ICU_DT_DEST}"
COPYONLY)
configure_file(
"${ICU_DT}"
"${CMAKE_BINARY_DIR}/tests/${CONFIGURATION}/${ICU_DT_DEST}"
COPYONLY)
if (MSVC)
add_custom_command(
TARGET v8_build POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy ${ICU_DT} ${CMAKE_BINARY_DIR}/bin/$<CONFIG>/${ICU_DT_DEST})
add_custom_command(
TARGET v8_build POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy ${ICU_DT} ${CMAKE_BINARY_DIR}/tests/$<CONFIG>/${ICU_DT_DEST})
else()
add_custom_command(
TARGET v8_build POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy ${ICU_DT} ${CMAKE_BINARY_DIR}/bin/${ICU_DT_DEST})
add_custom_command(
TARGET v8_build POST_BUILD
COMMAND ${CMAKE_COMMAND} -E copy ${ICU_DT} ${CMAKE_BINARY_DIR}/tests/${ICU_DT_DEST})
endif()
if (NOT WIN32)
add_custom_target(nonthinV8
COMMAND ${CMAKE_CURRENT_SOURCE_DIR}/../../Installation/archive-de-thinner.sh ${V8_REL_TARGET_PATH}

View File

@ -113,11 +113,16 @@ class Collection {
ObjectIterator it(slice);
while (it.valid()) {
result.emplace(std::move(it.key(true).copyString()));
result.emplace(it.key(true).copyString());
it.next();
}
}
template<typename T>
static void keys(Slice const* slice, T& result) {
return keys(*slice, result);
}
static void keys(Slice const& slice, std::vector<std::string>& result) {
// pre-allocate result vector
result.reserve(checkOverflow(slice.length()));
@ -130,9 +135,19 @@ class Collection {
}
}
template<typename T>
static void unorderedKeys(Slice const& slice, T& result) {
ObjectIterator it(slice, true);
while (it.valid()) {
result.emplace(it.key(true).copyString());
it.next();
}
}
template<typename T>
static void keys(Slice const* slice, T& result) {
return keys(*slice, result);
static void unorderedKeys(Slice const* slice, T& result) {
return unorderedKeys(*slice, result);
}
static Builder extract(Slice const& slice, int64_t from, int64_t to = INT64_MAX);

View File

@ -1,6 +1,12 @@
devel
-----
* don't let read-only transactions block the WAL collector
v3.2.alpha2 (2017-02-20)
------------------------
* ui: fixed issue #2065
* ui: fixed a dashboard related memory issue
@ -10,12 +16,12 @@ devel
* Removed undocumented internal HTTP API:
* PUT _api/edges
The documented GET _api/edges and the undocumented POST _api/edges remain unmodified.
* moved V8 code into a git submodule
this requires running the command
git submodule update --init --recursive
once after a source code update or fresh checkout
@ -35,16 +41,22 @@ devel
arangoexport can be used to export collections to json, jsonl or xml
and export a graph or collections to xgmml.
* fixed a race condition when closing a connection
* raised default hard limit on threads for very small systems to 64
* fixed negative counting of http connection in UI
v3.2.alpha1 (2017-02-05)
------------------------
* added figure `httpRequests` to AQL query statistics
* removed revisions cache intermediate layer implementation
* obsoleted startup options `--database.revision-cache-chunk-size` and
`--database.revision-cache-target-size`
`--database.revision-cache-target-size`
* fix potential port number over-/underruns
@ -58,7 +70,29 @@ v3.2.alpha1 (2017-02-05)
* more detailed stacktraces in Foxx apps
v3.1.11 (2017-02-14)
v3.1.12 (XXXX-XX-XX)
--------------------
* disable shell color escape sequences on Windows
* fixed issue #2326
* fixed issue #2320
* fixed issue #2315
* fixed a race condition when closing a connection
* raised default hard limit on threads for very small systems to 64
* fixed negative counting of http connection in UI
* fixed a race when renaming collections
* fixed a race when dropping databases
v3.1.11 (2017-02-17)
--------------------
* fixed a race between connection closing and sending out last chunks of data to clients
@ -168,9 +202,10 @@ shards.
* added server startup option `--query.memory-limit`
* added convenience function to create vertex-centric indexes.
Usage: `db.collection.ensureVertexCentricIndex("label", {type: "hash", direction: "outbound"})`
That will create an index that can be used on OUTBOUND with filtering on the
edge attribute `label`.
Usage: `db.collection.ensureVertexCentricIndex("label", {type: "hash", direction: "outbound"})`
That will create an index that can be used on OUTBOUND with filtering on the
edge attribute `label`.
* change default log output for tools to stdout (instead of stderr)
@ -641,6 +676,8 @@ v3.1.alpha2 (2016-09-01)
v3.0.13 (XXXX-XX-XX)
--------------------
* fixed issue #2315
* fixed issue #2210

View File

@ -97,7 +97,7 @@ set(ARANGODB_FRIENDLY_STRING "ArangoDB - the multi-model database")
set(ARANGO_BENCH_FRIENDLY_STRING "arangobench - stress test program")
set(ARANGO_DUMP_FRIENDLY_STRING "arangodump - export")
set(ARANGO_RESTORE_FRIENDLY_STRING "arangrestore - importer")
set(ARANGO_EXPORT_FRIENDLY_STRING "arangoexport - dataexporter")
set(ARANGO_EXPORT_FRIENDLY_STRING "arangoexport - datae xporter")
set(ARANGO_IMP_FRIENDLY_STRING "arangoimp - TSV/CSV/JSON importer")
set(ARANGOSH_FRIENDLY_STRING "arangosh - commandline client")
set(ARANGO_VPACK_FRIENDLY_STRING "arangovpack - vpack printer")
@ -118,6 +118,7 @@ set(BIN_ARANGOVPACK arangovpack)
# test binaries
set(TEST_BASICS_SUITE basics_suite)
set(TEST_CACHE_SUITE cache_suite)
set(TEST_GEO_SUITE geo_suite)
set(CLEAN_AUTOGENERATED_FILES)
set(PACKAGES_LIST)
@ -167,7 +168,7 @@ find_program (GIT_EXE git)
if (DEFINED GIT_EXE AND IS_DIRECTORY "${CMAKE_SOURCE_DIR}/.git")
execute_process(
WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
COMMAND ${GIT_EXE} describe --all --tags --long --dirty=-dirty
OUTPUT_VARIABLE GIT_OUTPUT)
@ -281,7 +282,7 @@ endif ()
math(EXPR BITS "8*${CMAKE_SIZEOF_VOID_P}")
add_definitions("-DARANGODB_BITS=${BITS}")
################################################################################
## COMPILER FEATURES
################################################################################
@ -346,12 +347,12 @@ if (MSVC)
add_definitions("-D_CRT_SECURE_NO_WARNINGS=1")
add_definitions("-DFD_SETSIZE=8192")
add_definitions("-DU_STATIC_IMPLEMENTATION=1")
# https://blogs.msdn.microsoft.com/vcblog/2016/04/14/stl-fixes-in-vs-2015-update-2/
# https://connect.microsoft.com/VisualStudio/feedback/details/1892487
# http://lists.boost.org/boost-users/2016/04/85968.php
add_definitions("-D_ENABLE_ATOMIC_ALIGNMENT_FIX")
set(MSVC_LIBS Shlwapi.lib;crypt32.lib;WINMM.LIB;Ws2_32.lib)
set(CMAKE_EXE_LINKER_FLAGS
@ -379,7 +380,7 @@ if (APPLE)
endif ()
endif ()
if (USE_LOCAL_CLOCK_GETTIME)
if (USE_LOCAL_CLOCK_GETTIME)
message(STATUS "using a home-made clock_gettime")
endif ()
endif ()
@ -933,6 +934,7 @@ add_subdirectory(Documentation)
add_dependencies(arangobench zlibstatic)
add_dependencies(arangod zlibstatic)
add_dependencies(arangodump zlibstatic)
add_dependencies(arangoexport zlibstatic)
add_dependencies(arangoimp zlibstatic)
add_dependencies(arangorestore zlibstatic)
add_dependencies(arangosh zlibstatic)
@ -942,6 +944,7 @@ if (NOT USE_PRECOMPILED_V8)
add_dependencies(arangobench v8_build)
add_dependencies(arangod v8_build)
add_dependencies(arangodump v8_build)
add_dependencies(arangoexport v8_build)
add_dependencies(arangoimp v8_build)
add_dependencies(arangorestore v8_build)
add_dependencies(arangosh v8_build)

View File

@ -48,7 +48,7 @@ Export JSONL
unix> arangoexport --type jsonl --collection test
This exports the collection *test* into the output directory *export* as jsonl. Every line in the export is one document from the collection *test* as json.
This exports the collection *test* into the output directory *export* as [jsonl](http://jsonlines.org). Every line in the export is one document from the collection *test* as json.
Export XML
----------

View File

@ -71,6 +71,43 @@ ArangoDB can also do a so called *broadcast bind* using
host. This may be useful on development systems that frequently change their
network setup like laptops.
### Special note on IPv6 link-local addresses
ArangoDB can also listen to IPv6 link-local addresses via adding the zone ID
to the IPv6 address in the form `[ipv6-link-local-address%zone-id]`. However,
what you probably instead want is to bind to a local IPv6 address. Local IPv6
addresses start with `fd`. If you only see a `fe80:` IPv6 address in your
interface configuration but no IPv6 address starting with `fd` your interface
has no local IPv6 address assigned. You can read more about IPv6 link-local
addresses [here](https://en.wikipedia.org/wiki/Link-local_address#IPv6).
**Example**
Bind to a link-local and local IPv6 address.
unix> ifconfig
This command lists all interfaces and assigned ip addresses. The link-local
address may be `fe80::6257:18ff:fe82:3ec6%eth0` (IPv6 address plus interface name).
A local IPv6 address may be `fd12:3456::789a`. To bind ArangoDB to it start
*arangod* with `--server.endpoint tcp://[fe80::6257:18ff:fe82:3ec6%eth0]:8529`.
Use telnet to test the connection.
unix> telnet fe80::6257:18ff:fe82:3ec6%eth0 8529
Trying fe80::6257:18ff:fe82:3ec6...
Connected to my-machine.
Escape character is '^]'.
GET / HTTP/1.1
HTTP/1.1 301 Moved Permanently
Location: /_db/_system/_admin/aardvark/index.html
Content-Type: text/html
Server: ArangoDB
Connection: Keep-Alive
Content-Length: 197
<html><head><title>Moved</title></head><body><h1>Moved</h1><p>This page has moved to <a href="/_db/_system/_admin/aardvark/index.html">/_db/_system/_admin/aardvark/index.html</a>.</p></body></html>
### Reuse address

View File

@ -330,7 +330,11 @@ while [ $# -gt 0 ]; do
--targetDir)
shift
TARGET_DIR=$1
CONFIGURE_OPTIONS="${CONFIGURE_OPTIONS} -DPACKAGE_TARGET_DIR=$1"
if test "`uname -o||true`" == "Cygwin"; then
CONFIGURE_OPTIONS="${CONFIGURE_OPTIONS} -DPACKAGE_TARGET_DIR=`cygpath --windows $1`"
else
CONFIGURE_OPTIONS="${CONFIGURE_OPTIONS} -DPACKAGE_TARGET_DIR=$1"
fi
shift
;;

View File

@ -1,5 +1,5 @@
name: @CPACK_PACKAGE_NAME@
version: @CPACK_PACKAGE_VERSION@
version: @CPACK_PACKAGE_VERSION@-@ARANGODB_PACKAGE_REVISION@
description: "ArangoDB is a native multi-model database with flexible data models for
documents, graphs, and key-values. Build high performance applications using a convenient
SQL-like query language or JavaScript extensions. https://www.arangodb.com"
@ -15,9 +15,9 @@ grade: stable
parts:
@CPACK_PACKAGE_NAME@:
source: @CPACK_PACKAGE_TGZ@
source: @CMAKE_BINARY_DIR@/@CPACK_PACKAGE_FILE_NAME@.tar.gz
plugin: dump
snap:
prime:
- -etc
- -var
- -lib

View File

@ -366,6 +366,34 @@ via the environment variable or in the menu. Given we want to store the symbols
You then will be able to see stack traces in the debugger.
You may also try to download the symbols manually using:
symchk.exe arangod.exe /s SRV*e:/symbol_cache/cache*https://www.arangodb.com/repositories/symsrv/
The symbol server over at https://www.arangodb.com/repositories/symsrv/ is browsable; thus you can easily download the files you need by hand. It consists of a list of directories corresponding to the components of ArangoDB:
- arango - the basic arangodb library needed by all components
- arango_v8 - the basic V8 wrappers needed by all components
- arangod - the server process
- the client utilities:
- arangob
- arangobench
- arangoexport
- arangoimp
- arangorestore
- arangosh
- arangovpack
In these directories you will find subdirectories with the hash corresponding to the id of the binaries. Their date should correspond to the release date of their respective ArangoDB release.
This means, e.g. for ArangoDB 3.1.11:
https://www.arangodb.com/repositories/symsrv/arangod.pdb/A8B899D2EDFC40E994C30C32FCE5FB346/arangod.pd_
This file is a Microsoft cabinet file, which is a slightly compressed archive. You can dismantle it so Windows Explorer offers you its proper handler by renaming it to .cab; click on the now-named `arangod.cab` and copy the contained arangod.pdb into your symbol path.
Coredump analysis
-----------------
While Visual Studio may carry a nice shiny GUI, the concept of a GUI fails miserably e.g. in test automation. Getting an overview of all running threads is a tedious task with it. Here the command-line version of [WinDBG](http://www.windbg.org/), cdb, comes to the aid. `testing.js` utilizes it to obtain automatic stack traces for crashes.

View File

@ -34,8 +34,7 @@ add_executable(${TEST_BASICS_SUITE}
../lib/Basics/WorkMonitorDummy.cpp
)
include_directories(
${TEST_BASICS_SUITE}
include_directories(${TEST_BASICS_SUITE}
PUBLIC ${Boost_UNIT_TEST_INCLUDE_DIR}
)
@ -48,7 +47,56 @@ target_link_libraries(${TEST_BASICS_SUITE}
)
if (NOT USE_PRECOMPILED_V8)
add_dependencies(basics_suite v8_build)
add_dependencies(${TEST_BASICS_SUITE} v8_build)
endif ()
################################################################################
## cache_suite
################################################################################
add_executable(${TEST_CACHE_SUITE}
Cache/Runner.cpp
Cache/CachedValue.cpp
Cache/FrequencyBuffer.cpp
Cache/Manager.cpp
Cache/Metadata.cpp
Cache/MockScheduler.cpp
Cache/PlainBucket.cpp
Cache/PlainCache.cpp
Cache/Rebalancer.cpp
Cache/State.cpp
Cache/TransactionalBucket.cpp
Cache/TransactionWindow.cpp
../lib/Basics/WorkMonitorDummy.cpp
../arangod/Cache/Cache.cpp
../arangod/Cache/CacheManagerFeatureThreads.cpp
../arangod/Cache/CachedValue.cpp
../arangod/Cache/Manager.cpp
../arangod/Cache/ManagerTasks.cpp
../arangod/Cache/Metadata.cpp
../arangod/Cache/PlainBucket.cpp
../arangod/Cache/PlainCache.cpp
../arangod/Cache/Rebalancer.cpp
../arangod/Cache/State.cpp
../arangod/Cache/TransactionalBucket.cpp
../arangod/Cache/TransactionalCache.cpp
../arangod/Cache/TransactionWindow.cpp
)
include_directories(${TEST_CACHE_SUITE}
PUBLIC ${Boost_UNIT_TEST_INCLUDE_DIR}
)
target_link_libraries(${TEST_CACHE_SUITE}
${LIB_ARANGO}
${MSVC_LIBS}
boost_system
boost_boost
${SYSTEM_LIBRARIES}
)
if (NOT USE_PRECOMPILED_V8)
add_dependencies(${TEST_CACHE_SUITE} v8_build)
endif ()
################################################################################
@ -68,5 +116,5 @@ target_link_libraries(${TEST_GEO_SUITE}
)
if (NOT USE_PRECOMPILED_V8)
add_dependencies(geo_suite v8_build)
add_dependencies(${TEST_GEO_SUITE} v8_build)
endif ()

View File

@ -0,0 +1,183 @@
////////////////////////////////////////////////////////////////////////////////
/// @brief test suite for arangodb::cache::CachedValue
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2017 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is triAGENS GmbH, Cologne, Germany
///
/// @author Daniel H. Larkin
/// @author Copyright 2017, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////
#include "Basics/Common.h"
#define BOOST_TEST_INCLUDED
#include <boost/test/unit_test.hpp>
#include "Cache/CachedValue.h"
#include <stdint.h>
#include <string>
using namespace arangodb::cache;
// -----------------------------------------------------------------------------
// --SECTION-- setup / tear-down
// -----------------------------------------------------------------------------
// Boost.Test fixture for the CachedValue suite; only logs setup/tear-down
// messages, no shared state is created.
struct CCacheCachedValueSetup {
CCacheCachedValueSetup() { BOOST_TEST_MESSAGE("setup CachedValue"); }
~CCacheCachedValueSetup() { BOOST_TEST_MESSAGE("tear-down CachedValue"); }
};
// -----------------------------------------------------------------------------
// --SECTION-- test suite
// -----------------------------------------------------------------------------
////////////////////////////////////////////////////////////////////////////////
/// @brief setup
////////////////////////////////////////////////////////////////////////////////
BOOST_FIXTURE_TEST_SUITE(CCacheCachedValueTest, CCacheCachedValueSetup)
////////////////////////////////////////////////////////////////////////////////
/// @brief test construct with valid data
////////////////////////////////////////////////////////////////////////////////
// Verifies CachedValue::construct() for valid inputs: the reported key/value
// sizes, the total size() bookkeeping, and that key and value bytes
// round-trip. size() checks suggest key and value live in one allocation
// together with the CachedValue header — confirm against CachedValue.h.
BOOST_AUTO_TEST_CASE(tst_construct_valid) {
uint64_t k = 1;
std::string v("test");
CachedValue* cv;
// fixed key, variable value
cv = CachedValue::construct(&k, sizeof(uint64_t), v.data(), v.size());
BOOST_CHECK(nullptr != cv);
BOOST_CHECK_EQUAL(sizeof(uint64_t), cv->keySize);
BOOST_CHECK_EQUAL(v.size(), cv->valueSize);
BOOST_CHECK_EQUAL(sizeof(CachedValue) + sizeof(uint64_t) + v.size(),
cv->size());
BOOST_CHECK_EQUAL(k, *reinterpret_cast<uint64_t const*>(cv->key()));
BOOST_CHECK_EQUAL(0, memcmp(v.data(), cv->value(), v.size()));
delete cv;
// variable key, fixed value
cv = CachedValue::construct(v.data(), v.size(), &k, sizeof(uint64_t));
BOOST_CHECK(nullptr != cv);
BOOST_CHECK_EQUAL(v.size(), cv->keySize);
BOOST_CHECK_EQUAL(sizeof(uint64_t), cv->valueSize);
BOOST_CHECK_EQUAL(sizeof(CachedValue) + sizeof(uint64_t) + v.size(),
cv->size());
BOOST_CHECK_EQUAL(0, memcmp(v.data(), cv->key(), v.size()));
BOOST_CHECK_EQUAL(k, *reinterpret_cast<uint64_t const*>(cv->value()));
delete cv;
// fixed key, zero length value — a null value pointer with size 0 is legal
// and must yield a valid entry whose value() is nullptr
cv = CachedValue::construct(&k, sizeof(uint64_t), nullptr, 0);
BOOST_CHECK(nullptr != cv);
BOOST_CHECK_EQUAL(sizeof(uint64_t), cv->keySize);
BOOST_CHECK_EQUAL(0ULL, cv->valueSize);
BOOST_CHECK_EQUAL(sizeof(CachedValue) + sizeof(uint64_t), cv->size());
BOOST_CHECK_EQUAL(k, *reinterpret_cast<uint64_t const*>(cv->key()));
BOOST_CHECK(nullptr == cv->value());
delete cv;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief test construct with invalid data
////////////////////////////////////////////////////////////////////////////////
// Verifies CachedValue::construct() rejects invalid input combinations by
// returning nullptr: empty or null keys, and a null value with a non-zero
// claimed length. (A null value with zero length is the valid case covered
// in tst_construct_valid.)
BOOST_AUTO_TEST_CASE(tst_construct_invalid) {
uint64_t k = 1;
std::string v("test");
CachedValue* cv;
// zero size key
cv = CachedValue::construct(&k, 0, v.data(), v.size());
BOOST_CHECK(nullptr == cv);
// nullptr key, zero size
cv = CachedValue::construct(nullptr, 0, v.data(), v.size());
BOOST_CHECK(nullptr == cv);
// nullptr key, non-zero size
cv = CachedValue::construct(nullptr, sizeof(uint64_t), v.data(), v.size());
BOOST_CHECK(nullptr == cv);
// nullptr value, non-zero length
cv = CachedValue::construct(&k, sizeof(uint64_t), nullptr, v.size());
BOOST_CHECK(nullptr == cv);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief test copy
////////////////////////////////////////////////////////////////////////////////
// Verifies CachedValue::copy() produces an independent duplicate with the
// same sizes and byte content; both original and copy are deleted separately
// to show the copy does not alias the original's storage.
// NOTE(review): original->copy() is called without first checking that
// construct() succeeded; construction with these arguments is shown valid in
// tst_construct_valid, so this relies on that invariant.
BOOST_AUTO_TEST_CASE(tst_copy) {
uint64_t k = 1;
std::string v("test");
// fixed key, variable value
auto original =
CachedValue::construct(&k, sizeof(uint64_t), v.data(), v.size());
auto copy = original->copy();
BOOST_CHECK(nullptr != copy);
BOOST_CHECK_EQUAL(sizeof(uint64_t), copy->keySize);
BOOST_CHECK_EQUAL(v.size(), copy->valueSize);
BOOST_CHECK_EQUAL(sizeof(CachedValue) + sizeof(uint64_t) + v.size(),
copy->size());
BOOST_CHECK_EQUAL(k, *reinterpret_cast<uint64_t const*>(copy->key()));
BOOST_CHECK_EQUAL(0, memcmp(v.data(), copy->value(), v.size()));
delete original;
delete copy;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief test key comparison
////////////////////////////////////////////////////////////////////////////////
// Verifies CachedValue::sameKey(): equal bytes match; a longer key with a
// matching prefix does not; an equal-length key with different bytes
// (case-differing) does not — i.e. comparison is length- and case-sensitive.
BOOST_AUTO_TEST_CASE(tst_key_comparison) {
std::string k1("test");
std::string k2("testing");
std::string k3("TEST");
uint64_t v = 1;
auto cv = CachedValue::construct(k1.data(), k1.size(), &v, sizeof(uint64_t));
// same key
BOOST_CHECK(cv->sameKey(k1.data(), k1.size()));
// different length, matching prefix
BOOST_CHECK(!cv->sameKey(k2.data(), k2.size()));
// same length, different key
BOOST_CHECK(!cv->sameKey(k3.data(), k3.size()));
delete cv;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief generate tests
////////////////////////////////////////////////////////////////////////////////
BOOST_AUTO_TEST_SUITE_END()
// Local Variables:
// mode: outline-minor
// outline-regexp: "^\\(/// @brief\\|/// {@inheritDoc}\\|/// @addtogroup\\|//
// --SECTION--\\|/// @\\}\\)"
// End:

View File

@ -0,0 +1,141 @@
////////////////////////////////////////////////////////////////////////////////
/// @brief test suite for arangodb::cache::FrequencyBuffer
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2017 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is triAGENS GmbH, Cologne, Germany
///
/// @author Daniel H. Larkin
/// @author Copyright 2017, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////
#include "Basics/Common.h"
#define BOOST_TEST_INCLUDED
#include <boost/test/unit_test.hpp>
#include "Cache/FrequencyBuffer.h"
#include <stdint.h>
using namespace arangodb::cache;
// -----------------------------------------------------------------------------
// --SECTION-- setup / tear-down
// -----------------------------------------------------------------------------
// Boost.Test fixture for the FrequencyBuffer suite; only logs
// setup/tear-down messages, no shared state is created.
struct CCacheFrequencyBufferSetup {
CCacheFrequencyBufferSetup() { BOOST_TEST_MESSAGE("setup FrequencyBuffer"); }
~CCacheFrequencyBufferSetup() {
BOOST_TEST_MESSAGE("tear-down FrequencyBuffer");
}
};
// -----------------------------------------------------------------------------
// --SECTION-- test suite
// -----------------------------------------------------------------------------
////////////////////////////////////////////////////////////////////////////////
/// @brief setup
////////////////////////////////////////////////////////////////////////////////
BOOST_FIXTURE_TEST_SUITE(CCacheFrequencyBufferTest, CCacheFrequencyBufferSetup)
////////////////////////////////////////////////////////////////////////////////
/// @brief test behavior with ints
////////////////////////////////////////////////////////////////////////////////
// Exercises FrequencyBuffer with a trivially-copyable element type (uint8_t):
// memory accounting, frequency counts after mixed inserts, and eviction of
// older entries once the fixed-size (8-slot) buffer is filled by newer ones.
BOOST_AUTO_TEST_CASE(tst_uint8_t) {
uint8_t zero = 0;
uint8_t one = 1;
uint8_t two = 2;
// check that default construction is as expected
BOOST_CHECK(uint8_t() == zero);
FrequencyBuffer<uint8_t> buffer(8);
BOOST_CHECK_EQUAL(buffer.memoryUsage(), sizeof(FrequencyBuffer<uint8_t>) + 8);
// 4 records of 'two', then 2 of 'one' — buffer holds 6 of its 8 slots
for (size_t i = 0; i < 4; i++) {
buffer.insertRecord(two);
}
for (size_t i = 0; i < 2; i++) {
buffer.insertRecord(one);
}
// frequencies appear sorted ascending by count: (one,2) before (two,4)
auto frequencies = buffer.getFrequencies();
BOOST_CHECK_EQUAL(2ULL, frequencies->size());
BOOST_CHECK_EQUAL(one, (*frequencies)[0].first);
BOOST_CHECK_EQUAL(2ULL, (*frequencies)[0].second);
BOOST_CHECK_EQUAL(two, (*frequencies)[1].first);
BOOST_CHECK_EQUAL(4ULL, (*frequencies)[1].second);
// 8 further inserts of 'one' fill all slots, evicting every 'two'
for (size_t i = 0; i < 8; i++) {
buffer.insertRecord(one);
}
frequencies = buffer.getFrequencies();
BOOST_CHECK_EQUAL(1ULL, frequencies->size());
BOOST_CHECK_EQUAL(one, (*frequencies)[0].first);
BOOST_CHECK_EQUAL(8ULL, (*frequencies)[0].second);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief test behavior with pointers
////////////////////////////////////////////////////////////////////////////////
// Exercises FrequencyBuffer with a pointer element type: per-slot memory
// accounting scales by sizeof(uint8_t*), and frequencies are keyed by the
// pointer values themselves, not the pointees.
BOOST_AUTO_TEST_CASE(tst_pointers) {
uint8_t* zero = nullptr;
uint8_t one = 1;
uint8_t two = 2;
// check that default construction is as expected
typedef uint8_t* smallptr;
BOOST_CHECK(smallptr() == zero);
FrequencyBuffer<uint8_t*> buffer(8);
BOOST_CHECK_EQUAL(buffer.memoryUsage(),
sizeof(FrequencyBuffer<uint8_t*>) + (8 * sizeof(uint8_t*)));
// 4 records of &two, then 2 of &one
for (size_t i = 0; i < 4; i++) {
buffer.insertRecord(&two);
}
for (size_t i = 0; i < 2; i++) {
buffer.insertRecord(&one);
}
// frequencies sorted ascending by count: (&one,2) before (&two,4)
auto frequencies = buffer.getFrequencies();
BOOST_CHECK_EQUAL(2ULL, frequencies->size());
BOOST_CHECK_EQUAL(&one, (*frequencies)[0].first);
BOOST_CHECK_EQUAL(2ULL, (*frequencies)[0].second);
BOOST_CHECK_EQUAL(&two, (*frequencies)[1].first);
BOOST_CHECK_EQUAL(4ULL, (*frequencies)[1].second);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief generate tests
////////////////////////////////////////////////////////////////////////////////
BOOST_AUTO_TEST_SUITE_END()
// Local Variables:
// mode: outline-minor
// outline-regexp: "^\\(/// @brief\\|/// {@inheritDoc}\\|/// @addtogroup\\|//
// --SECTION--\\|/// @\\}\\)"
// End:

257
UnitTests/Cache/Manager.cpp Normal file
View File

@ -0,0 +1,257 @@
////////////////////////////////////////////////////////////////////////////////
/// @brief test suite for arangodb::cache::Manager
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2017 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is triAGENS GmbH, Cologne, Germany
///
/// @author Daniel H. Larkin
/// @author Copyright 2017, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////
#include "Basics/Common.h"
#include "Random/RandomGenerator.h"
#define BOOST_TEST_INCLUDED
#include <boost/test/unit_test.hpp>
#include "Cache/CacheManagerFeatureThreads.h"
#include "Cache/Manager.h"
#include "Cache/PlainCache.h"
#include "MockScheduler.h"
#include <stdint.h>
#include <queue>
#include <string>
#include <thread>
#include <vector>
#include <iostream>
using namespace arangodb;
using namespace arangodb::cache;
// -----------------------------------------------------------------------------
// --SECTION-- setup / tear-down
// -----------------------------------------------------------------------------
// Boost.Test fixture for the cache Manager suite; only logs setup/tear-down
// messages, no shared state is created.
struct CCacheManagerSetup {
CCacheManagerSetup() { BOOST_TEST_MESSAGE("setup Manager"); }
~CCacheManagerSetup() { BOOST_TEST_MESSAGE("tear-down Manager"); }
};
// -----------------------------------------------------------------------------
// --SECTION-- test suite
// -----------------------------------------------------------------------------
////////////////////////////////////////////////////////////////////////////////
/// @brief setup
////////////////////////////////////////////////////////////////////////////////
BOOST_FIXTURE_TEST_SUITE(CCacheManagerTest, CCacheManagerSetup)
////////////////////////////////////////////////////////////////////////////////
/// @brief test constructor with valid data
////////////////////////////////////////////////////////////////////////////////
// Verifies Manager construction (with a null io_service): the global limit
// is stored as given, and the initial global allocation is non-zero (the
// manager's own overhead) yet below the configured limit, for both a small
// (1MB) and a large (4GB) limit.
BOOST_AUTO_TEST_CASE(tst_constructor) {
uint64_t requestLimit = 1024 * 1024;
Manager manager(nullptr, requestLimit);
BOOST_CHECK_EQUAL(requestLimit, manager.globalLimit());
BOOST_CHECK(0ULL < manager.globalAllocation());
BOOST_CHECK(requestLimit > manager.globalAllocation());
uint64_t bigRequestLimit = 4ULL * 1024ULL * 1024ULL * 1024ULL;
Manager bigManager(nullptr, bigRequestLimit);
BOOST_CHECK_EQUAL(bigRequestLimit, bigManager.globalLimit());
BOOST_CHECK((1024ULL * 1024ULL) < bigManager.globalAllocation());
BOOST_CHECK(bigRequestLimit > bigManager.globalAllocation());
}
////////////////////////////////////////////////////////////////////////////////
/// @brief test mixed load behavior (multi-threaded)
////////////////////////////////////////////////////////////////////////////////
// Multi-threaded mixed workload over 4 caches sharing one Manager: 4 worker
// threads each own a disjoint key range [lower, upper] and run a randomized
// mix of ~1% removals, ~4% inserts, and ~95% lookups after seeding the
// caches with initial data. Keys are sharded across caches by item %
// cacheCount. Correctness of individual findings is checked via TRI_ASSERT.
BOOST_AUTO_TEST_CASE(tst_mixed_load) {
uint64_t initialSize = 16ULL * 1024ULL;
RandomGenerator::initialize(RandomGenerator::RandomType::MERSENNE);
MockScheduler scheduler(4);
Manager manager(scheduler.ioService(), 1024ULL * 1024ULL * 1024ULL);
size_t cacheCount = 4;
size_t threadCount = 4;
std::vector<std::shared_ptr<Cache>> caches;
for (size_t i = 0; i < cacheCount; i++) {
caches.emplace_back(
manager.createCache(Manager::CacheType::Plain, initialSize, true));
}
uint64_t chunkSize = 4 * 1024 * 1024;
uint64_t initialInserts = 1 * 1024 * 1024;
uint64_t operationCount = 4 * 1024 * 1024;
// shared hit/miss tallies; atomics since all workers update them
std::atomic<uint64_t> hitCount(0);
std::atomic<uint64_t> missCount(0);
auto worker = [&manager, &caches, cacheCount, initialInserts, operationCount,
&hitCount,
&missCount](uint64_t lower, uint64_t upper) -> void {
// fill with some initial data
for (uint64_t i = 0; i < initialInserts; i++) {
uint64_t item = lower + i;
size_t cacheIndex = item % cacheCount;
CachedValue* value = CachedValue::construct(&item, sizeof(uint64_t),
&item, sizeof(uint64_t));
bool ok = caches[cacheIndex]->insert(value);
if (!ok) {
// insert() did not take ownership; avoid leaking the value
delete value;
}
}
// initialize valid range for keys that *might* be in cache
uint64_t validLower = lower;
uint64_t validUpper = lower + initialInserts - 1;
// commence mixed workload
for (uint64_t i = 0; i < operationCount; i++) {
uint32_t r = RandomGenerator::interval(static_cast<uint32_t>(99UL));
if (r >= 99) { // remove something
if (validLower == validUpper) {
continue; // removed too much
}
uint64_t item = validLower++;
size_t cacheIndex = item % cacheCount;
caches[cacheIndex]->remove(&item, sizeof(uint64_t));
} else if (r >= 95) { // insert something
if (validUpper == upper) {
continue; // already maxed out range
}
uint64_t item = ++validUpper;
size_t cacheIndex = item % cacheCount;
CachedValue* value = CachedValue::construct(&item, sizeof(uint64_t),
&item, sizeof(uint64_t));
bool ok = caches[cacheIndex]->insert(value);
if (!ok) {
delete value;
}
} else { // lookup something
uint64_t item = RandomGenerator::interval(
static_cast<int64_t>(validLower), static_cast<int64_t>(validUpper));
size_t cacheIndex = item % cacheCount;
Cache::Finding f = caches[cacheIndex]->find(&item, sizeof(uint64_t));
if (f.found()) {
hitCount++;
TRI_ASSERT(f.value() != nullptr);
TRI_ASSERT(f.value()->sameKey(&item, sizeof(uint64_t)));
} else {
missCount++;
TRI_ASSERT(f.value() == nullptr);
}
}
}
};
std::vector<std::thread*> threads;
// dispatch threads
for (size_t i = 0; i < threadCount; i++) {
uint64_t lower = i * chunkSize;
uint64_t upper = ((i + 1) * chunkSize) - 1;
threads.push_back(new std::thread(worker, lower, upper));
}
// join threads
for (auto t : threads) {
t->join();
delete t;
}
for (auto cache : caches) {
manager.destroyCache(cache);
}
RandomGenerator::shutdown();
}
////////////////////////////////////////////////////////////////////////////////
/// @brief test creation/destruction chaos (multi-threaded)
////////////////////////////////////////////////////////////////////////////////
BOOST_AUTO_TEST_CASE(tst_lifecycle_chaos) {
  uint64_t initialSize = 16ULL * 1024ULL;
  RandomGenerator::initialize(RandomGenerator::RandomType::MERSENNE);
  MockScheduler scheduler(4);
  Manager manager(scheduler.ioService(), 1024ULL * 1024ULL * 1024ULL);
  size_t threadCount = 4;
  uint64_t operationCount = 4ULL * 1024ULL;

  // each worker randomly creates or destroys caches to stress the manager's
  // registration and cleanup paths under concurrency
  auto worker = [&manager, initialSize, operationCount]() -> void {
    std::queue<std::shared_ptr<Cache>> caches;
    for (uint64_t i = 0; i < operationCount; i++) {
      // r is 0 or 1, choosing create vs. destroy with equal probability
      uint32_t r = RandomGenerator::interval(static_cast<uint32_t>(1UL));
      switch (r) {
        case 0: {
          caches.emplace(manager.createCache(Manager::CacheType::Plain,
                                             initialSize, true));
          // 'break' was missing here: control fell through into the destroy
          // branch, so every freshly created cache was destroyed immediately
          // and the create/destroy split was no longer 50/50
          break;
        }
        case 1:
        default: {
          // destroy the oldest cache this worker still owns, if any
          if (!caches.empty()) {
            auto cache = caches.front();
            caches.pop();
            manager.destroyCache(cache);
          }
          break;
        }
      }
    }
    // drain leftovers so no cache stays registered with the manager after
    // the worker exits
    while (!caches.empty()) {
      manager.destroyCache(caches.front());
      caches.pop();
    }
  };

  std::vector<std::thread*> threads;
  // dispatch threads
  for (size_t i = 0; i < threadCount; i++) {
    threads.push_back(new std::thread(worker));
  }

  // join threads
  for (auto t : threads) {
    t->join();
    delete t;
  }

  RandomGenerator::shutdown();
}
////////////////////////////////////////////////////////////////////////////////
/// @brief generate tests
////////////////////////////////////////////////////////////////////////////////
BOOST_AUTO_TEST_SUITE_END()
// Local Variables:
// mode: outline-minor
// outline-regexp: "^\\(/// @brief\\|/// {@inheritDoc}\\|/// @addtogroup\\|//
// --SECTION--\\|/// @\\}\\)"
// End:

View File

@ -0,0 +1,207 @@
////////////////////////////////////////////////////////////////////////////////
/// @brief test suite for arangodb::cache::Metadata
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2017 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is triAGENS GmbH, Cologne, Germany
///
/// @author Daniel H. Larkin
/// @author Copyright 2017, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////
#include "Basics/Common.h"
#define BOOST_TEST_INCLUDED
#include <boost/test/unit_test.hpp>
#include "Cache/Metadata.h"
#include <stdint.h>
#include <memory>
using namespace arangodb::cache;
// -----------------------------------------------------------------------------
// --SECTION-- setup / tear-down
// -----------------------------------------------------------------------------
// Boost.Test fixture: prints lifecycle markers around the Metadata suite.
struct CCacheMetadataSetup {
  CCacheMetadataSetup() {
    BOOST_TEST_MESSAGE("setup Metadata");
  }

  ~CCacheMetadataSetup() {
    BOOST_TEST_MESSAGE("tear-down Metadata");
  }
};
// -----------------------------------------------------------------------------
// --SECTION-- test suite
// -----------------------------------------------------------------------------
////////////////////////////////////////////////////////////////////////////////
/// @brief setup
////////////////////////////////////////////////////////////////////////////////
BOOST_FIXTURE_TEST_SUITE(CCacheMetadataTest, CCacheMetadataSetup)
////////////////////////////////////////////////////////////////////////////////
/// @brief test constructor with valid data
////////////////////////////////////////////////////////////////////////////////
BOOST_AUTO_TEST_CASE(tst_constructor) {
  // fake cache pointer with a no-op deleter; it is never dereferenced,
  // Metadata only stores the shared_ptr
  uint64_t stub;
  std::shared_ptr<Cache> fakeCache(reinterpret_cast<Cache*>(&stub),
                                   [](Cache*) -> void {});

  uint8_t table;
  uint32_t const tableLogSize = 1;
  uint64_t const softLimit = 1024;

  // must construct cleanly from valid inputs
  Metadata meta(fakeCache, softLimit, &table, tableLogSize);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief test getters
////////////////////////////////////////////////////////////////////////////////
BOOST_AUTO_TEST_CASE(tst_getters) {
  // fake cache pointer with a no-op deleter; never dereferenced
  uint64_t stub;
  std::shared_ptr<Cache> fakeCache(reinterpret_cast<Cache*>(&stub),
                                   [](Cache*) -> void {});

  uint8_t table;
  uint32_t const tableLogSize = 1;
  uint64_t const limitValue = 1024;
  Metadata meta(fakeCache, limitValue, &table, tableLogSize);

  meta.lock();
  // a fresh Metadata reports exactly the inputs it was constructed with,
  // no usage yet, and no auxiliary table
  BOOST_CHECK(fakeCache == meta.cache());
  BOOST_CHECK_EQUAL(tableLogSize, meta.logSize());
  BOOST_CHECK_EQUAL(0UL, meta.auxiliaryLogSize());
  BOOST_CHECK_EQUAL(limitValue, meta.softLimit());
  BOOST_CHECK_EQUAL(limitValue, meta.hardLimit());
  BOOST_CHECK_EQUAL(0UL, meta.usage());
  BOOST_CHECK(&table == meta.table());
  BOOST_CHECK(nullptr == meta.auxiliaryTable());
  meta.unlock();
}
////////////////////////////////////////////////////////////////////////////////
/// @brief test usage limits
////////////////////////////////////////////////////////////////////////////////
BOOST_AUTO_TEST_CASE(tst_usage_limits) {
  // fake cache pointer with a no-op deleter; never dereferenced
  uint64_t stub;
  std::shared_ptr<Cache> fakeCache(reinterpret_cast<Cache*>(&stub),
                                   [](Cache*) -> void {});

  uint8_t table;
  uint32_t const tableLogSize = 1;
  Metadata meta(fakeCache, 1024ULL, &table, tableLogSize);

  meta.lock();

  // limit is 1024: two 512-byte increments fit, a third is refused
  BOOST_CHECK(meta.adjustUsageIfAllowed(512LL));
  BOOST_CHECK(meta.adjustUsageIfAllowed(512LL));
  BOOST_CHECK(!meta.adjustUsageIfAllowed(512LL));

  // raising both limits to 2048 makes room for another 1024
  BOOST_CHECK(meta.adjustLimits(2048ULL, 2048ULL));
  BOOST_CHECK(meta.adjustUsageIfAllowed(1024LL));

  // soft limit back to 1024 (hard stays 2048) with usage at 2048:
  // further growth is refused, shrinking works, and regrowth back up to
  // the hard limit is still permitted
  BOOST_CHECK(meta.adjustLimits(1024ULL, 2048ULL));
  BOOST_CHECK(!meta.adjustUsageIfAllowed(512LL));
  BOOST_CHECK(meta.adjustUsageIfAllowed(-512LL));
  BOOST_CHECK(meta.adjustUsageIfAllowed(512LL));

  // once usage drops to the soft limit, growth beyond it is refused again
  BOOST_CHECK(meta.adjustUsageIfAllowed(-1024LL));
  BOOST_CHECK(!meta.adjustUsageIfAllowed(512LL));

  // limits may shrink down to current usage, but not below it
  BOOST_CHECK(meta.adjustLimits(1024ULL, 1024ULL));
  BOOST_CHECK(!meta.adjustLimits(512ULL, 512ULL));

  meta.unlock();
}
////////////////////////////////////////////////////////////////////////////////
/// @brief test migration methods
////////////////////////////////////////////////////////////////////////////////
BOOST_AUTO_TEST_CASE(tst_migration) {
  // fake cache pointer with a no-op deleter; never dereferenced
  uint64_t stub;
  std::shared_ptr<Cache> fakeCache(reinterpret_cast<Cache*>(&stub),
                                   [](Cache*) -> void {});

  uint8_t primary;
  uint8_t secondary;
  uint32_t const primaryLogSize = 1;
  uint32_t const secondaryLogSize = 2;
  uint64_t const limitValue = 1024;
  Metadata meta(fakeCache, limitValue, &primary, primaryLogSize);

  meta.lock();

  // granting registers the auxiliary table alongside the main one
  meta.grantAuxiliaryTable(&secondary, secondaryLogSize);
  BOOST_CHECK_EQUAL(secondaryLogSize, meta.auxiliaryLogSize());
  BOOST_CHECK(&secondary == meta.auxiliaryTable());

  // swapping exchanges the main and auxiliary roles
  meta.swapTables();
  BOOST_CHECK_EQUAL(primaryLogSize, meta.auxiliaryLogSize());
  BOOST_CHECK_EQUAL(secondaryLogSize, meta.logSize());
  BOOST_CHECK(&primary == meta.auxiliaryTable());
  BOOST_CHECK(&secondary == meta.table());

  // releasing hands the pointer back and clears the corresponding slot
  uint8_t* released = meta.releaseAuxiliaryTable();
  BOOST_CHECK_EQUAL(0UL, meta.auxiliaryLogSize());
  BOOST_CHECK(nullptr == meta.auxiliaryTable());
  BOOST_CHECK(released == &primary);

  released = meta.releaseTable();
  BOOST_CHECK_EQUAL(0UL, meta.logSize());
  BOOST_CHECK(nullptr == meta.table());
  BOOST_CHECK(released == &secondary);

  meta.unlock();
}
////////////////////////////////////////////////////////////////////////////////
/// @brief generate tests
////////////////////////////////////////////////////////////////////////////////
BOOST_AUTO_TEST_SUITE_END()
// Local Variables:
// mode: outline-minor
// outline-regexp: "^\\(/// @brief\\|/// {@inheritDoc}\\|/// @addtogroup\\|//
// --SECTION--\\|/// @\\}\\)"
// End:

View File

@ -0,0 +1,59 @@
////////////////////////////////////////////////////////////////////////////////
/// @brief helper for cache suite
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2017 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is triAGENS GmbH, Cologne, Germany
///
/// @author Daniel H. Larkin
/// @author Copyright 2017, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////
#include "MockScheduler.h"
#include "Basics/Common.h"
#include <boost/asio/io_service.hpp>
#include <boost/bind.hpp>
#include <boost/thread/thread.hpp>
#include <memory>
using namespace arangodb::cache;
// Spin up a pool of worker threads, each blocked in io_service::run() until
// the work guard is released in the destructor.
MockScheduler::MockScheduler(size_t threads)
    : _ioService(new boost::asio::io_service()),
      _serviceGuard(new boost::asio::io_service::work(*_ioService)) {
  for (size_t i = 0; i < threads; i++) {
    _group.emplace_back(new std::thread([this]() -> void {
      _ioService->run();  // returns once the guard is gone and work drains
    }));
  }
}
MockScheduler::~MockScheduler() {
  // release the work guard first so each thread's io_service::run() can
  // return once the queued handlers drain
  _serviceGuard.reset();
  // wait for every worker thread to exit, then free it
  for (auto g : _group) {
    g->join();
    delete g;
  }
  // run() has returned everywhere by now; stop() is a final safety net
  _ioService->stop();
}
// Expose the raw io_service so tests can hand it to a cache::Manager.
boost::asio::io_service* MockScheduler::ioService() {
  return _ioService.get();
}

View File

@ -0,0 +1,57 @@
////////////////////////////////////////////////////////////////////////////////
/// @brief helper for cache suite
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2017 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is triAGENS GmbH, Cologne, Germany
///
/// @author Daniel H. Larkin
/// @author Copyright 2017, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////
#ifndef UNITTESTS_CACHE_MOCK_SCHEDULER_H
#define UNITTESTS_CACHE_MOCK_SCHEDULER_H
#include "Basics/Common.h"
#include "Basics/asio-helper.h"
#include <memory>
#include <thread>
#include <vector>
namespace arangodb {
namespace cache {
// Minimal scheduler stand-in for the cache test suites: owns a
// boost::asio::io_service plus a fixed pool of threads running it; the tests
// pass ioService() to cache::Manager so background tasks have an executor.
class MockScheduler {
  typedef std::unique_ptr<boost::asio::io_service::work> asio_worker;
  // the event loop driven by the worker threads
  std::unique_ptr<boost::asio::io_service> _ioService;
  // work guard keeping io_service::run() alive while the scheduler exists
  std::unique_ptr<boost::asio::io_service::work> _serviceGuard;
  // owned worker threads; joined and deleted in the destructor
  std::vector<std::thread*> _group;
 public:
  MockScheduler(size_t threads);
  ~MockScheduler();
  boost::asio::io_service* ioService();
};
}; // end namespace cache
}; // end namespace arangodb
#endif

View File

@ -0,0 +1,236 @@
////////////////////////////////////////////////////////////////////////////////
/// @brief test suite for arangodb::cache::PlainBucket
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2017 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is triAGENS GmbH, Cologne, Germany
///
/// @author Daniel H. Larkin
/// @author Copyright 2017, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////
#include "Basics/Common.h"
#define BOOST_TEST_INCLUDED
#include <boost/test/unit_test.hpp>
#include "Cache/PlainBucket.h"
#include <stdint.h>
#include <string>
using namespace arangodb::cache;
// -----------------------------------------------------------------------------
// --SECTION-- setup / tear-down
// -----------------------------------------------------------------------------
// Boost.Test fixture: prints lifecycle markers around the PlainBucket suite.
struct CCachePlainBucketSetup {
  CCachePlainBucketSetup() {
    BOOST_TEST_MESSAGE("setup PlainBucket");
  }

  ~CCachePlainBucketSetup() {
    BOOST_TEST_MESSAGE("tear-down PlainBucket");
  }
};
// -----------------------------------------------------------------------------
// --SECTION-- test suite
// -----------------------------------------------------------------------------
////////////////////////////////////////////////////////////////////////////////
/// @brief setup
////////////////////////////////////////////////////////////////////////////////
BOOST_FIXTURE_TEST_SUITE(CCachePlainBucketTest, CCachePlainBucketSetup)
////////////////////////////////////////////////////////////////////////////////
/// @brief test insertion to full and fail beyond
////////////////////////////////////////////////////////////////////////////////
BOOST_AUTO_TEST_CASE(tst_insertion) {
  PlainBucket bucket;
  // hashes need not be real, just unique and non-zero
  uint32_t hashVals[6] = {1, 2, 3, 4, 5, 6};
  uint64_t keyVals[6] = {0, 1, 2, 3, 4, 5};
  uint64_t dataVals[6] = {0, 1, 2, 3, 4, 5};
  CachedValue* cached[6];
  for (size_t i = 0; i < 6; i++) {
    cached[i] = CachedValue::construct(&(keyVals[i]), sizeof(uint64_t),
                                       &(dataVals[i]), sizeof(uint64_t));
  }

  BOOST_CHECK(bucket.lock(-1LL));

  // fill the bucket one entry at a time; it reports full exactly after the
  // fifth insertion
  BOOST_CHECK(!bucket.isFull());
  for (size_t i = 0; i < 5; i++) {
    bucket.insert(hashVals[i], cached[i]);
    BOOST_CHECK(bucket.isFull() == (i == 4));
  }

  // every inserted entry is retrievable
  for (size_t i = 0; i < 5; i++) {
    BOOST_CHECK(cached[i] == bucket.find(hashVals[i], cached[i]->key(),
                                         cached[i]->keySize));
  }

  // a sixth insertion into a full bucket is silently ignored
  bucket.insert(hashVals[5], cached[5]);
  BOOST_CHECK(nullptr ==
              bucket.find(hashVals[5], cached[5]->key(), cached[5]->keySize));

  bucket.unlock();

  // cleanup
  for (size_t i = 0; i < 6; i++) {
    delete cached[i];
  }
}
////////////////////////////////////////////////////////////////////////////////
/// @brief test removal
////////////////////////////////////////////////////////////////////////////////
BOOST_AUTO_TEST_CASE(tst_removal) {
  PlainBucket bucket;
  // hashes need not be real, just unique and non-zero
  uint32_t hashVals[3] = {1, 2, 3};
  uint64_t keyVals[3] = {0, 1, 2};
  uint64_t dataVals[3] = {0, 1, 2};
  CachedValue* cached[3];
  for (size_t i = 0; i < 3; i++) {
    cached[i] = CachedValue::construct(&(keyVals[i]), sizeof(uint64_t),
                                       &(dataVals[i]), sizeof(uint64_t));
  }

  BOOST_CHECK(bucket.lock(-1LL));

  // populate and verify all three entries are retrievable
  for (size_t i = 0; i < 3; i++) {
    bucket.insert(hashVals[i], cached[i]);
  }
  for (size_t i = 0; i < 3; i++) {
    BOOST_CHECK(cached[i] == bucket.find(hashVals[i], cached[i]->key(),
                                         cached[i]->keySize));
  }

  // remove in non-insertion order (middle, first, last): each removal hands
  // back the stored value and the subsequent lookup misses
  size_t const removalOrder[3] = {1, 0, 2};
  for (size_t idx : removalOrder) {
    CachedValue* removed =
        bucket.remove(hashVals[idx], cached[idx]->key(), cached[idx]->keySize);
    BOOST_CHECK(removed == cached[idx]);
    BOOST_CHECK(nullptr == bucket.find(hashVals[idx], cached[idx]->key(),
                                       cached[idx]->keySize));
  }

  bucket.unlock();

  // cleanup
  for (size_t i = 0; i < 3; i++) {
    delete cached[i];
  }
}
////////////////////////////////////////////////////////////////////////////////
/// @brief test eviction with subsequent insertion
////////////////////////////////////////////////////////////////////////////////
BOOST_AUTO_TEST_CASE(tst_eviction) {
  PlainBucket bucket;
  // hashes need not be real, just unique and non-zero
  uint32_t hashVals[6] = {1, 2, 3, 4, 5, 6};
  uint64_t keyVals[6] = {0, 1, 2, 3, 4, 5};
  uint64_t dataVals[6] = {0, 1, 2, 3, 4, 5};
  CachedValue* cached[6];
  for (size_t i = 0; i < 6; i++) {
    cached[i] = CachedValue::construct(&(keyVals[i]), sizeof(uint64_t),
                                       &(dataVals[i]), sizeof(uint64_t));
  }

  BOOST_CHECK(bucket.lock(-1LL));

  // fill all five slots; full exactly after the fifth insertion
  BOOST_CHECK(!bucket.isFull());
  for (size_t i = 0; i < 5; i++) {
    bucket.insert(hashVals[i], cached[i]);
    BOOST_CHECK(bucket.isFull() == (i == 4));
  }
  for (size_t i = 0; i < 5; i++) {
    BOOST_CHECK(cached[i] == bucket.find(hashVals[i], cached[i]->key(),
                                         cached[i]->keySize));
  }

  // the first-inserted entry is offered as the eviction candidate
  CachedValue* candidate = bucket.evictionCandidate();
  BOOST_CHECK(candidate == cached[0]);
  bucket.evict(candidate, false);
  BOOST_CHECK(nullptr ==
              bucket.find(hashVals[0], cached[0]->key(), cached[0]->keySize));
  BOOST_CHECK(!bucket.isFull());

  // even when no longer full, the next-oldest entry is still the candidate
  candidate = bucket.evictionCandidate();
  BOOST_CHECK(candidate == cached[1]);
  bucket.evict(candidate, true);
  BOOST_CHECK(nullptr ==
              bucket.find(hashVals[1], cached[1]->key(), cached[1]->keySize));
  BOOST_CHECK(!bucket.isFull());

  // eviction optimized for insertion left room for a fresh entry
  bucket.insert(hashVals[5], cached[5]);
  BOOST_CHECK(cached[5] ==
              bucket.find(hashVals[5], cached[5]->key(), cached[5]->keySize));

  bucket.unlock();

  // cleanup
  for (size_t i = 0; i < 6; i++) {
    delete cached[i];
  }
}
////////////////////////////////////////////////////////////////////////////////
/// @brief generate tests
////////////////////////////////////////////////////////////////////////////////
BOOST_AUTO_TEST_SUITE_END()
// Local Variables:
// mode: outline-minor
// outline-regexp: "^\\(/// @brief\\|/// {@inheritDoc}\\|/// @addtogroup\\|//
// --SECTION--\\|/// @\\}\\)"
// End:

View File

@ -0,0 +1,360 @@
////////////////////////////////////////////////////////////////////////////////
/// @brief test suite for arangodb::cache::PlainBucket
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2017 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is triAGENS GmbH, Cologne, Germany
///
/// @author Daniel H. Larkin
/// @author Copyright 2017, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////
#include "Basics/Common.h"
#include "Random/RandomGenerator.h"
#define BOOST_TEST_INCLUDED
#include <boost/test/unit_test.hpp>
#include "Cache/Manager.h"
#include "Cache/PlainCache.h"
#include "MockScheduler.h"
#include <stdint.h>
#include <string>
#include <thread>
#include <vector>
#include <iostream>
using namespace arangodb;
using namespace arangodb::cache;
// -----------------------------------------------------------------------------
// --SECTION-- setup / tear-down
// -----------------------------------------------------------------------------
// Boost.Test fixture: prints lifecycle markers around the PlainCache suite.
struct CCachePlainCacheSetup {
  CCachePlainCacheSetup() {
    BOOST_TEST_MESSAGE("setup PlainCache");
  }

  ~CCachePlainCacheSetup() {
    BOOST_TEST_MESSAGE("tear-down PlainCache");
  }
};
// -----------------------------------------------------------------------------
// --SECTION-- test suite
// -----------------------------------------------------------------------------
////////////////////////////////////////////////////////////////////////////////
/// @brief setup
////////////////////////////////////////////////////////////////////////////////
BOOST_FIXTURE_TEST_SUITE(CCachePlainCacheTest, CCachePlainCacheSetup)
////////////////////////////////////////////////////////////////////////////////
/// @brief test construction (single-threaded)
////////////////////////////////////////////////////////////////////////////////
BOOST_AUTO_TEST_CASE(tst_st_construction) {
  Manager manager(nullptr, 1024ULL * 1024ULL);
  auto smallCache =
      manager.createCache(Manager::CacheType::Plain, 256ULL * 1024ULL, false);
  auto largeCache =
      manager.createCache(Manager::CacheType::Plain, 512ULL * 1024ULL, false);

  // both caches start empty; the first gets its requested limit, while the
  // second is granted less than requested (presumably capped by the
  // manager's 1MB global limit)
  BOOST_CHECK_EQUAL(0ULL, smallCache->usage());
  BOOST_CHECK_EQUAL(256ULL * 1024ULL, smallCache->limit());
  BOOST_CHECK_EQUAL(0ULL, largeCache->usage());
  BOOST_CHECK(512ULL * 1024ULL > largeCache->limit());

  manager.destroyCache(smallCache);
  manager.destroyCache(largeCache);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief test insertion (single-threaded)
////////////////////////////////////////////////////////////////////////////////
BOOST_AUTO_TEST_CASE(tst_st_insertion) {
  uint64_t cacheLimit = 256ULL * 1024ULL;
  Manager manager(nullptr, 4ULL * cacheLimit);
  auto cache =
      manager.createCache(Manager::CacheType::Plain, cacheLimit, false);

  // phase 1: fresh keys 0..1023 all insert and are immediately findable
  for (uint64_t i = 0; i < 1024; i++) {
    CachedValue* value =
        CachedValue::construct(&i, sizeof(uint64_t), &i, sizeof(uint64_t));
    BOOST_CHECK(cache->insert(value));
    BOOST_CHECK(cache->find(&i, sizeof(uint64_t)).found());
  }

  // phase 2: re-inserting an existing key stores the new value
  for (uint64_t i = 0; i < 1024; i++) {
    uint64_t doubled = 2 * i;
    CachedValue* value = CachedValue::construct(&i, sizeof(uint64_t),
                                                &doubled, sizeof(uint64_t));
    BOOST_CHECK(cache->insert(value));
    auto f = cache->find(&i, sizeof(uint64_t));
    BOOST_CHECK(f.found());
    BOOST_CHECK(0 == memcmp(f.value()->value(), &doubled, sizeof(uint64_t)));
  }

  // phase 3: push far more keys than the cache can hold; at least some
  // insertions must be rejected once it saturates
  uint64_t rejected = 0;
  for (uint64_t i = 1024; i < 128 * 1024; i++) {
    CachedValue* value =
        CachedValue::construct(&i, sizeof(uint64_t), &i, sizeof(uint64_t));
    if (cache->insert(value)) {
      BOOST_CHECK(cache->find(&i, sizeof(uint64_t)).found());
    } else {
      delete value;  // rejected values stay owned by the caller
      rejected++;
    }
  }
  BOOST_CHECK(rejected > 0);

  manager.destroyCache(cache);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief test removal (single-threaded)
////////////////////////////////////////////////////////////////////////////////
BOOST_AUTO_TEST_CASE(tst_st_removal) {
  uint64_t cacheLimit = 256ULL * 1024ULL;
  Manager manager(nullptr, 4ULL * cacheLimit);
  auto cache =
      manager.createCache(Manager::CacheType::Plain, cacheLimit, false);

  // seed the cache with keys 0..1023 and verify each is retrievable
  for (uint64_t i = 0; i < 1024; i++) {
    CachedValue* value =
        CachedValue::construct(&i, sizeof(uint64_t), &i, sizeof(uint64_t));
    bool success = cache->insert(value);
    BOOST_CHECK(success);
    auto f = cache->find(&i, sizeof(uint64_t));
    BOOST_CHECK(f.found());
    BOOST_CHECK(f.value() != nullptr);
    BOOST_CHECK(f.value()->sameKey(&i, sizeof(uint64_t)));
  }

  // test removal of bogus keys; the test expects remove() to report success
  // even for keys that were never inserted
  for (uint64_t i = 1024; i < 2048; i++) {
    bool removed = cache->remove(&i, sizeof(uint64_t));
    // was BOOST_ASSERT — use BOOST_CHECK like every other check in this
    // suite, so a failure is reported by the framework instead of aborting
    BOOST_CHECK(removed);
    // ensure existing keys not removed
    for (uint64_t j = 0; j < 1024; j++) {
      auto f = cache->find(&j, sizeof(uint64_t));
      BOOST_CHECK(f.found());
      BOOST_CHECK(f.value() != nullptr);
      BOOST_CHECK(f.value()->sameKey(&j, sizeof(uint64_t)));
    }
  }

  // remove actual keys; afterwards lookups must miss
  for (uint64_t i = 0; i < 1024; i++) {
    bool removed = cache->remove(&i, sizeof(uint64_t));
    BOOST_CHECK(removed);
    auto f = cache->find(&i, sizeof(uint64_t));
    BOOST_CHECK(!f.found());
  }

  manager.destroyCache(cache);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief test growth behavior (single-threaded)
////////////////////////////////////////////////////////////////////////////////
BOOST_AUTO_TEST_CASE(tst_st_growth) {
  uint64_t initialSize = 16ULL * 1024ULL;
  uint64_t minimumSize = 64ULL * initialSize;
  MockScheduler scheduler(4);
  Manager manager(scheduler.ioService(), 1024ULL * 1024ULL * 1024ULL);
  auto cache =
      manager.createCache(Manager::CacheType::Plain, initialSize, true);

  // hammer the cache with far more data than its initial allocation can
  // hold; rejected insertions are simply discarded
  for (uint64_t i = 0; i < 4ULL * 1024ULL * 1024ULL; i++) {
    CachedValue* value =
        CachedValue::construct(&i, sizeof(uint64_t), &i, sizeof(uint64_t));
    if (!cache->insert(value)) {
      delete value;
    }
  }

  // under sustained load, usage must have grown well past the initial size
  BOOST_CHECK(cache->usage() > minimumSize);

  manager.destroyCache(cache);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief test shrink behavior (single-threaded)
////////////////////////////////////////////////////////////////////////////////
BOOST_AUTO_TEST_CASE(tst_st_shrink) {
  uint64_t initialSize = 16ULL * 1024ULL;
  RandomGenerator::initialize(RandomGenerator::RandomType::MERSENNE);
  MockScheduler scheduler(4);
  Manager manager(scheduler.ioService(), 1024ULL * 1024ULL * 1024ULL);
  auto cache =
      manager.createCache(Manager::CacheType::Plain, initialSize, true);

  // grow the cache by inserting many entries; rejected inserts are discarded
  for (uint64_t i = 0; i < 16ULL * 1024ULL * 1024ULL; i++) {
    CachedValue* value =
        CachedValue::construct(&i, sizeof(uint64_t), &i, sizeof(uint64_t));
    bool success = cache->insert(value);
    if (!success) {
      delete value;
    }
  }

  // request a shrink to half the current usage; spin until resize() is
  // accepted (it may be refused while other maintenance is in flight —
  // TODO confirm against Cache::resize)
  uint64_t target = cache->usage() / 2;
  while (!cache->resize(target)) {
  };

  // keep inserting while the shrink proceeds in the background
  for (uint64_t i = 0; i < 16ULL * 1024ULL * 1024ULL; i++) {
    CachedValue* value =
        CachedValue::construct(&i, sizeof(uint64_t), &i, sizeof(uint64_t));
    bool success = cache->insert(value);
    if (!success) {
      delete value;
    }
  }

  // wait until usage stabilizes: break once it stays unchanged across a
  // 10ms sleep window
  uint64_t lastUsage = cache->usage();
  while (true) {
    usleep(10000);
    if (cache->usage() == lastUsage) {
      break;
    }
    lastUsage = cache->usage();
  }

  // once settled, usage must respect the requested target
  BOOST_CHECK_MESSAGE(cache->usage() <= target,
                      cache->usage() << " !<= " << target);

  manager.destroyCache(cache);
  RandomGenerator::shutdown();
}
////////////////////////////////////////////////////////////////////////////////
/// @brief test mixed load behavior (multi-threaded)
////////////////////////////////////////////////////////////////////////////////
BOOST_AUTO_TEST_CASE(tst_mt_mixed_load) {
  uint64_t initialSize = 16ULL * 1024ULL;
  RandomGenerator::initialize(RandomGenerator::RandomType::MERSENNE);
  MockScheduler scheduler(4);
  Manager manager(scheduler.ioService(), 1024ULL * 1024ULL * 1024ULL);
  size_t threadCount = 4;
  std::shared_ptr<Cache> cache =
      manager.createCache(Manager::CacheType::Plain, initialSize, true);

  // each worker owns a disjoint key chunk [lower, upper] of this size
  uint64_t chunkSize = 16 * 1024 * 1024;
  uint64_t initialInserts = 4 * 1024 * 1024;
  uint64_t operationCount = 16 * 1024 * 1024;
  std::atomic<uint64_t> hitCount(0);
  std::atomic<uint64_t> missCount(0);

  // mixed workload per operation: r in [0,99] gives 1% removes (r == 99),
  // 4% inserts (95 <= r <= 98), 95% lookups (r < 95)
  auto worker = [&manager, &cache, initialInserts, operationCount, &hitCount,
                 &missCount](uint64_t lower, uint64_t upper) -> void {
    // fill with some initial data
    for (uint64_t i = 0; i < initialInserts; i++) {
      uint64_t item = lower + i;
      CachedValue* value = CachedValue::construct(&item, sizeof(uint64_t),
                                                  &item, sizeof(uint64_t));
      bool ok = cache->insert(value);
      if (!ok) {
        delete value;  // rejected insert: value stays owned by us
      }
    }

    // initialize valid range for keys that *might* be in cache;
    // removes advance validLower, inserts advance validUpper
    uint64_t validLower = lower;
    uint64_t validUpper = lower + initialInserts - 1;

    // commence mixed workload
    for (uint64_t i = 0; i < operationCount; i++) {
      uint32_t r = RandomGenerator::interval(static_cast<uint32_t>(99UL));

      if (r >= 99) {  // remove something
        if (validLower == validUpper) {
          continue;  // removed too much
        }
        uint64_t item = validLower++;

        cache->remove(&item, sizeof(uint64_t));
      } else if (r >= 95) {  // insert something
        if (validUpper == upper) {
          continue;  // already maxed out range
        }
        uint64_t item = ++validUpper;
        CachedValue* value = CachedValue::construct(&item, sizeof(uint64_t),
                                                    &item, sizeof(uint64_t));
        bool ok = cache->insert(value);
        if (!ok) {
          delete value;
        }
      } else {  // lookup something in the currently-valid key range
        uint64_t item = RandomGenerator::interval(
            static_cast<int64_t>(validLower), static_cast<int64_t>(validUpper));

        Cache::Finding f = cache->find(&item, sizeof(uint64_t));
        if (f.found()) {
          hitCount++;
          TRI_ASSERT(f.value() != nullptr);
          TRI_ASSERT(f.value()->sameKey(&item, sizeof(uint64_t)));
        } else {
          missCount++;
          TRI_ASSERT(f.value() == nullptr);
        }
      }
    }
  };

  std::vector<std::thread*> threads;
  // dispatch threads
  for (size_t i = 0; i < threadCount; i++) {
    uint64_t lower = i * chunkSize;
    uint64_t upper = ((i + 1) * chunkSize) - 1;
    threads.push_back(new std::thread(worker, lower, upper));
  }

  // join threads
  for (auto t : threads) {
    t->join();
    delete t;
  }

  manager.destroyCache(cache);
  RandomGenerator::shutdown();
}
////////////////////////////////////////////////////////////////////////////////
/// @brief generate tests
////////////////////////////////////////////////////////////////////////////////
BOOST_AUTO_TEST_SUITE_END()
// Local Variables:
// mode: outline-minor
// outline-regexp: "^\\(/// @brief\\|/// {@inheritDoc}\\|/// @addtogroup\\|//
// --SECTION--\\|/// @\\}\\)"
// End:

View File

@ -0,0 +1,203 @@
////////////////////////////////////////////////////////////////////////////////
/// @brief test suite for arangodb::cache::Manager
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2017 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is triAGENS GmbH, Cologne, Germany
///
/// @author Daniel H. Larkin
/// @author Copyright 2017, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////
#include "Basics/Common.h"
#include "Random/RandomGenerator.h"
#define BOOST_TEST_INCLUDED
#include <boost/test/unit_test.hpp>
#include "Cache/Manager.h"
#include "Cache/PlainCache.h"
#include "Cache/Rebalancer.h"
#include "MockScheduler.h"
#include <stdint.h>
#include <queue>
#include <string>
#include <thread>
#include <vector>
using namespace arangodb;
using namespace arangodb::cache;
// -----------------------------------------------------------------------------
// --SECTION-- setup / tear-down
// -----------------------------------------------------------------------------
// Boost.Test fixture: prints lifecycle markers around the Rebalancer suite.
struct CCacheRebalancerSetup {
  CCacheRebalancerSetup() {
    BOOST_TEST_MESSAGE("setup Rebalancer");
  }

  ~CCacheRebalancerSetup() {
    BOOST_TEST_MESSAGE("tear-down Rebalancer");
  }
};
// -----------------------------------------------------------------------------
// --SECTION-- test suite
// -----------------------------------------------------------------------------
////////////////////////////////////////////////////////////////////////////////
/// @brief setup
////////////////////////////////////////////////////////////////////////////////
BOOST_FIXTURE_TEST_SUITE(CCacheRebalancerTest, CCacheRebalancerSetup)
////////////////////////////////////////////////////////////////////////////////
/// @brief test rebalancing (multi-threaded)
////////////////////////////////////////////////////////////////////////////////
BOOST_AUTO_TEST_CASE(tst_rebalancing) {
  // Stress test: four worker threads hammer four caches with a mixed
  // insert/remove/lookup workload while a dedicated thread keeps calling
  // Rebalancer::rebalance() concurrently.
  uint64_t initialSize = 16ULL * 1024ULL;
  RandomGenerator::initialize(RandomGenerator::RandomType::MERSENNE);
  MockScheduler scheduler(4);
  Manager manager(scheduler.ioService(), 128ULL * 1024ULL * 1024ULL);
  Rebalancer rebalancer(&manager);

  size_t cacheCount = 4;
  size_t threadCount = 4;
  std::vector<std::shared_ptr<Cache>> caches;
  for (size_t i = 0; i < cacheCount; i++) {
    caches.emplace_back(
        manager.createCache(Manager::CacheType::Plain, initialSize, true));
  }

  // This flag is written by the main thread and polled by the rebalancer
  // thread, so it must be atomic; a plain bool here is a data race (UB) and
  // the compiler may legally hoist the load out of the loop.
  std::atomic<bool> doneRebalancing(false);
  auto rebalanceWorker = [&rebalancer, &doneRebalancing]() -> void {
    while (!doneRebalancing.load()) {
      if (rebalancer.rebalance()) {
        // work was done; back off before trying again
        std::this_thread::sleep_for(std::chrono::milliseconds(500));
      } else {
        std::this_thread::sleep_for(std::chrono::microseconds(100));
      }
    }
  };
  std::thread rebalancerThread(rebalanceWorker);

  uint64_t chunkSize = 4 * 1024 * 1024;
  uint64_t initialInserts = 1 * 1024 * 1024;
  uint64_t operationCount = 4 * 1024 * 1024;
  std::atomic<uint64_t> hitCount(0);
  std::atomic<uint64_t> missCount(0);

  // Each worker owns the disjoint key range [lower, upper]: it seeds the
  // caches with the first initialInserts keys of its range, then runs a
  // mixed workload of ~1% removals, ~4% inserts and ~95% lookups.
  auto worker = [&caches, cacheCount, initialInserts, operationCount,
                 &hitCount,
                 &missCount](uint64_t lower, uint64_t upper) -> void {
    // fill with some initial data
    for (uint64_t i = 0; i < initialInserts; i++) {
      uint64_t item = lower + i;
      size_t cacheIndex = item % cacheCount;
      CachedValue* value = CachedValue::construct(&item, sizeof(uint64_t),
                                                  &item, sizeof(uint64_t));
      if (!caches[cacheIndex]->insert(value)) {
        delete value;  // insert failed, the cache did not take the value
      }
    }

    // [validLower, validUpper] brackets the keys that *might* be in a cache
    uint64_t validLower = lower;
    uint64_t validUpper = lower + initialInserts - 1;

    // commence mixed workload
    for (uint64_t i = 0; i < operationCount; i++) {
      uint32_t r = RandomGenerator::interval(static_cast<uint32_t>(99UL));

      if (r >= 99) {  // remove something
        if (validLower == validUpper) {
          continue;  // removed too much
        }
        uint64_t item = validLower++;
        size_t cacheIndex = item % cacheCount;
        caches[cacheIndex]->remove(&item, sizeof(uint64_t));
      } else if (r >= 95) {  // insert something
        if (validUpper == upper) {
          continue;  // already maxed out range
        }
        uint64_t item = ++validUpper;
        size_t cacheIndex = item % cacheCount;
        CachedValue* value = CachedValue::construct(&item, sizeof(uint64_t),
                                                    &item, sizeof(uint64_t));
        if (!caches[cacheIndex]->insert(value)) {
          delete value;
        }
      } else {  // lookup something
        uint64_t item = RandomGenerator::interval(
            static_cast<int64_t>(validLower), static_cast<int64_t>(validUpper));
        size_t cacheIndex = item % cacheCount;
        Cache::Finding f = caches[cacheIndex]->find(&item, sizeof(uint64_t));
        if (f.found()) {
          hitCount++;
          TRI_ASSERT(f.value() != nullptr);
          TRI_ASSERT(f.value()->sameKey(&item, sizeof(uint64_t)));
        } else {
          missCount++;
          TRI_ASSERT(f.value() == nullptr);
        }
      }
    }
  };

  // dispatch the workers on disjoint chunks and wait for them to finish;
  // value-semantics std::thread avoids the manual new/delete pairing
  std::vector<std::thread> threads;
  threads.reserve(threadCount);
  for (size_t i = 0; i < threadCount; i++) {
    uint64_t lower = i * chunkSize;
    uint64_t upper = ((i + 1) * chunkSize) - 1;
    threads.emplace_back(worker, lower, upper);
  }
  for (auto& t : threads) {
    t.join();
  }

  // stop the rebalancer thread and tear everything down
  doneRebalancing.store(true);
  rebalancerThread.join();

  for (auto cache : caches) {
    manager.destroyCache(cache);
  }
  RandomGenerator::shutdown();
}
////////////////////////////////////////////////////////////////////////////////
/// @brief generate tests
////////////////////////////////////////////////////////////////////////////////
BOOST_AUTO_TEST_SUITE_END()
// Local Variables:
// mode: outline-minor
// outline-regexp: "^\\(/// @brief\\|/// {@inheritDoc}\\|/// @addtogroup\\|//
// --SECTION--\\|/// @\\}\\)"
// End:

View File

@ -0,0 +1,2 @@
// Master translation unit for the cache test binary: defines the Boost.Test
// module name and pulls in the single-header test runner. The individual
// test files define BOOST_TEST_INCLUDED and include only
// <boost/test/unit_test.hpp>, so exactly one runner is linked.
#define BOOST_TEST_MODULE "C/C++ Unit Tests for ArangoDB Cache"
#include <boost/test/included/unit_test.hpp>

138
UnitTests/Cache/State.cpp Normal file
View File

@ -0,0 +1,138 @@
////////////////////////////////////////////////////////////////////////////////
/// @brief test suite for arangodb::cache::State
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2017 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is triAGENS GmbH, Cologne, Germany
///
/// @author Daniel H. Larkin
/// @author Copyright 2017, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////
#include "Basics/Common.h"
#define BOOST_TEST_INCLUDED
#include <boost/test/unit_test.hpp>
#include "Cache/State.h"
#include <stdint.h>
using namespace arangodb::cache;
// -----------------------------------------------------------------------------
// --SECTION-- setup / tear-down
// -----------------------------------------------------------------------------
// Suite fixture: emits a log line when the State suite is entered and
// another one when it is torn down again.
struct CCacheStateSetup {
  CCacheStateSetup() {
    BOOST_TEST_MESSAGE("setup State");
  }

  ~CCacheStateSetup() {
    BOOST_TEST_MESSAGE("tear-down State");
  }
};
// -----------------------------------------------------------------------------
// --SECTION-- test suite
// -----------------------------------------------------------------------------
////////////////////////////////////////////////////////////////////////////////
/// @brief setup
////////////////////////////////////////////////////////////////////////////////
BOOST_FIXTURE_TEST_SUITE(CCacheStateTest, CCacheStateSetup)
////////////////////////////////////////////////////////////////////////////////
/// @brief test lock methods
////////////////////////////////////////////////////////////////////////////////
BOOST_AUTO_TEST_CASE(tst_lock) {
  // Exercises State::lock/unlock together with the callback that runs when
  // the lock is actually acquired.
  State st;
  uint32_t sideEffect = 0;
  auto markOne = [&sideEffect]() -> void { sideEffect = 1; };
  auto markTwo = [&sideEffect]() -> void { sideEffect = 2; };

  // an uncontended lock succeeds and fires its callback
  BOOST_CHECK(!st.isLocked());
  BOOST_CHECK(st.lock(-1, markOne));
  BOOST_CHECK(st.isLocked());
  BOOST_CHECK_EQUAL(1UL, sideEffect);

  // a second lock attempt is contended: it fails and must not run the
  // second callback
  BOOST_CHECK(!st.lock(10LL, markTwo));
  BOOST_CHECK(st.isLocked());
  BOOST_CHECK_EQUAL(1UL, sideEffect);

  // unlock releases the lock again
  st.unlock();
  BOOST_CHECK(!st.isLocked());
}
////////////////////////////////////////////////////////////////////////////////
/// @brief test methods for non-lock flags
////////////////////////////////////////////////////////////////////////////////
BOOST_AUTO_TEST_CASE(tst_flags) {
  // Verifies toggleFlag/isSet for the non-lock flag State::Flag::migrated:
  // the flag starts clear, survives lock/unlock cycles, and toggles both ways.
  State st;

  // reads the flag under the lock and compares it with the expectation
  auto expectMigrated = [&st](bool expected) -> void {
    BOOST_CHECK(st.lock());
    BOOST_CHECK(expected == st.isSet(State::Flag::migrated));
    st.unlock();
  };

  // flips the flag under the lock, checking the value before and after
  auto flipMigrated = [&st](bool before) -> void {
    BOOST_CHECK(st.lock());
    BOOST_CHECK(before == st.isSet(State::Flag::migrated));
    st.toggleFlag(State::Flag::migrated);
    BOOST_CHECK(before != st.isSet(State::Flag::migrated));
    st.unlock();
  };

  expectMigrated(false);  // starts clear
  flipMigrated(false);    // set it
  expectMigrated(true);   // stays set across lock cycles
  flipMigrated(true);     // clear it again
  expectMigrated(false);  // back to clear
}
////////////////////////////////////////////////////////////////////////////////
/// @brief generate tests
////////////////////////////////////////////////////////////////////////////////
BOOST_AUTO_TEST_SUITE_END()
// Local Variables:
// mode: outline-minor
// outline-regexp: "^\\(/// @brief\\|/// {@inheritDoc}\\|/// @addtogroup\\|//
// --SECTION--\\|/// @\\}\\)"
// End:

View File

@ -0,0 +1,98 @@
////////////////////////////////////////////////////////////////////////////////
/// @brief test suite for arangodb::cache::TransactionWindow
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2017 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is triAGENS GmbH, Cologne, Germany
///
/// @author Daniel H. Larkin
/// @author Copyright 2017, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////
#include "Basics/Common.h"
#define BOOST_TEST_INCLUDED
#include <boost/test/unit_test.hpp>
#include "Cache/TransactionWindow.h"
#include <stdint.h>
#include <iostream>
using namespace arangodb::cache;
// -----------------------------------------------------------------------------
// --SECTION-- setup / tear-down
// -----------------------------------------------------------------------------
// Suite fixture: emits a log line when the TransactionWindow suite is
// entered and another one when it is torn down again.
struct CCacheTransactionWindowSetup {
  CCacheTransactionWindowSetup() { BOOST_TEST_MESSAGE("setup TransactionWindow"); }
  ~CCacheTransactionWindowSetup() { BOOST_TEST_MESSAGE("tear-down TransactionWindow"); }
};
// -----------------------------------------------------------------------------
// --SECTION-- test suite
// -----------------------------------------------------------------------------
////////////////////////////////////////////////////////////////////////////////
/// @brief setup
////////////////////////////////////////////////////////////////////////////////
BOOST_FIXTURE_TEST_SUITE(CCacheTransactionWindowTest,
CCacheTransactionWindowSetup)
////////////////////////////////////////////////////////////////////////////////
/// @brief test transaction term management
////////////////////////////////////////////////////////////////////////////////
BOOST_AUTO_TEST_CASE(tst_transaction_term) {
  // The term advances on the first start() and on the matching last end(),
  // but stays put while nested transactions are still open.
  TransactionWindow window;

  BOOST_CHECK_EQUAL(0ULL, window.term());

  window.start();  // first open transaction bumps the term
  BOOST_CHECK_EQUAL(1ULL, window.term());
  window.end();    // closing the only transaction bumps it again
  BOOST_CHECK_EQUAL(2ULL, window.term());

  window.start();  // new window
  BOOST_CHECK_EQUAL(3ULL, window.term());
  window.start();  // nested start: no change
  BOOST_CHECK_EQUAL(3ULL, window.term());
  window.end();    // inner end: one still open, no change
  BOOST_CHECK_EQUAL(3ULL, window.term());
  window.end();    // outer end: term advances again
  BOOST_CHECK_EQUAL(4ULL, window.term());
}
////////////////////////////////////////////////////////////////////////////////
/// @brief generate tests
////////////////////////////////////////////////////////////////////////////////
BOOST_AUTO_TEST_SUITE_END()
// Local Variables:
// mode: outline-minor
// outline-regexp: "^\\(/// @brief\\|/// {@inheritDoc}\\|/// @addtogroup\\|//
// --SECTION--\\|/// @\\}\\)"
// End:

View File

@ -0,0 +1,356 @@
////////////////////////////////////////////////////////////////////////////////
/// @brief test suite for arangodb::cache::TransactionalBucket
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2017 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is triAGENS GmbH, Cologne, Germany
///
/// @author Daniel H. Larkin
/// @author Copyright 2017, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////
#include "Basics/Common.h"
#define BOOST_TEST_INCLUDED
#include <boost/test/unit_test.hpp>
#include "Cache/TransactionalBucket.h"
#include <stdint.h>
#include <string>
using namespace arangodb::cache;
// -----------------------------------------------------------------------------
// --SECTION-- setup / tear-down
// -----------------------------------------------------------------------------
// Suite fixture: emits a log line when the TransactionalBucket suite is
// entered and another one when it is torn down again.
struct CCacheTransactionalBucketSetup {
  CCacheTransactionalBucketSetup() { BOOST_TEST_MESSAGE("setup TransactionalBucket"); }
  ~CCacheTransactionalBucketSetup() { BOOST_TEST_MESSAGE("tear-down TransactionalBucket"); }
};
// -----------------------------------------------------------------------------
// --SECTION-- test suite
// -----------------------------------------------------------------------------
////////////////////////////////////////////////////////////////////////////////
/// @brief setup
////////////////////////////////////////////////////////////////////////////////
BOOST_FIXTURE_TEST_SUITE(CCacheTransactionalBucketTest,
CCacheTransactionalBucketSetup)
////////////////////////////////////////////////////////////////////////////////
/// @brief test lock methods
////////////////////////////////////////////////////////////////////////////////
BOOST_AUTO_TEST_CASE(tst_locks) {
  // Covers TransactionalBucket::lock/unlock and the blacklist-term update
  // that piggybacks on lock().
  TransactionalBucket bucket;

  // an uncontended lock succeeds
  BOOST_CHECK(!bucket.isLocked());
  BOOST_CHECK(bucket.lock(0ULL, -1LL));
  BOOST_CHECK(bucket.isLocked());

  // a contended lock fails but leaves the bucket locked
  BOOST_CHECK(!bucket.lock(0ULL, 10LL));
  BOOST_CHECK(bucket.isLocked());

  // unlock releases again
  bucket.unlock();
  BOOST_CHECK(!bucket.isLocked());

  // locking with a newer term updates _blacklistTerm, and the value
  // survives the subsequent unlock
  BOOST_CHECK_EQUAL(0ULL, bucket._blacklistTerm);
  bucket.lock(1ULL, -1LL);
  BOOST_CHECK_EQUAL(1ULL, bucket._blacklistTerm);
  bucket.unlock();
  BOOST_CHECK_EQUAL(1ULL, bucket._blacklistTerm);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief test insertion to full and fail beyond
////////////////////////////////////////////////////////////////////////////////
BOOST_AUTO_TEST_CASE(tst_insertion) {
  // A bucket accepts exactly three entries; a fourth insert is ignored.
  TransactionalBucket bucket;

  // hashes need not be real, merely unique and non-zero
  uint32_t testHashes[4] = {1, 2, 3, 4};
  uint64_t testKeys[4] = {0, 1, 2, 3};
  uint64_t testValues[4] = {0, 1, 2, 3};
  CachedValue* cached[4];
  for (size_t i = 0; i < 4; i++) {
    cached[i] = CachedValue::construct(&(testKeys[i]), sizeof(uint64_t),
                                       &(testValues[i]), sizeof(uint64_t));
  }

  BOOST_CHECK(bucket.lock(0, -1LL));

  // fill up: the bucket reports full exactly after the third insert
  BOOST_CHECK(!bucket.isFull());
  for (size_t i = 0; i < 3; i++) {
    bucket.insert(testHashes[i], cached[i]);
    BOOST_CHECK(bucket.isFull() == (i == 2));
  }
  // everything inserted so far must be findable
  for (size_t i = 0; i < 3; i++) {
    BOOST_CHECK(bucket.find(testHashes[i], cached[i]->key(),
                            cached[i]->keySize) == cached[i]);
  }

  // the overflow insert is silently dropped
  bucket.insert(testHashes[3], cached[3]);
  BOOST_CHECK(bucket.find(testHashes[3], cached[3]->key(),
                          cached[3]->keySize) == nullptr);

  bucket.unlock();

  // cleanup
  for (size_t i = 0; i < 4; i++) {
    delete cached[i];
  }
}
////////////////////////////////////////////////////////////////////////////////
/// @brief test removal
////////////////////////////////////////////////////////////////////////////////
BOOST_AUTO_TEST_CASE(tst_removal) {
  // Entries can be removed in arbitrary order: remove() hands back the
  // stored pointer and a subsequent find() comes up empty.
  TransactionalBucket bucket;

  // hashes need not be real, merely unique and non-zero
  uint32_t testHashes[3] = {1, 2, 3};
  uint64_t testKeys[3] = {0, 1, 2};
  uint64_t testValues[3] = {0, 1, 2};
  CachedValue* cached[3];
  for (size_t i = 0; i < 3; i++) {
    cached[i] = CachedValue::construct(&(testKeys[i]), sizeof(uint64_t),
                                       &(testValues[i]), sizeof(uint64_t));
  }

  BOOST_CHECK(bucket.lock(0, -1LL));

  for (size_t i = 0; i < 3; i++) {
    bucket.insert(testHashes[i], cached[i]);
  }
  for (size_t i = 0; i < 3; i++) {
    BOOST_CHECK(bucket.find(testHashes[i], cached[i]->key(),
                            cached[i]->keySize) == cached[i]);
  }

  // remove out of insertion order: middle, first, last
  size_t removalOrder[3] = {1, 0, 2};
  for (size_t i : removalOrder) {
    BOOST_CHECK(bucket.remove(testHashes[i], cached[i]->key(),
                              cached[i]->keySize) == cached[i]);
    BOOST_CHECK(bucket.find(testHashes[i], cached[i]->key(),
                            cached[i]->keySize) == nullptr);
  }

  bucket.unlock();

  // cleanup
  for (size_t i = 0; i < 3; i++) {
    delete cached[i];
  }
}
////////////////////////////////////////////////////////////////////////////////
/// @brief test eviction with subsequent insertion
////////////////////////////////////////////////////////////////////////////////
BOOST_AUTO_TEST_CASE(tst_eviction) {
  // Eviction: the test expects the first-inserted entry to be the
  // candidate, and an insertion-optimized eviction to free a slot that a
  // new insert can use.
  TransactionalBucket bucket;

  uint32_t testHashes[4] = {1, 2, 3, 4};  // unique, non-zero; need not be real
  uint64_t testKeys[4] = {0, 1, 2, 3};
  uint64_t testValues[4] = {0, 1, 2, 3};
  CachedValue* cached[4];
  for (size_t i = 0; i < 4; i++) {
    cached[i] = CachedValue::construct(&(testKeys[i]), sizeof(uint64_t),
                                       &(testValues[i]), sizeof(uint64_t));
  }

  BOOST_CHECK(bucket.lock(0, -1LL));

  // fill the bucket with the first three entries
  BOOST_CHECK(!bucket.isFull());
  for (size_t i = 0; i < 3; i++) {
    bucket.insert(testHashes[i], cached[i]);
    BOOST_CHECK(bucket.isFull() == (i == 2));
  }
  for (size_t i = 0; i < 3; i++) {
    BOOST_CHECK(bucket.find(testHashes[i], cached[i]->key(),
                            cached[i]->keySize) == cached[i]);
  }

  // the first-inserted entry is the eviction candidate
  CachedValue* victim = bucket.evictionCandidate();
  BOOST_CHECK(victim == cached[0]);
  bucket.evict(victim, false);
  BOOST_CHECK(bucket.find(testHashes[0], cached[0]->key(),
                          cached[0]->keySize) == nullptr);
  BOOST_CHECK(!bucket.isFull());

  // the candidate logic also works while the bucket is not full
  victim = bucket.evictionCandidate();
  BOOST_CHECK(victim == cached[1]);
  bucket.evict(victim, true);
  BOOST_CHECK(bucket.find(testHashes[1], cached[1]->key(),
                          cached[1]->keySize) == nullptr);
  BOOST_CHECK(!bucket.isFull());

  // after the insertion-optimized eviction a new entry fits again
  bucket.insert(testHashes[3], cached[3]);
  BOOST_CHECK(bucket.find(testHashes[3], cached[3]->key(),
                          cached[3]->keySize) == cached[3]);

  bucket.unlock();

  // cleanup
  for (size_t i = 0; i < 4; i++) {
    delete cached[i];
  }
}
////////////////////////////////////////////////////////////////////////////////
/// @brief test blacklist methods
////////////////////////////////////////////////////////////////////////////////
BOOST_AUTO_TEST_CASE(tst_blacklist) {
  // Exercises the blacklist API: blacklisted hashes hide matching entries
  // and reject new inserts; once the blacklist overflows the bucket reports
  // fully-blacklisted; locking with a newer term resets the blacklist.
  TransactionalBucket bucket;
  bool success;
  CachedValue* res;
  // note: hashes[0] == hashes[1], so blacklisting hashes[1] also affects
  // lookups under hashes[0] (distinguished only by key comparison)
  uint32_t hashes[7] = {1, 1, 2, 3,
                        4, 5, 6};  // don't have to be real, want some overlap
  uint64_t keys[6] = {0, 1, 2, 3, 4, 5};
  uint64_t values[6] = {0, 1, 2, 3, 4, 5};
  CachedValue* ptrs[6];
  for (size_t i = 0; i < 6; i++) {
    ptrs[i] = CachedValue::construct(&(keys[i]), sizeof(uint64_t), &(values[i]),
                                     sizeof(uint64_t));
  }
  success = bucket.lock(0, -1LL);
  BOOST_CHECK(success);
  // insert three to fill
  BOOST_CHECK(!bucket.isFull());
  for (size_t i = 0; i < 3; i++) {
    bucket.insert(hashes[i], ptrs[i]);
    if (i < 2) {
      BOOST_CHECK(!bucket.isFull());
    } else {
      BOOST_CHECK(bucket.isFull());
    }
  }
  // all three inserted entries must be findable before blacklisting
  for (size_t i = 0; i < 3; i++) {
    res = bucket.find(hashes[i], ptrs[i]->key(), ptrs[i]->keySize);
    BOOST_CHECK(res == ptrs[i]);
  }
  // blacklist 1-4 to fill blacklist
  for (size_t i = 1; i < 5; i++) {
    bucket.blacklist(hashes[i], ptrs[i]->key(), ptrs[i]->keySize);
  }
  // blacklisted hashes report as such, and their entries are gone
  for (size_t i = 1; i < 5; i++) {
    BOOST_CHECK(bucket.isBlacklisted(hashes[i]));
    res = bucket.find(hashes[i], ptrs[i]->key(), ptrs[i]->keySize);
    BOOST_CHECK(nullptr == res);
  }
  // verify actually not fully blacklisted
  BOOST_CHECK(!bucket.isFullyBlacklisted());
  BOOST_CHECK(!bucket.isBlacklisted(hashes[6]));
  // verify it didn't remove matching hash with non-matching key
  // (hashes[0] == hashes[1], but keys[0] != keys[1])
  res = bucket.find(hashes[0], ptrs[0]->key(), ptrs[0]->keySize);
  BOOST_CHECK(res == ptrs[0]);
  // verify we can't insert a key with a blacklisted hash
  bucket.insert(hashes[1], ptrs[1]);
  res = bucket.find(hashes[1], ptrs[1]->key(), ptrs[1]->keySize);
  BOOST_CHECK(nullptr == res);
  // proceed to fully blacklist
  bucket.blacklist(hashes[5], ptrs[5]->key(), ptrs[5]->keySize);
  BOOST_CHECK(bucket.isBlacklisted(hashes[5]));
  res = bucket.find(hashes[5], ptrs[5]->key(), ptrs[5]->keySize);
  BOOST_CHECK(nullptr == res);
  // make sure it still didn't remove non-matching key
  res = bucket.find(hashes[0], ptrs[0]->key(), ptrs[0]->keySize);
  BOOST_CHECK(ptrs[0] == res);
  // make sure it's fully blacklisted: even the never-blacklisted hashes[6]
  // now reports as blacklisted
  BOOST_CHECK(bucket.isFullyBlacklisted());
  BOOST_CHECK(bucket.isBlacklisted(hashes[6]));
  bucket.unlock();
  // check that updating blacklist term clears blacklist
  bucket.lock(2ULL, -1LL);
  BOOST_CHECK(!bucket.isFullyBlacklisted());
  for (size_t i = 0; i < 7; i++) {
    BOOST_CHECK(!bucket.isBlacklisted(hashes[i]));
  }
  bucket.unlock();
  // cleanup
  for (size_t i = 0; i < 6; i++) {
    delete ptrs[i];
  }
}
////////////////////////////////////////////////////////////////////////////////
/// @brief generate tests
////////////////////////////////////////////////////////////////////////////////
BOOST_AUTO_TEST_SUITE_END()
// Local Variables:
// mode: outline-minor
// outline-regexp: "^\\(/// @brief\\|/// {@inheritDoc}\\|/// @addtogroup\\|//
// --SECTION--\\|/// @\\}\\)"
// End:

View File

@ -25,13 +25,7 @@
#include "Agency/Agent.h"
#include "Agency/Job.h"
#include <velocypack/Builder.h>
#include <velocypack/Iterator.h>
#include <velocypack/Slice.h>
#include <velocypack/velocypack-aliases.h>
using namespace arangodb::consensus;
using namespace arangodb::velocypack;
AddFollower::AddFollower(Node const& snapshot, Agent* agent,
std::string const& jobId, std::string const& creator,

View File

@ -1,4 +1,4 @@
////////////////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
@ -811,7 +811,7 @@ bool AgencyComm::exists(std::string const& key) {
return false;
}
auto parts = arangodb::basics::StringUtils::split(key, "/");
auto parts = basics::StringUtils::split(key, "/");
std::vector<std::string> allParts;
allParts.reserve(parts.size() + 1);
allParts.push_back(AgencyCommManager::path());
@ -1130,7 +1130,7 @@ bool AgencyComm::ensureStructureInitialized() {
std::vector<std::string>({AgencyCommManager::path(), "Secret"}));
if (!secretValue.isString()) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "Couldn't find secret in agency!";
LOG_TOPIC(ERR, Logger::CLUSTER) << "Couldn't find secret in agency!";
return false;
}
std::string const secret = secretValue.copyString();
@ -1489,16 +1489,7 @@ AgencyCommResult AgencyComm::send(
<< "': " << body;
arangodb::httpclient::SimpleHttpClient client(connection, timeout, false);
auto cc = ClusterComm::instance();
if (cc == nullptr) {
// nullptr only happens during controlled shutdown
result._message = "could not send request to agency because of shutdown";
LOG_TOPIC(TRACE, Logger::AGENCYCOMM)
<< "could not send request to agency because of shutdown";
return result;
}
client.setJwt(cc->jwt());
client.setJwt(ClusterComm::instance()->jwt());
client.keepConnectionOnDestruction(true);
// set up headers
@ -1699,10 +1690,10 @@ bool AgencyComm::tryInitializeStructure(std::string const& jwtSecret) {
return result.successful();
} catch (std::exception const& e) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "Fatal error initializing agency " << e.what();
LOG_TOPIC(FATAL, Logger::CLUSTER) << "Fatal error initializing agency " << e.what();
FATAL_ERROR_EXIT();
} catch (...) {
LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "Fatal error initializing agency";
LOG_TOPIC(FATAL, Logger::CLUSTER) << "Fatal error initializing agency";
FATAL_ERROR_EXIT();
}
}

View File

@ -629,6 +629,14 @@ class AgencyComm {
void updateEndpoints(arangodb::velocypack::Slice const&);
bool lockRead(std::string const&, double, double);
bool lockWrite(std::string const&, double, double);
bool unlockRead(std::string const&, double);
bool unlockWrite(std::string const&, double);
AgencyCommResult sendTransactionWithFailover(AgencyTransaction const&,
double timeout = 0.0);

View File

@ -1,3 +1,4 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
@ -47,15 +48,18 @@ Agent::Agent(config_t const& config)
_config(config),
_lastCommitIndex(0),
_lastAppliedIndex(0),
_lastCompactionIndex(0),
_leaderCommitIndex(0),
_spearhead(this),
_readDB(this),
_transient(this),
_compacted(this),
_nextCompationAfter(_config.compactionStepSize()),
_inception(std::make_unique<Inception>(this)),
_activator(nullptr),
_compactor(this),
_ready(false) {
_ready(false),
_preparing(false) {
_state.configure(this);
_constituent.configure(this);
}
@ -153,7 +157,7 @@ std::string Agent::leaderID() const {
/// Are we leading?
bool Agent::leading() const {
return _constituent.leading();
return _preparing || _constituent.leading();
}
/// Start constituent personality
@ -272,14 +276,14 @@ bool Agent::recvAppendEntriesRPC(
// Update commit index
if (queries->slice().type() != VPackValueType::Array) {
LOG_TOPIC(WARN, Logger::AGENCY)
LOG_TOPIC(DEBUG, Logger::AGENCY)
<< "Received malformed entries for appending. Discarding!";
return false;
}
if (!_constituent.checkLeader(term, leaderId, prevIndex, prevTerm)) {
LOG_TOPIC(WARN, Logger::AGENCY) << "Not accepting appendEntries from "
<< leaderId;
LOG_TOPIC(DEBUG, Logger::AGENCY)
<< "Not accepting appendEntries from " << leaderId;
return false;
}
@ -324,8 +328,8 @@ bool Agent::recvAppendEntriesRPC(
/// Leader's append entries
void Agent::sendAppendEntriesRPC() {
std::chrono::duration<int, std::ratio<1, 1000000>> const dt (
(_config.waitForSync() ? 40000 : 2000));
std::chrono::duration<int, std::ratio<1, 1000>> const dt (
(_config.waitForSync() ? 40 : 2));
auto cc = ClusterComm::instance();
if (cc == nullptr) {
// nullptr only happens during controlled shutdown
@ -351,12 +355,6 @@ void Agent::sendAppendEntriesRPC() {
std::vector<log_t> unconfirmed = _state.get(last_confirmed);
if (unconfirmed.empty()) {
// this can only happen if the log is totally empty (I think, Max)
// and so it is OK, to skip the time check here
continue;
}
index_t highest = unconfirmed.back().index;
// _lastSent, _lastHighest: local and single threaded access
@ -378,7 +376,8 @@ void Agent::sendAppendEntriesRPC() {
// Body
Builder builder;
builder.add(VPackValue(VPackValueType::Array));
if ((system_clock::now() - _earliestPackage[followerId]).count() > 0) {
if (!_preparing &&
((system_clock::now() - _earliestPackage[followerId]).count() > 0)) {
for (size_t i = 1; i < unconfirmed.size(); ++i) {
auto const& entry = unconfirmed.at(i);
builder.add(VPackValue(VPackValueType::Object));
@ -413,8 +412,9 @@ void Agent::sendAppendEntriesRPC() {
"1", 1, _config.poolAt(followerId),
arangodb::rest::RequestType::POST, path.str(),
std::make_shared<std::string>(builder.toJson()), headerFields,
std::make_shared<AgentCallback>(this, followerId, highest, toLog),
5.0 * _config.maxPing(), true);
std::make_shared<AgentCallback>(
this, followerId, (toLog) ? highest : 0, toLog),
std::max(1.0e-3 * toLog * dt.count(), 0.25 * _config.minPing()), true);
// _lastSent, _lastHighest: local and single threaded access
_lastSent[followerId] = system_clock::now();
@ -422,7 +422,7 @@ void Agent::sendAppendEntriesRPC() {
if (toLog > 0) {
_earliestPackage[followerId] = system_clock::now() + toLog * dt;
LOG_TOPIC(TRACE, Logger::AGENCY)
LOG_TOPIC(DEBUG, Logger::AGENCY)
<< "Appending " << unconfirmed.size() - 1 << " entries up to index "
<< highest << " to follower " << followerId << ". Message: "
<< builder.toJson()
@ -430,7 +430,7 @@ void Agent::sendAppendEntriesRPC() {
<< std::chrono::duration<double, std::milli>(
_earliestPackage[followerId]-system_clock::now()).count() << "ms";
} else {
LOG_TOPIC(TRACE, Logger::AGENCY)
LOG_TOPIC(DEBUG, Logger::AGENCY)
<< "Just keeping follower " << followerId
<< " devout with " << builder.toJson();
}
@ -837,7 +837,7 @@ void Agent::run() {
sendAppendEntriesRPC();
// Don't panic
_appendCV.wait(1000);
_appendCV.wait(100);
// Detect faulty agent and replace
// if possible and only if not already activating
@ -1000,6 +1000,7 @@ void Agent::beginShutdown() {
void Agent::prepareLead() {
_preparing = true;
// Key value stores
rebuildDBs();
@ -1020,9 +1021,11 @@ void Agent::lead() {
// Wake up run
{
CONDITION_LOCKER(guard, _appendCV);
_preparing = false;
guard.broadcast();
}
// Agency configuration
term_t myterm;
{
@ -1169,17 +1172,23 @@ arangodb::consensus::index_t Agent::rebuildDBs() {
// Apply logs from last applied index to leader's commit index
LOG_TOPIC(DEBUG, Logger::AGENCY)
<< "Rebuilding kvstores from index "
<< "Rebuilding key-value stores from index "
<< _lastAppliedIndex << " to " << _leaderCommitIndex;
_spearhead.apply(
_state.slices(_lastAppliedIndex+1, _leaderCommitIndex),
_state.slices(_lastAppliedIndex+1, _leaderCommitIndex+1),
_leaderCommitIndex, _constituent.term());
_readDB.apply(
_state.slices(_lastAppliedIndex+1, _leaderCommitIndex),
_state.slices(_lastAppliedIndex+1, _leaderCommitIndex+1),
_leaderCommitIndex, _constituent.term());
_compacted.apply(
_state.slices(_lastCompactionIndex+1, _leaderCommitIndex+1),
_leaderCommitIndex, _constituent.term());
_lastAppliedIndex = _leaderCommitIndex;
_lastCompactionIndex = _leaderCommitIndex;
return _lastAppliedIndex;
@ -1195,9 +1204,11 @@ void Agent::compact() {
/// Last commit index
arangodb::consensus::index_t Agent::lastCommitted() const {
std::pair<arangodb::consensus::index_t, arangodb::consensus::index_t>
Agent::lastCommitted() const {
MUTEX_LOCKER(ioLocker, _ioLock);
return _lastCommitIndex;
return std::pair<arangodb::consensus::index_t, arangodb::consensus::index_t>(
_lastCommitIndex,_leaderCommitIndex);
}
/// Last commit index
@ -1382,8 +1393,42 @@ bool Agent::ready() const {
return true;
}
return _ready.load();
return _ready;
}
query_t Agent::buildDB(arangodb::consensus::index_t index) {
auto builder = std::make_shared<VPackBuilder>();
arangodb::consensus::index_t start = 0, end = 0;
Store store(this);
{
MUTEX_LOCKER(ioLocker, _ioLock);
store = _compacted;
MUTEX_LOCKER(liLocker, _liLock);
end = _leaderCommitIndex;
start = _lastCompactionIndex+1;
}
if (index > end) {
LOG_TOPIC(INFO, Logger::AGENCY)
<< "Cannot snapshot beyond leaderCommitIndex: " << end;
index = end;
} else if (index < start) {
LOG_TOPIC(INFO, Logger::AGENCY)
<< "Cannot snapshot before last compaction index: " << start;
index = start+1;
}
store.apply(_state.slices(start+1, index), index, _constituent.term());
store.toBuilder(*builder);
return builder;
}
}} // namespace

View File

@ -77,7 +77,7 @@ class Agent : public arangodb::Thread {
bool fitness() const;
/// @brief Leader ID
index_t lastCommitted() const;
std::pair<index_t, index_t> lastCommitted() const;
/// @brief Leader ID
std::string leaderID() const;
@ -222,6 +222,9 @@ class Agent : public arangodb::Thread {
/// @brief Update a peers endpoint in my configuration
void updatePeerEndpoint(std::string const& id, std::string const& ep);
/// @brief Assemble an agency to commitId
query_t buildDB(index_t);
/// @brief State reads persisted state and prepares the agent
friend class State;
friend class Compactor;
@ -270,6 +273,9 @@ class Agent : public arangodb::Thread {
/// @brief Last compaction index
index_t _lastAppliedIndex;
/// @brief Last compaction index
index_t _lastCompactionIndex;
/// @brief Last compaction index
index_t _leaderCommitIndex;
@ -282,6 +288,9 @@ class Agent : public arangodb::Thread {
/// @brief Committed (read) kv-store
Store _transient;
/// @brief Last compacted store
Store _compacted;
/// @brief Condition variable for appendEntries
arangodb::basics::ConditionVariable _appendCV;
@ -326,6 +335,7 @@ class Agent : public arangodb::Thread {
/// @brief Agent is ready for RAFT
std::atomic<bool> _ready;
std::atomic<bool> _preparing;
/// @brief Keep track of when I last took on leadership
TimePoint _leaderSince;

View File

@ -39,23 +39,43 @@ AgentCallback::AgentCallback(Agent* agent, std::string const& slaveID,
void AgentCallback::shutdown() { _agent = 0; }
bool AgentCallback::operator()(arangodb::ClusterCommResult* res) {
if (res->status == CL_COMM_SENT) {
if (_agent) {
_agent->reportIn(_slaveID, _last, _toLog);
try { // Check success
if (res->result->getBodyVelocyPack()->slice().get("success").getBool()) {
_agent->reportIn(_slaveID, _last, _toLog);
}
LOG_TOPIC(DEBUG, Logger::CLUSTER)
<< "success: true " << res->result->getBodyVelocyPack()->toJson();
} catch (...) {
LOG_TOPIC(INFO, Logger::CLUSTER)
<< "success: false" << res->result->getBodyVelocyPack()->toJson();
}
}
LOG_TOPIC(DEBUG, Logger::AGENCY)
<< "Got good callback from AppendEntriesRPC: "
<< "comm_status(" << res->status
<< "), last(" << _last << "), follower("
<< _slaveID << "), time("
<< TRI_microtime() - _startTime << ")";
} else {
LOG_TOPIC(DEBUG, Logger::AGENCY)
<< "Got bad callback from AppendEntriesRPC: "
<< "comm_status(" << res->status
<< "), last(" << _last << "), follower("
<< _slaveID << "), time("
<< TRI_microtime() - _startTime << ")";
}
return true;
}

View File

@ -28,7 +28,6 @@
#include "Agency/MoveShard.h"
using namespace arangodb::consensus;
using namespace arangodb::velocypack;
CleanOutServer::CleanOutServer(Node const& snapshot, Agent* agent,
std::string const& jobId,

View File

@ -146,6 +146,21 @@ void Constituent::termNoLock(term_t t) {
}
}
bool Constituent::logUpToDate(
arangodb::consensus::index_t prevLogIndex, term_t prevLogTerm) const {
log_t myLastLogEntry = _agent->state().lastLog();
return (prevLogTerm > myLastLogEntry.term ||
(prevLogTerm == myLastLogEntry.term &&
prevLogIndex >= myLastLogEntry.index));
}
bool Constituent::logMatches(
arangodb::consensus::index_t prevLogIndex, term_t prevLogTerm) const {
return _agent->state().has(prevLogIndex, prevLogTerm);
}
/// My role
role_t Constituent::role() const {
MUTEX_LOCKER(guard, _castLock);
@ -257,8 +272,8 @@ std::string Constituent::endpoint(std::string id) const {
}
/// @brief Check leader
bool Constituent::checkLeader(term_t term, std::string id, index_t prevLogIndex,
term_t prevLogTerm) {
bool Constituent::checkLeader(
term_t term, std::string id, index_t prevLogIndex, term_t prevLogTerm) {
TRI_ASSERT(_vocbase != nullptr);
@ -277,6 +292,11 @@ bool Constituent::checkLeader(term_t term, std::string id, index_t prevLogIndex,
if (term > _term) {
termNoLock(term);
}
if (!logMatches(prevLogIndex,prevLogTerm)) {
return false;
}
if (_leaderID != id) {
LOG_TOPIC(DEBUG, Logger::AGENCY)
<< "Set _leaderID to " << id << " in term " << _term;
@ -421,7 +441,7 @@ void Constituent::callElection() {
auto res = ClusterComm::instance()->wait(
"", coordinatorTransactionID, 0, "",
duration<double>(steady_clock::now()-timeout).count());
duration<double>(timeout - steady_clock::now()).count());
if (res.status == CL_COMM_SENT) {
auto body = res.result->getBodyVelocyPack();
@ -571,6 +591,11 @@ void Constituent::run() {
if (_lastHeartbeatSeen > 0.0) {
double now = TRI_microtime();
randWait -= static_cast<int64_t>(M * (now-_lastHeartbeatSeen));
if (randWait < a) {
randWait = a;
} else if (randWait > b) {
randWait = b;
}
}
}

View File

@ -126,6 +126,12 @@ class Constituent : public Thread {
// Wait for sync
bool waitForSync() const;
// Check if log up to date with ours
bool logUpToDate(index_t, term_t) const;
// Check if log start matches entry in my log
bool logMatches(index_t, term_t) const;
// Sleep for how long
duration_t sleepFor(double, double);

View File

@ -27,7 +27,6 @@
#include "Agency/Job.h"
using namespace arangodb::consensus;
using namespace arangodb::velocypack;
FailedFollower::FailedFollower(Node const& snapshot, Agent* agent,
std::string const& jobId,
@ -125,6 +124,7 @@ bool FailedFollower::start() {
Node const& planned = _snapshot(planPath);
// Copy todo to pending
Builder todo, pending;

View File

@ -30,7 +30,6 @@
#include <vector>
using namespace arangodb::consensus;
using namespace arangodb::velocypack;
FailedLeader::FailedLeader(Node const& snapshot, Agent* agent,
std::string const& jobId, std::string const& creator,
@ -173,17 +172,23 @@ bool FailedLeader::start() {
// Distribute shards like to come!
std::vector<std::string> planv;
for (auto const& i : VPackArrayIterator(planned)) {
planv.push_back(i.copyString());
auto s = i.copyString();
if (s != _from && s != _to) {
planv.push_back(i.copyString());
}
}
pending.add(_agencyPrefix + planPath, VPackValue(VPackValueType::Array));
pending.add(VPackValue(_to));
for (auto const& i : VPackArrayIterator(current)) {
std::string s = i.copyString();
if (s != _from) {
if (s != _from && s != _to) {
pending.add(i);
planv.erase(std::remove(planv.begin(), planv.end(), s), planv.end());
}
}
pending.add(VPackValue(_from));
for (auto const& i : planv) {
pending.add(VPackValue(i));

View File

@ -30,7 +30,6 @@
#include "Agency/UnassumedLeadership.h"
using namespace arangodb::consensus;
using namespace arangodb::velocypack;
FailedServer::FailedServer(Node const& snapshot, Agent* agent,
std::string const& jobId, std::string const& creator,
@ -286,7 +285,9 @@ JOB_STATUS FailedServer::status() {
deleteTodos->openArray();
deleteTodos->openObject();
}
deleteTodos->add(_agencyPrefix + toDoPrefix + subJob.first, VPackValue(VPackValueType::Object));
deleteTodos->add(
_agencyPrefix + toDoPrefix + subJob.first,
VPackValue(VPackValueType::Object));
deleteTodos->add("op", VPackValue("delete"));
deleteTodos->close();
} else {
@ -302,7 +303,9 @@ JOB_STATUS FailedServer::status() {
}
if (deleteTodos) {
LOG_TOPIC(INFO, Logger::AGENCY) << "Server " << _server << " is healthy again. Will try to delete any jobs which have not yet started!";
LOG_TOPIC(INFO, Logger::AGENCY)
<< "Server " << _server << " is healthy again. Will try to delete"
"any jobs which have not yet started!";
deleteTodos->close();
deleteTodos->close();
// Transact to agency

View File

@ -36,7 +36,6 @@
#include <thread>
using namespace arangodb::consensus;
using namespace arangodb::velocypack;
Inception::Inception() : Thread("Inception"), _agent(nullptr) {}

View File

@ -24,7 +24,6 @@
#include "Job.h"
using namespace arangodb::consensus;
using namespace arangodb::velocypack;
bool arangodb::consensus::compareServerLists(Slice plan, Slice current) {
if (!plan.isArray() || !current.isArray()) {

View File

@ -28,7 +28,6 @@
#include "Node.h"
#include "Supervision.h"
#include <velocypack/Builder.h>
#include <velocypack/Iterator.h>
#include <velocypack/Slice.h>
#include <velocypack/velocypack-aliases.h>
@ -42,7 +41,7 @@ namespace consensus {
// and all others followers. Both arguments must be arrays. Returns true,
// if the first items in both slice are equal and if both arrays contain
// the same set of strings.
bool compareServerLists(arangodb::velocypack::Slice plan, arangodb::velocypack::Slice current);
bool compareServerLists(Slice plan, Slice current);
enum JOB_STATUS { TODO, PENDING, FINISHED, FAILED, NOTFOUND };
const std::vector<std::string> pos({"/Target/ToDo/", "/Target/Pending/",
@ -64,9 +63,9 @@ static std::string const plannedServers = "/Plan/DBServers";
static std::string const healthPrefix = "/Supervision/Health/";
inline arangodb::consensus::write_ret_t transact(Agent* _agent,
arangodb::velocypack::Builder const& transaction,
Builder const& transaction,
bool waitForCommit = true) {
query_t envelope = std::make_shared<arangodb::velocypack::Builder>();
query_t envelope = std::make_shared<Builder>();
try {
envelope->openArray();
@ -138,7 +137,7 @@ struct Job {
std::string _creator;
std::string _agencyPrefix;
std::shared_ptr<arangodb::velocypack::Builder> _jb;
std::shared_ptr<Builder> _jb;
};

View File

@ -29,7 +29,6 @@
static std::string const DBServer = "DBServer";
using namespace arangodb::consensus;
using namespace arangodb::velocypack;
MoveShard::MoveShard(Node const& snapshot, Agent* agent,
std::string const& jobId, std::string const& creator,

View File

@ -33,9 +33,8 @@
#include <deque>
#include <regex>
using namespace arangodb::basics;
using namespace arangodb::consensus;
using namespace arangodb::velocypack;
using namespace arangodb::basics;
struct NotEmpty {
bool operator()(const std::string& s) { return !s.empty(); }
@ -700,28 +699,6 @@ void Node::toBuilder(Builder& builder, bool showHidden) const {
}
}
void Node::toObject(Builder& builder, bool showHidden) const {
try {
if (type() == NODE) {
VPackObjectBuilder guard(&builder);
for (auto const& child : _children) {
if (child.first[0] == '.' && !showHidden) {
continue;
}
builder.add(VPackValue(child.first));
child.second->toBuilder(builder);
}
} else {
if (!slice().isNone()) {
builder.add(slice());
}
}
} catch (std::exception const& e) {
LOG_TOPIC(ERR, Logger::AGENCY) << e.what() << " " << __FILE__ << __LINE__;
}
}
// Print internals to ostream
std::ostream& Node::print(std::ostream& o) const {
Node const* par = _parent;

View File

@ -27,9 +27,6 @@
#include "AgencyCommon.h"
#include <velocypack/Buffer.h>
#include <velocypack/Builder.h>
#include <velocypack/Slice.h>
#include <velocypack/ValueType.h>
#include <velocypack/velocypack-aliases.h>
#include <type_traits>
@ -53,6 +50,8 @@ enum Operation {
REPLACE
};
using namespace arangodb::velocypack;
class StoreException : public std::exception {
public:
explicit StoreException(std::string const& message) : _message(message) {}
@ -162,10 +161,7 @@ class Node {
bool handle(arangodb::velocypack::Slice const&);
/// @brief Create Builder representing this store
void toBuilder(arangodb::velocypack::Builder&, bool showHidden = false) const;
/// @brief Create Builder representing this store
void toObject(arangodb::velocypack::Builder&, bool showHidden = false) const;
void toBuilder(Builder&, bool showHidden = false) const;
/// @brief Access children
Children& children();
@ -174,10 +170,10 @@ class Node {
Children const& children() const;
/// @brief Create slice from value
arangodb::velocypack::Slice slice() const;
Slice slice() const;
/// @brief Get value type
arangodb::velocypack::ValueType valueType() const;
ValueType valueType() const;
/// @brief Add observer for this node
bool addObserver(std::string const&);
@ -222,7 +218,7 @@ class Node {
std::string getString() const;
/// @brief Get array value
arangodb::velocypack::Slice getArray() const;
Slice getArray() const;
protected:
/// @brief Add time to live entry
@ -238,8 +234,8 @@ class Node {
Store* _store; ///< @brief Store
Children _children; ///< @brief child nodes
TimePoint _ttl; ///< @brief my expiry
std::vector<arangodb::velocypack::Buffer<uint8_t>> _value; ///< @brief my value
mutable arangodb::velocypack::Buffer<uint8_t> _vecBuf;
std::vector<Buffer<uint8_t>> _value; ///< @brief my value
mutable Buffer<uint8_t> _vecBuf;
mutable bool _vecBufDirty;
bool _isArray;
};

View File

@ -27,7 +27,6 @@
#include "Agency/Job.h"
using namespace arangodb::consensus;
using namespace arangodb::velocypack;
RemoveServer::RemoveServer(Node const& snapshot, Agent* agent,
std::string const& jobId, std::string const& creator,

View File

@ -32,12 +32,13 @@
#include "Basics/StaticStrings.h"
#include "Logger/Logger.h"
#include "Rest/HttpRequest.h"
#include "Rest/Version.h"
using namespace arangodb;
using namespace arangodb::basics;
using namespace arangodb::consensus;
using namespace arangodb::rest;
using namespace arangodb::velocypack;
using namespace arangodb::consensus;
////////////////////////////////////////////////////////////////////////////////
/// @brief ArangoDB server
@ -218,6 +219,31 @@ RestStatus RestAgencyHandler::handleStores() {
return RestStatus::DONE;
}
RestStatus RestAgencyHandler::handleStore() {
if (_request->requestType() == rest::RequestType::POST) {
arangodb::velocypack::Options options;
auto query = _request->toVelocyPackBuilderPtr(&options);
arangodb::consensus::index_t index = 0;
try {
index = query->slice().getUInt();
} catch (...) {
index = _agent->lastCommitted().second;
}
query_t builder = _agent->buildDB(index);
generateResult(rest::ResponseCode::OK, builder->slice());
} else {
generateError(rest::ResponseCode::BAD, 400);
}
return RestStatus::DONE;
}
RestStatus RestAgencyHandler::handleWrite() {
if (_request->requestType() != rest::RequestType::POST) {
@ -624,12 +650,14 @@ RestStatus RestAgencyHandler::handleConfig() {
}
// Respond with configuration
auto last = _agent->lastCommitted();
Builder body;
{
VPackObjectBuilder b(&body);
body.add("term", Value(_agent->term()));
body.add("leaderId", Value(_agent->leaderID()));
body.add("lastCommitted", Value(_agent->lastCommitted()));
body.add("lastCommitted", Value(last.first));
body.add("leaderCommitted", Value(last.second));
body.add("lastAcked", _agent->lastAckedAgo()->slice());
body.add("configuration", _agent->config().toBuilder()->slice());
}
@ -691,6 +719,8 @@ RestStatus RestAgencyHandler::execute() {
return handleState();
} else if (suffixes[0] == "stores") {
return handleStores();
} else if (suffixes[0] == "store") {
return handleStore();
} else {
return reportUnknownMethod();
}

View File

@ -47,6 +47,7 @@ class RestAgencyHandler : public RestBaseHandler {
RestStatus reportTooManySuffices();
RestStatus reportUnknownMethod();
RestStatus handleStores();
RestStatus handleStore();
RestStatus handleRead();
RestStatus handleWrite();
RestStatus handleTransact();

View File

@ -32,6 +32,7 @@
#include "Logger/Logger.h"
#include "Rest/HttpRequest.h"
#include "Rest/Version.h"
using namespace arangodb;

View File

@ -315,21 +315,66 @@ std::vector<log_t> State::get(arangodb::consensus::index_t start,
return entries;
}
if (end == (std::numeric_limits<uint64_t>::max)() || end > _log.size() - 1) {
end = _log.size() - 1;
if (end == (std::numeric_limits<uint64_t>::max)() || end > _log.back().index) {
end = _log.back().index;
}
if (start < _log[0].index) {
start = _log[0].index;
}
for (size_t i = start - _cur; i <= end; ++i) {
for (size_t i = start - _cur; i <= end - _cur; ++i) {
entries.push_back(_log[i]);
}
return entries;
}
/// Get log entries from indices "start" to "end"
/// Throws std::out_of_range exception
log_t State::at(arangodb::consensus::index_t index) const {
MUTEX_LOCKER(mutexLocker, _logLock); // Cannot be read lock (Compaction)
if (_cur > index) {
std::string excMessage =
std::string(
"Access before the start of the log deque: (first, requested): (") +
std::to_string(_cur) + ", " + std::to_string(index);
LOG_TOPIC(DEBUG, Logger::AGENCY) << excMessage;
throw std::out_of_range(excMessage);
}
auto pos = index - _cur;
if (pos > _log.size()) {
std::string excMessage =
std::string(
"Access beyond the end of the log deque: (last, requested): (") +
std::to_string(_cur+_log.size()) + ", " + std::to_string(index);
LOG_TOPIC(DEBUG, Logger::AGENCY) << excMessage;
throw std::out_of_range(excMessage);
}
return _log[pos];
}
/// Have log with specified index and term
bool State::has(arangodb::consensus::index_t index, term_t term) const {
MUTEX_LOCKER(mutexLocker, _logLock); // Cannot be read lock (Compaction)
try {
return _log.at(index-_cur).term == term;
} catch (...) {}
return false;
}
/// Get vector of past transaction from 'start' to 'end'
std::vector<VPackSlice> State::slices(arangodb::consensus::index_t start,
arangodb::consensus::index_t end) const {
@ -906,3 +951,9 @@ std::vector<std::vector<log_t>> State::inquire(query_t const& query) const {
}
// Index of last log entry
arangodb::consensus::index_t State::lastIndex() const {
MUTEX_LOCKER(mutexLocker, _logLock);
return (!_log.empty()) ? _log.back().index : 0;
}

View File

@ -66,21 +66,27 @@ class State {
std::vector<bool> const& indices, term_t term);
/// @brief Single log entry (leader)
arangodb::consensus::index_t log(
velocypack::Slice const& slice, term_t term,
std::string const& clientId = std::string());
index_t log(velocypack::Slice const& slice, term_t term,
std::string const& clientId = std::string());
/// @brief Log entries (followers)
arangodb::consensus::index_t log(query_t const& queries, size_t ndups = 0);
/// @brief Find entry at index with term
bool find(index_t index, term_t term);
/// @brief Get complete log entries bound by lower and upper bounds.
/// Default: [first, last]
std::vector<log_t> get(
index_t = 0, index_t = (std::numeric_limits<uint64_t>::max)()) const;
index_t = 0, index_t = (std::numeric_limits<uint64_t>::max)()) const;
/// @brief Get complete log entries bound by lower and upper bounds.
/// Default: [first, last]
log_t at(index_t) const;
/// @brief Has entry with index und term
bool has(index_t, term_t) const;
/// @brief Get log entries by client Id
std::vector<std::vector<log_t>> inquire(query_t const&) const;
@ -96,6 +102,10 @@ class State {
/// after the return
log_t lastLog() const;
/// @brief last log entry, copy entry because we do no longer have the lock
/// after the return
index_t lastIndex() const;
/// @brief Set endpoint
bool configure(Agent* agent);

View File

@ -40,9 +40,8 @@
#include <iomanip>
#include <regex>
using namespace arangodb::basics;
using namespace arangodb::consensus;
using namespace arangodb::velocypack;
using namespace arangodb::basics;
/// Non-Emptyness of string
struct NotEmpty {
@ -353,15 +352,11 @@ std::vector<bool> Store::apply(
auto headerFields =
std::make_unique<std::unordered_map<std::string, std::string>>();
auto cc = ClusterComm::instance();
if (cc != nullptr) {
// nullptr only happens on controlled shutdown
cc->asyncRequest(
"1", 1, endpoint, rest::RequestType::POST, path,
std::make_shared<std::string>(body.toString()), headerFields,
std::make_shared<StoreCallback>(path, body.toJson()), 1.0, true,
0.01);
}
arangodb::ClusterComm::instance()->asyncRequest(
"1", 1, endpoint, rest::RequestType::POST, path,
std::make_shared<std::string>(body.toString()), headerFields,
std::make_shared<StoreCallback>(path, body.toJson()), 1.0, true, 0.01);
} else {
LOG_TOPIC(WARN, Logger::AGENCY) << "Malformed URL " << url;
}

View File

@ -60,10 +60,10 @@ class Store : public arangodb::Thread {
std::vector<bool> apply(query_t const& query, bool verbose = false);
/// @brief Apply single entry in query
bool apply(arangodb::velocypack::Slice const& query, bool verbose = false);
bool apply(Slice const& query, bool verbose = false);
/// @brief Apply entry in query
std::vector<bool> apply(std::vector<arangodb::velocypack::Slice> const& query,
std::vector<bool> apply(std::vector<Slice> const& query,
index_t lastCommitIndex, term_t term,
bool inform = true);
@ -81,7 +81,7 @@ class Store : public arangodb::Thread {
bool start();
/// @brief Dump everything to builder
void dumpToBuilder(arangodb::velocypack::Builder&) const;
void dumpToBuilder(Builder&) const;
/// @brief Notify observers
void notifyObservers() const;
@ -92,7 +92,7 @@ class Store : public arangodb::Thread {
Store& operator=(VPackSlice const& slice);
/// @brief Create Builder representing this store
void toBuilder(arangodb::velocypack::Builder&, bool showHidden = false) const;
void toBuilder(Builder&, bool showHidden = false) const;
/// @brief Copy out a node
Node get(std::string const& path = std::string("/")) const;

View File

@ -41,9 +41,9 @@
#include "Basics/MutexLocker.h"
using namespace arangodb;
using namespace arangodb::application_features;
using namespace arangodb::consensus;
using namespace arangodb::velocypack;
using namespace arangodb::application_features;
std::string Supervision::_agencyPrefix = "/arango";
@ -552,11 +552,13 @@ void Supervision::handleShutdown() {
del->close();
auto result = _agent->write(del);
if (result.indices.size() != 1) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "Invalid resultsize of " << result.indices.size()
<< " found during shutdown";
LOG_TOPIC(ERR, Logger::AGENCY)
<< "Invalid resultsize of " << result.indices.size()
<< " found during shutdown";
} else {
if (_agent->waitFor(result.indices.at(0)) != Agent::raft_commit_t::OK) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "Result was not written to followers during shutdown";
LOG_TOPIC(ERR, Logger::AGENCY)
<< "Result was not written to followers during shutdown";
}
}
}

View File

@ -27,7 +27,6 @@
#include "Agency/Job.h"
using namespace arangodb::consensus;
using namespace arangodb::velocypack;
UnassumedLeadership::UnassumedLeadership(
Node const& snapshot, Agent* agent, std::string const& jobId,

View File

@ -39,7 +39,6 @@ using namespace arangodb;
using namespace arangodb::application_features;
using namespace arangodb::basics;
using namespace arangodb::consensus;
using namespace arangodb::velocypack;
static void JS_EnabledAgent(v8::FunctionCallbackInfo<v8::Value> const& args) {
TRI_V8_TRY_CATCH_BEGIN(isolate);

View File

@ -33,44 +33,43 @@ using namespace arangodb;
using namespace arangodb::aql;
using namespace arangodb::basics;
Aggregator* Aggregator::fromTypeString(transaction::Methods* trx,
std::string const& type) {
std::unique_ptr<Aggregator> Aggregator::fromTypeString(transaction::Methods* trx,
std::string const& type) {
if (type == "LENGTH" || type == "COUNT") {
return new AggregatorLength(trx);
return std::make_unique<AggregatorLength>(trx);
}
if (type == "MIN") {
return new AggregatorMin(trx);
return std::make_unique<AggregatorMin>(trx);
}
if (type == "MAX") {
return new AggregatorMax(trx);
return std::make_unique<AggregatorMax>(trx);
}
if (type == "SUM") {
return new AggregatorSum(trx);
return std::make_unique<AggregatorSum>(trx);
}
if (type == "AVERAGE" || type == "AVG") {
return new AggregatorAverage(trx);
return std::make_unique<AggregatorAverage>(trx);
}
if (type == "VARIANCE_POPULATION" || type == "VARIANCE") {
return new AggregatorVariance(trx, true);
return std::make_unique<AggregatorVariance>(trx, true);
}
if (type == "VARIANCE_SAMPLE") {
return new AggregatorVariance(trx, false);
return std::make_unique<AggregatorVariance>(trx, false);
}
if (type == "STDDEV_POPULATION" || type == "STDDEV") {
return new AggregatorStddev(trx, true);
return std::make_unique<AggregatorStddev>(trx, true);
}
if (type == "STDDEV_SAMPLE") {
return new AggregatorStddev(trx, false);
return std::make_unique<AggregatorStddev>(trx, false);
}
// aggregator function name should have been validated before
TRI_ASSERT(false);
return nullptr;
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid aggregator type");
}
Aggregator* Aggregator::fromVPack(transaction::Methods* trx,
arangodb::velocypack::Slice const& slice,
char const* variableName) {
std::unique_ptr<Aggregator> Aggregator::fromVPack(transaction::Methods* trx,
arangodb::velocypack::Slice const& slice,
char const* variableName) {
VPackSlice variable = slice.get(variableName);
if (variable.isString()) {

View File

@ -49,10 +49,10 @@ struct Aggregator {
virtual void reduce(AqlValue const&) = 0;
virtual AqlValue stealValue() = 0;
static Aggregator* fromTypeString(transaction::Methods*,
std::string const&);
static Aggregator* fromVPack(transaction::Methods*,
arangodb::velocypack::Slice const&, char const*);
static std::unique_ptr<Aggregator> fromTypeString(transaction::Methods*,
std::string const&);
static std::unique_ptr<Aggregator> fromVPack(transaction::Methods*,
arangodb::velocypack::Slice const&, char const*);
static bool isSupported(std::string const&);
static bool requiresInput(std::string const&);

View File

@ -42,11 +42,10 @@ int AqlTransaction::processCollection(aql::Collection* collection) {
/// @brief add a coordinator collection to the transaction
int AqlTransaction::processCollectionCoordinator(aql::Collection* collection) {
TRI_voc_cid_t cid =
this->resolver()->getCollectionId(collection->getName());
TRI_voc_cid_t cid = resolver()->getCollectionId(collection->getName());
return this->addCollection(cid, collection->getName().c_str(),
collection->accessType);
return addCollection(cid, collection->getName().c_str(),
collection->accessType);
}
/// @brief add a regular collection to the transaction
@ -55,7 +54,7 @@ int AqlTransaction::processCollectionNormal(aql::Collection* collection) {
TRI_voc_cid_t cid = 0;
arangodb::LogicalCollection const* col =
this->resolver()->getCollectionStruct(collection->getName());
resolver()->getCollectionStruct(collection->getName());
/*if (col == nullptr) {
auto startTime = TRI_microtime();
auto endTime = startTime + 60.0;
@ -72,8 +71,7 @@ int AqlTransaction::processCollectionNormal(aql::Collection* collection) {
cid = col->cid();
}
int res =
this->addCollection(cid, collection->getName(), collection->accessType);
int res = addCollection(cid, collection->getName(), collection->accessType);
if (res == TRI_ERROR_NO_ERROR && col != nullptr) {
collection->setCollection(const_cast<arangodb::LogicalCollection*>(col));

View File

@ -62,14 +62,14 @@ class AqlTransaction final : public transaction::Methods {
/// @brief add a list of collections to the transaction
int addCollections(
std::map<std::string, aql::Collection*> const& collections) {
int ret = TRI_ERROR_NO_ERROR;
for (auto const& it : collections) {
ret = processCollection(it.second);
if (ret != TRI_ERROR_NO_ERROR) {
break;
int res = processCollection(it.second);
if (res != TRI_ERROR_NO_ERROR) {
return res;
}
}
return ret;
return TRI_ERROR_NO_ERROR;
}
/// @brief add a collection to the transaction

View File

@ -853,7 +853,9 @@ void AqlValue::toVelocyPack(transaction::Methods* trx,
case VPACK_INLINE:
case VPACK_MANAGED: {
if (resolveExternals) {
arangodb::basics::VelocyPackHelper::SanitizeExternals(slice(), builder);
bool const sanitizeExternals = true;
bool const sanitizeCustom = true;
arangodb::basics::VelocyPackHelper::sanitizeNonClientTypes(slice(), VPackSlice::noneSlice(), builder, trx->transactionContextPtr()->getVPackOptions(), sanitizeExternals, sanitizeCustom);
} else {
builder.add(slice());
}

View File

@ -95,12 +95,7 @@ std::unordered_map<int, AstNodeType> const Ast::ReversedOperators{
/// @brief create the AST
Ast::Ast(Query* query)
: _query(query),
_scopes(),
_variables(),
_bindParameters(),
_root(nullptr),
_queries(),
_writeCollections(),
_functionsMayAccessDocuments(false),
_containsTraversal(false) {
TRI_ASSERT(_query != nullptr);

View File

@ -40,14 +40,10 @@ namespace velocypack {
class Slice;
}
namespace transaction {
class Methods;
}
;
namespace aql {
class Query;
class VariableGenerator;
typedef std::unordered_map<Variable const*, std::unordered_set<std::string>>
TopLevelAttributes;
@ -58,7 +54,7 @@ class Ast {
public:
/// @brief create the AST
Ast(Query*);
explicit Ast(Query*);
/// @brief destroy the AST
~Ast();
@ -69,12 +65,7 @@ class Ast {
/// @brief return the variable generator
inline VariableGenerator* variables() { return &_variables; }
/// @brief return the variable generator
inline VariableGenerator* variables() const {
return const_cast<VariableGenerator*>(&_variables);
}
/// @brief return the root of the AST
inline AstNode const* root() const { return _root; }

View File

@ -39,7 +39,9 @@ class BindParameters {
public:
BindParameters(BindParameters const&) = delete;
BindParameters& operator=(BindParameters const&) = delete;
BindParameters() = delete;
BindParameters()
: _builder(nullptr), _parameters(), _processed(false) {}
/// @brief create the parameters
explicit BindParameters(std::shared_ptr<arangodb::velocypack::Builder> builder)

View File

@ -25,6 +25,7 @@
#include "Aql/AqlItemBlock.h"
#include "Aql/ExecutionEngine.h"
#include "Aql/Functions.h"
#include "Aql/Query.h"
#include "Basics/Exceptions.h"
#include "Basics/ScopeGuard.h"
#include "Basics/VelocyPackHelper.h"

View File

@ -29,6 +29,7 @@
#include "Aql/Collection.h"
#include "Aql/ExecutionEngine.h"
#include "Aql/ExecutionStats.h"
#include "Aql/Query.h"
#include "Basics/Exceptions.h"
#include "Basics/StaticStrings.h"
#include "Basics/StringBuffer.h"

View File

@ -25,6 +25,7 @@
#include "Aql/Ast.h"
#include "Aql/Collection.h"
#include "Aql/ExecutionPlan.h"
#include "Aql/Query.h"
using namespace arangodb::basics;
using namespace arangodb::aql;

View File

@ -59,9 +59,6 @@ SortedCollectBlock::CollectGroup::~CollectGroup() {
for (auto& it : groupBlocks) {
delete it;
}
for (auto& it : aggregators) {
delete it;
}
}
void SortedCollectBlock::CollectGroup::initialize(size_t capacity) {
@ -79,7 +76,6 @@ void SortedCollectBlock::CollectGroup::initialize(size_t capacity) {
// reset aggregators
for (auto& it : aggregators) {
TRI_ASSERT(it != nullptr);
it->reset();
}
}
@ -102,7 +98,6 @@ void SortedCollectBlock::CollectGroup::reset() {
// reset all aggregators
for (auto& it : aggregators) {
TRI_ASSERT(it != nullptr);
it->reset();
}
@ -185,7 +180,7 @@ SortedCollectBlock::SortedCollectBlock(ExecutionEngine* engine,
_aggregateRegisters.emplace_back(
std::make_pair((*itOut).second.registerId, reg));
_currentGroup.aggregators.emplace_back(
Aggregator::fromTypeString(_trx, p.second.second));
std::move(Aggregator::fromTypeString(_trx, p.second.second)));
}
TRI_ASSERT(_aggregateRegisters.size() == en->_aggregateVariables.size());
TRI_ASSERT(_aggregateRegisters.size() == _currentGroup.aggregators.size());
@ -617,14 +612,8 @@ int HashedCollectBlock::getOrSkipSome(size_t atLeast, size_t atMost,
// cleanup function for group values
auto cleanup = [&allGroups]() -> void {
for (auto& it : allGroups) {
if (it.second != nullptr) {
for (auto& it2 : *(it.second)) {
delete it2;
}
delete it.second;
}
delete it.second;
}
allGroups.clear();
};
// prevent memory leaks by always cleaning up the groups
@ -643,8 +632,8 @@ int HashedCollectBlock::getOrSkipSome(size_t atLeast, size_t atMost,
size_t row = 0;
for (auto& it : allGroups) {
auto& keys = it.first;
TRI_ASSERT(it.second != nullptr);
TRI_ASSERT(keys.size() == _groupRegisters.size());
size_t i = 0;
@ -653,7 +642,7 @@ int HashedCollectBlock::getOrSkipSome(size_t atLeast, size_t atMost,
const_cast<AqlValue*>(&key)->erase(); // to prevent double-freeing later
}
if (it.second != nullptr && !en->_count) {
if (!en->_count) {
TRI_ASSERT(it.second->size() == _aggregateRegisters.size());
size_t j = 0;
for (auto const& r : *(it.second)) {
@ -662,7 +651,7 @@ int HashedCollectBlock::getOrSkipSome(size_t atLeast, size_t atMost,
}
} else if (en->_count) {
// set group count in result register
TRI_ASSERT(it.second != nullptr);
TRI_ASSERT(!it.second->empty());
result->setValue(row, _collectRegister,
it.second->back()->stealValue());
}
@ -722,7 +711,7 @@ int HashedCollectBlock::getOrSkipSome(size_t atLeast, size_t atMost,
// no aggregate registers. this means we'll only count the number of
// items
if (en->_count) {
aggregateValues->emplace_back(new AggregatorLength(_trx, 1));
aggregateValues->emplace_back(std::move(std::make_unique<AggregatorLength>(_trx, 1)));
}
} else {
// we do have aggregate registers. create them as empty AqlValues
@ -732,7 +721,7 @@ int HashedCollectBlock::getOrSkipSome(size_t atLeast, size_t atMost,
size_t j = 0;
for (auto const& r : en->_aggregateVariables) {
aggregateValues->emplace_back(
Aggregator::fromTypeString(_trx, r.second.second));
std::move(Aggregator::fromTypeString(_trx, r.second.second)));
aggregateValues->back()->reduce(
GetValueForRegister(cur, _pos, _aggregateRegisters[j].second));
++j;
@ -749,10 +738,12 @@ int HashedCollectBlock::getOrSkipSome(size_t atLeast, size_t atMost,
if (en->_aggregateVariables.empty()) {
// no aggregate registers. simply increase the counter
if (en->_count) {
TRI_ASSERT(!aggregateValues->empty());
aggregateValues->back()->reduce(AqlValue());
}
} else {
// apply the aggregators for the group
TRI_ASSERT(aggregateValues->size() == _aggregateRegisters.size());
size_t j = 0;
for (auto const& r : _aggregateRegisters) {
(*aggregateValues)[j]->reduce(

View File

@ -37,19 +37,16 @@ namespace arangodb {
namespace transaction {
class Methods;
}
;
namespace aql {
struct Aggregator;
class AqlItemBlock;
class ExecutionEngine;
typedef std::vector<Aggregator*> AggregateValuesType;
typedef std::vector<std::unique_ptr<Aggregator>> AggregateValuesType;
class SortedCollectBlock final : public ExecutionBlock {
private:
typedef std::vector<Aggregator*> AggregateValuesType;
struct CollectGroup {
std::vector<AqlValue> groupValues;

View File

@ -24,6 +24,7 @@
#include "CollectNode.h"
#include "Aql/Ast.h"
#include "Aql/ExecutionPlan.h"
#include "Aql/VariableGenerator.h"
#include "Aql/WalkerWorker.h"
using namespace arangodb::aql;

View File

@ -52,6 +52,8 @@ class Collections {
std::map<std::string, Collection*> const* collections() const;
bool empty() const { return _collections.empty(); }
private:
TRI_vocbase_t* _vocbase;

View File

@ -26,6 +26,7 @@
#include "Aql/AstNode.h"
#include "Aql/Collection.h"
#include "Aql/ExecutionPlan.h"
#include "Aql/Query.h"
#include "Aql/SortCondition.h"
#include "Aql/Variable.h"
#include "Basics/Exceptions.h"

View File

@ -26,6 +26,7 @@
#include "Aql/AqlItemBlock.h"
#include "Aql/Collection.h"
#include "Aql/ExecutionEngine.h"
#include "Aql/Query.h"
#include "Basics/Exceptions.h"
#include "Cluster/FollowerInfo.h"
#include "StorageEngine/DocumentIdentifierToken.h"

View File

@ -28,6 +28,7 @@
#include "Aql/Ast.h"
#include "Aql/BlockCollector.h"
#include "Aql/ExecutionEngine.h"
#include "Aql/Query.h"
using namespace arangodb::aql;

View File

@ -34,6 +34,7 @@
#include "Aql/ExecutionNode.h"
#include "Aql/IndexBlock.h"
#include "Aql/ModificationBlocks.h"
#include "Aql/Query.h"
#include "Aql/SortBlock.h"
#include "Aql/SubqueryBlock.h"
#include "Aql/TraversalBlock.h"

View File

@ -31,6 +31,7 @@
#include "Aql/ExecutionPlan.h"
#include "Aql/IndexNode.h"
#include "Aql/ModificationNodes.h"
#include "Aql/Query.h"
#include "Aql/SortNode.h"
#include "Aql/TraversalNode.h"
#include "Aql/ShortestPathNode.h"
@ -1375,7 +1376,7 @@ ExecutionNode* CalculationNode::clone(ExecutionPlan* plan,
outVariable = plan->getAst()->variables()->createVariable(outVariable);
}
auto c = new CalculationNode(plan, _id, _expression->clone(),
auto c = new CalculationNode(plan, _id, _expression->clone(plan->getAst()),
conditionVariable, outVariable);
c->_canRemoveIfThrows = _canRemoveIfThrows;

View File

@ -228,8 +228,6 @@ void ExecutionPlan::getCollectionsFromVelocyPack(Ast* ast,
}
for (auto const& collection : VPackArrayIterator(collectionsSlice)) {
auto typeStr = arangodb::basics::VelocyPackHelper::checkAndGetStringValue(
collection, "type");
ast->query()->collections()->add(
arangodb::basics::VelocyPackHelper::checkAndGetStringValue(collection,
"name"),
@ -276,8 +274,8 @@ class CloneNodeAdder final : public WalkerWorker<ExecutionNode> {
};
/// @brief clone an existing execution plan
ExecutionPlan* ExecutionPlan::clone() {
auto plan = std::make_unique<ExecutionPlan>(_ast);
ExecutionPlan* ExecutionPlan::clone(Ast* ast) {
auto plan = std::make_unique<ExecutionPlan>(ast);
plan->_root = _root->clone(plan.get(), true, false);
plan->_nextId = _nextId;
@ -297,13 +295,19 @@ ExecutionPlan* ExecutionPlan::clone() {
return plan.release();
}
/// @brief clone an existing execution plan
ExecutionPlan* ExecutionPlan::clone() {
return clone(_ast);
}
/// @brief create an execution plan identical to this one
/// keep the memory of the plan on the query object specified.
ExecutionPlan* ExecutionPlan::clone(Query const& query) {
auto otherPlan = std::make_unique<ExecutionPlan>(query.ast());
for (auto const& it : _ids) {
otherPlan->registerNode(it.second->clone(otherPlan.get(), false, true));
auto clonedNode = it.second->clone(otherPlan.get(), false, true);
otherPlan->registerNode(clonedNode);
}
return otherPlan.release();

View File

@ -28,7 +28,6 @@
#include "Aql/CollectOptions.h"
#include "Aql/ExecutionNode.h"
#include "Aql/ModificationOptions.h"
#include "Aql/Query.h"
#include "Aql/types.h"
#include "Basics/SmallVector.h"
@ -40,6 +39,7 @@ struct AstNode;
class CalculationNode;
class CollectNode;
class ExecutionNode;
class Query;
class ExecutionPlan {
public:
@ -60,6 +60,8 @@ class ExecutionPlan {
/// @brief create an execution plan from VelocyPack
static ExecutionPlan* instantiateFromVelocyPack(
Ast* ast, arangodb::velocypack::Slice const);
ExecutionPlan* clone(Ast*);
/// @brief clone the plan by recursively cloning starting from the root
ExecutionPlan* clone();
@ -69,9 +71,9 @@ class ExecutionPlan {
ExecutionPlan* clone(Query const&);
/// @brief export to VelocyPack
std::shared_ptr<arangodb::velocypack::Builder> toVelocyPack(Ast*, bool) const;
std::shared_ptr<arangodb::velocypack::Builder> toVelocyPack(Ast*, bool verbose) const;
void toVelocyPack(arangodb::velocypack::Builder&, Ast*, bool) const;
void toVelocyPack(arangodb::velocypack::Builder&, Ast*, bool verbose) const;
/// @brief check if the plan is empty
inline bool empty() const { return (_root == nullptr); }

View File

@ -107,10 +107,10 @@ class Expression {
}
/// @brief clone the expression, needed to clone execution plans
Expression* clone() {
Expression* clone(Ast* ast) {
// We do not need to copy the _ast, since it is managed by the
// query object and the memory management of the ASTs
return new Expression(_ast, _node);
return new Expression(ast != nullptr ? ast : _ast, _node);
}
/// @brief return all variables used in the expression

View File

@ -411,13 +411,11 @@ void Functions::Stringify(transaction::Methods* trx,
return;
}
if (slice.isObject() || slice.isArray()) {
VPackDumper dumper(&buffer, trx->transactionContextPtr()->getVPackOptions());
dumper.dump(slice);
return;
}
VPackDumper dumper(&buffer);
VPackOptions* options = trx->transactionContextPtr()->getVPackOptionsForDump();
VPackOptions adjustedOptions = *options;
adjustedOptions.escapeUnicode = false;
adjustedOptions.escapeForwardSlashes = false;
VPackDumper dumper(&buffer, &adjustedOptions);
dumper.dump(slice);
}
@ -2270,7 +2268,7 @@ AqlValue Functions::Zip(arangodb::aql::Query* query,
for (VPackValueLength i = 0; i < n; ++i) {
buffer->reset();
Stringify(trx, adapter, keysSlice.at(i));
builder->add(std::string(buffer->c_str(), buffer->length()), valuesSlice.at(i));
builder->add(buffer->c_str(), buffer->length(), valuesSlice.at(i));
}
builder->close();
return AqlValue(builder.get());

View File

@ -28,6 +28,7 @@
#include "Aql/Condition.h"
#include "Aql/ExecutionEngine.h"
#include "Aql/Functions.h"
#include "Aql/Query.h"
#include "Basics/ScopeGuard.h"
#include "Basics/Exceptions.h"
#include "Basics/StaticStrings.h"

View File

@ -26,6 +26,7 @@
#include "Aql/Collection.h"
#include "Aql/Condition.h"
#include "Aql/ExecutionPlan.h"
#include "Aql/Query.h"
#include "Transaction/Methods.h"
#include <velocypack/Iterator.h>

View File

@ -48,7 +48,7 @@ ModificationBlock::ModificationBlock(ExecutionEngine* engine,
_isDBServer(false),
_usesDefaultSharding(true) {
_trx->orderDitch(_collection->cid());
_trx->pinData(_collection->cid());
auto const& registerPlan = ep->getRegisterPlan()->varInfo;

View File

@ -25,6 +25,8 @@
#include "Aql/Ast.h"
#include "Aql/Collection.h"
#include "Aql/ExecutionPlan.h"
#include "Aql/Query.h"
#include "Aql/VariableGenerator.h"
using namespace arangodb::aql;

View File

@ -32,6 +32,7 @@
#include "Aql/Function.h"
#include "Aql/IndexNode.h"
#include "Aql/ModificationNodes.h"
#include "Aql/Query.h"
#include "Aql/ShortestPathNode.h"
#include "Aql/SortCondition.h"
#include "Aql/SortNode.h"

View File

@ -57,7 +57,7 @@ namespace aql {
class Parser {
public:
/// @brief create the parser
Parser(Query*);
explicit Parser(Query*);
/// @brief destroy the parser
~Parser();

98
arangod/Aql/PlanCache.cpp Normal file
View File

@ -0,0 +1,98 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Jan Steemann
////////////////////////////////////////////////////////////////////////////////
#include "PlanCache.h"
#include "Aql/Ast.h"
#include "Aql/ExecutionPlan.h"
#include "Aql/Query.h"
#include "Basics/ReadLocker.h"
#include "Basics/WriteLocker.h"
#include "VocBase/vocbase.h"
#include <velocypack/Builder.h>
using namespace arangodb::aql;
/// @brief singleton instance of the plan cache
static arangodb::aql::PlanCache Instance;
/// @brief create the plan cache
PlanCache::PlanCache() : _lock(), _plans() {}
/// @brief destroy the plan cache
PlanCache::~PlanCache() {}
/// @brief lookup a plan in the cache
std::shared_ptr<PlanCacheEntry> PlanCache::lookup(TRI_vocbase_t* vocbase, uint64_t hash,
char const* queryString,
size_t queryStringLength) {
READ_LOCKER(readLocker, _lock);
auto it = _plans.find(vocbase);
if (it == _plans.end()) {
// no entry found for the requested database
return std::shared_ptr<PlanCacheEntry>();
}
auto it2 = (*it).second.find(hash);
if (it2 == (*it).second.end()) {
// plan not found in cache
return std::shared_ptr<PlanCacheEntry>();
}
// plan found in cache
return (*it2).second;
}
/// @brief store a plan in the cache
void PlanCache::store(
TRI_vocbase_t* vocbase, uint64_t hash, char const* queryString,
size_t queryStringLength, ExecutionPlan const* plan) {
auto entry = std::make_unique<PlanCacheEntry>(std::string(queryString, queryStringLength), plan->toVelocyPack(plan->getAst(), true));
WRITE_LOCKER(writeLocker, _lock);
auto it = _plans.find(vocbase);
if (it == _plans.end()) {
// create entry for the current database
it = _plans.emplace(vocbase, std::unordered_map<uint64_t, std::shared_ptr<PlanCacheEntry>>()).first;
}
// store cache entry
(*it).second.emplace(hash, std::move(entry));
}
/// @brief invalidate all queries for a particular database
void PlanCache::invalidate(TRI_vocbase_t* vocbase) {
WRITE_LOCKER(writeLocker, _lock);
_plans.erase(vocbase);
}
/// @brief get the plan cache instance
PlanCache* PlanCache::instance() { return &Instance; }

84
arangod/Aql/PlanCache.h Normal file
View File

@ -0,0 +1,84 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Jan Steemann
////////////////////////////////////////////////////////////////////////////////
#ifndef ARANGOD_AQL_PLAN_CACHE_H
#define ARANGOD_AQL_PLAN_CACHE_H 1
#include "Basics/Common.h"
#include "Basics/ReadWriteLock.h"
struct TRI_vocbase_t;
namespace arangodb {
namespace velocypack {
class Builder;
}
namespace aql {
class ExecutionPlan;
class VariableGenerator;
struct PlanCacheEntry {
PlanCacheEntry(std::string&& queryString,
std::shared_ptr<arangodb::velocypack::Builder> builder)
: queryString(std::move(queryString)), builder(builder) {}
std::string queryString;
std::shared_ptr<arangodb::velocypack::Builder> builder;
};
class PlanCache {
public:
PlanCache(PlanCache const&) = delete;
PlanCache& operator=(PlanCache const&) = delete;
/// @brief create cache
PlanCache();
/// @brief destroy the cache
~PlanCache();
public:
/// @brief lookup a plan in the cache
std::shared_ptr<PlanCacheEntry> lookup(TRI_vocbase_t*, uint64_t, char const*, size_t);
/// @brief store a plan in the cache
void store(TRI_vocbase_t*, uint64_t, char const*, size_t, ExecutionPlan const*);
/// @brief invalidate all plans for a particular database
void invalidate(TRI_vocbase_t*);
/// @brief get the pointer to the global plan cache
static PlanCache* instance();
private:
/// @brief read-write lock for the cache
arangodb::basics::ReadWriteLock _lock;
/// @brief cached query plans, organized per database
std::unordered_map<TRI_vocbase_t*, std::unordered_map<uint64_t, std::shared_ptr<PlanCacheEntry>>> _plans;
};
}
}
#endif

View File

@ -31,6 +31,7 @@
#include "Aql/Executor.h"
#include "Aql/Optimizer.h"
#include "Aql/Parser.h"
#include "Aql/PlanCache.h"
#include "Aql/QueryCache.h"
#include "Aql/QueryList.h"
#include "Basics/Exceptions.h"
@ -54,11 +55,17 @@
#include <velocypack/Iterator.h>
#include <velocypack/velocypack-aliases.h>
#ifndef USE_PLAN_CACHE
#undef USE_PLAN_CACHE
#endif
using namespace arangodb;
using namespace arangodb::aql;
namespace {
static std::atomic<TRI_voc_tick_t> NextQueryId(1);
constexpr uint64_t DontCache = 0;
}
/// @brief names of query phases / states
@ -148,20 +155,15 @@ Query::Query(bool contextOwnedByExterior, TRI_vocbase_t* vocbase,
_resourceMonitor(),
_resources(&_resourceMonitor),
_vocbase(vocbase),
_executor(nullptr),
_context(nullptr),
_queryString(queryString),
_queryLength(queryLength),
_queryStringLength(queryLength),
_queryBuilder(),
_bindParameters(bindParameters),
_options(options),
_collections(vocbase),
_ast(nullptr),
_profile(nullptr),
_state(INVALID_STATE),
_parser(nullptr),
_trx(nullptr),
_engine(nullptr),
_maxWarningCount(10),
_warnings(),
_startTime(TRI_microtime()),
@ -221,20 +223,14 @@ Query::Query(bool contextOwnedByExterior, TRI_vocbase_t* vocbase,
_resourceMonitor(),
_resources(&_resourceMonitor),
_vocbase(vocbase),
_executor(nullptr),
_context(nullptr),
_queryString(nullptr),
_queryLength(0),
_queryStringLength(0),
_queryBuilder(queryStruct),
_bindParameters(nullptr),
_options(options),
_collections(vocbase),
_ast(nullptr),
_profile(nullptr),
_state(INVALID_STATE),
_parser(nullptr),
_trx(nullptr),
_engine(nullptr),
_maxWarningCount(10),
_warnings(),
_startTime(TRI_microtime()),
@ -278,11 +274,9 @@ Query::~Query() {
}
cleanupPlanAndEngine(TRI_ERROR_INTERNAL); // abort the transaction
delete _profile;
_profile = nullptr;
_profile.reset();
delete _executor;
_executor = nullptr;
_executor.reset();
if (_context != nullptr) {
TRI_ASSERT(!_contextOwnedByExterior);
@ -300,8 +294,7 @@ Query::~Query() {
_context = nullptr;
}
delete _ast;
_ast = nullptr;
_ast.reset();
for (auto& it : _graphs) {
delete it.second;
@ -317,7 +310,7 @@ Query::~Query() {
/// the query
Query* Query::clone(QueryPart part, bool withPlan) {
auto clone =
std::make_unique<Query>(false, _vocbase, _queryString, _queryLength,
std::make_unique<Query>(false, _vocbase, _queryString, _queryStringLength,
std::shared_ptr<VPackBuilder>(), _options, part);
clone->_resourceMonitor = _resourceMonitor;
@ -373,7 +366,7 @@ std::string Query::extractRegion(int line, int column) const {
char c;
char const* p = _queryString;
while ((static_cast<size_t>(p - _queryString) < _queryLength) && (c = *p)) {
while ((static_cast<size_t>(p - _queryString) < _queryStringLength) && (c = *p)) {
if (currentLine > line ||
(currentLine >= line && currentColumn >= column)) {
break;
@ -406,9 +399,9 @@ std::string Query::extractRegion(int line, int column) const {
static int const SNIPPET_LENGTH = 32;
static char const* SNIPPET_SUFFIX = "...";
if (_queryLength < offset + SNIPPET_LENGTH) {
if (_queryStringLength < offset + SNIPPET_LENGTH) {
// return a copy of the region
return std::string(_queryString + offset, _queryLength - offset);
return std::string(_queryString + offset, _queryStringLength - offset);
}
// copy query part
@ -461,157 +454,195 @@ void Query::registerWarning(int code, char const* details) {
}
}
void Query::prepare(QueryRegistry* registry, uint64_t queryStringHash) {
TRI_ASSERT(registry != nullptr);
init();
enterState(PARSING);
std::unique_ptr<ExecutionPlan> plan;
#if USE_PLAN_CACHE
if (_queryString != nullptr &&
queryStringHash != DontCache &&
_part == PART_MAIN) {
// LOG_TOPIC(INFO, Logger::FIXME) << "trying to find query in execution plan cache: '" << std::string(_queryString, _queryStringLength) << "', hash: " << queryStringHash;
// store & lookup velocypack plans!!
std::shared_ptr<PlanCacheEntry> planCacheEntry = PlanCache::instance()->lookup(_vocbase, queryStringHash, _queryString, _queryStringLength);
if (planCacheEntry != nullptr) {
// LOG_TOPIC(INFO, Logger::FIXME) << "query found in execution plan cache: '" << std::string(_queryString, _queryStringLength) << "'";
TRI_ASSERT(_trx == nullptr);
TRI_ASSERT(_collections.empty());
// create the transaction object, but do not start it yet
AqlTransaction* trx = new AqlTransaction(
createTransactionContext(), _collections.collections(),
_part == PART_MAIN);
_trx = trx;
VPackBuilder* builder = planCacheEntry->builder.get();
VPackSlice slice = builder->slice();
ExecutionPlan::getCollectionsFromVelocyPack(_ast.get(), slice);
_ast->variables()->fromVelocyPack(slice);
enterState(LOADING_COLLECTIONS);
int res = trx->addCollections(*_collections.collections());
if (res == TRI_ERROR_NO_ERROR) {
res = _trx->begin();
}
if (res != TRI_ERROR_NO_ERROR) {
THROW_ARANGO_EXCEPTION_MESSAGE(res, buildErrorMessage(res));
}
enterState(PLAN_INSTANTIATION);
plan.reset(ExecutionPlan::instantiateFromVelocyPack(_ast.get(), slice));
TRI_ASSERT(plan != nullptr);
}
}
#endif
if (plan == nullptr) {
plan.reset(prepare());
TRI_ASSERT(plan != nullptr);
#if USE_PLAN_CACHE
if (_queryString != nullptr &&
queryStringHash != DontCache &&
_part == PART_MAIN &&
_warnings.empty() &&
_ast->root()->isCacheable()) {
// LOG_TOPIC(INFO, Logger::FIXME) << "storing query in execution plan cache '" << std::string(_queryString, _queryStringLength) << "', hash: " << queryStringHash;
PlanCache::instance()->store(_vocbase, queryStringHash, _queryString, _queryStringLength, plan.get());
}
#endif
}
enterState(EXECUTION);
TRI_ASSERT(_engine == nullptr);
// note that the engine returned here may already be present in our
// own _engine attribute (the instanciation procedure may modify us
// by calling our engine(ExecutionEngine*) function
// this is confusing and should be fixed!
std::unique_ptr<ExecutionEngine> engine(ExecutionEngine::instantiateFromPlan(registry, this, plan.get(), _queryString != nullptr));
if (_engine == nullptr) {
_engine = std::move(engine);
} else {
engine.release();
}
_plan = std::move(plan);
}
/// @brief prepare an AQL query, this is a preparation for execute, but
/// execute calls it internally. The purpose of this separate method is
/// to be able to only prepare a query from VelocyPack and then store it in the
/// QueryRegistry.
QueryResult Query::prepare(QueryRegistry* registry) {
ExecutionPlan* Query::prepare() {
LOG_TOPIC(DEBUG, Logger::QUERIES) << TRI_microtime() - _startTime << " "
<< "Query::prepare"
<< " this: " << (uintptr_t) this;
TRI_ASSERT(registry != nullptr);
try {
init();
enterState(PARSING);
std::unique_ptr<ExecutionPlan> plan;
if (_queryString != nullptr) {
auto parser = std::make_unique<Parser>(this);
std::unique_ptr<ExecutionPlan> plan;
if (_queryString != nullptr) {
parser->parse(false);
// put in bind parameters
parser->ast()->injectBindParameters(_bindParameters);
}
parser->parse(false);
// put in bind parameters
parser->ast()->injectBindParameters(_bindParameters);
_isModificationQuery = parser->isModificationQuery();
// create the transaction object, but do not start it yet
AqlTransaction* trx = new AqlTransaction(
createTransactionContext(), _collections.collections(),
_part == PART_MAIN);
_trx = trx;
try {
bool planRegisters;
// As soon as we start du instantiate the plan we have to clean it
// up before killing the unique_ptr
if (_queryString != nullptr) {
// we have an AST
// optimize the ast
enterState(AST_OPTIMIZATION);
parser->ast()->validateAndOptimize();
enterState(LOADING_COLLECTIONS);
int res = trx->begin();
if (res != TRI_ERROR_NO_ERROR) {
return transactionError(res);
}
enterState(PLAN_INSTANTIATION);
plan.reset(ExecutionPlan::instantiateFromAst(parser->ast()));
if (plan.get() == nullptr) {
// oops
return QueryResult(TRI_ERROR_INTERNAL,
"failed to create query execution engine");
}
// Run the query optimizer:
enterState(PLAN_OPTIMIZATION);
arangodb::aql::Optimizer opt(maxNumberOfPlans());
// get enabled/disabled rules
opt.createPlans(plan.release(), getRulesFromOptions(),
inspectSimplePlans());
// Now plan and all derived plans belong to the optimizer
plan.reset(opt.stealBest()); // Now we own the best one again
planRegisters = true;
} else { // no queryString, we are instantiating from _queryBuilder
enterState(PARSING);
VPackSlice const querySlice = _queryBuilder->slice();
ExecutionPlan::getCollectionsFromVelocyPack(parser->ast(), querySlice);
parser->ast()->variables()->fromVelocyPack(querySlice);
// creating the plan may have produced some collections
// we need to add them to the transaction now (otherwise the query will
// fail)
enterState(LOADING_COLLECTIONS);
int res = trx->addCollections(*_collections.collections());
if (res == TRI_ERROR_NO_ERROR) {
res = trx->begin();
}
if (res != TRI_ERROR_NO_ERROR) {
return transactionError(res);
}
enterState(PLAN_INSTANTIATION);
// we have an execution plan in VelocyPack format
plan.reset(ExecutionPlan::instantiateFromVelocyPack(
parser->ast(), _queryBuilder->slice()));
if (plan.get() == nullptr) {
// oops
return QueryResult(TRI_ERROR_INTERNAL);
}
planRegisters = false;
}
TRI_ASSERT(plan.get() != nullptr);
// varsUsedLater and varsValid are unordered_sets and so their orders
// are not the same in the serialized and deserialized plans
// return the V8 context
exitContext();
enterState(EXECUTION);
ExecutionEngine* engine(ExecutionEngine::instantiateFromPlan(
registry, this, plan.get(), planRegisters));
// If all went well so far, then we keep _plan, _parser and _trx and
// return:
_plan = std::move(plan);
_parser = parser.release();
_engine = engine;
return QueryResult();
} catch (arangodb::basics::Exception const& ex) {
cleanupPlanAndEngine(ex.code());
return QueryResult(ex.code(), ex.message() + getStateString());
} catch (std::bad_alloc const&) {
cleanupPlanAndEngine(TRI_ERROR_OUT_OF_MEMORY);
return QueryResult(
TRI_ERROR_OUT_OF_MEMORY,
TRI_errno_string(TRI_ERROR_OUT_OF_MEMORY) + getStateString());
} catch (std::exception const& ex) {
cleanupPlanAndEngine(TRI_ERROR_INTERNAL);
return QueryResult(TRI_ERROR_INTERNAL, ex.what() + getStateString());
} catch (...) {
cleanupPlanAndEngine(TRI_ERROR_INTERNAL);
return QueryResult(TRI_ERROR_INTERNAL,
TRI_errno_string(TRI_ERROR_INTERNAL) + getStateString());
}
} catch (arangodb::basics::Exception const& ex) {
return QueryResult(ex.code(), ex.message() + getStateString());
} catch (std::bad_alloc const&) {
return QueryResult(
TRI_ERROR_OUT_OF_MEMORY,
TRI_errno_string(TRI_ERROR_OUT_OF_MEMORY) + getStateString());
} catch (std::exception const& ex) {
return QueryResult(TRI_ERROR_INTERNAL, ex.what() + getStateString());
} catch (...) {
return QueryResult(TRI_ERROR_INTERNAL,
TRI_errno_string(TRI_ERROR_INTERNAL) + getStateString());
}
TRI_ASSERT(_trx == nullptr);
// create the transaction object, but do not start it yet
AqlTransaction* trx = new AqlTransaction(
createTransactionContext(), _collections.collections(),
_part == PART_MAIN);
_trx = trx;
// As soon as we start du instantiate the plan we have to clean it
// up before killing the unique_ptr
if (_queryString != nullptr) {
// we have an AST
// optimize the ast
enterState(AST_OPTIMIZATION);
_ast->validateAndOptimize();
enterState(LOADING_COLLECTIONS);
int res = _trx->begin();
if (res != TRI_ERROR_NO_ERROR) {
THROW_ARANGO_EXCEPTION_MESSAGE(res, buildErrorMessage(res));
}
enterState(PLAN_INSTANTIATION);
plan.reset(ExecutionPlan::instantiateFromAst(_ast.get()));
if (plan.get() == nullptr) {
// oops
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "failed to create query execution engine");
}
// Run the query optimizer:
enterState(PLAN_OPTIMIZATION);
arangodb::aql::Optimizer opt(maxNumberOfPlans());
// get enabled/disabled rules
opt.createPlans(plan.release(), getRulesFromOptions(),
inspectSimplePlans());
// Now plan and all derived plans belong to the optimizer
plan.reset(opt.stealBest()); // Now we own the best one again
} else { // no queryString, we are instantiating from _queryBuilder
VPackSlice const querySlice = _queryBuilder->slice();
ExecutionPlan::getCollectionsFromVelocyPack(_ast.get(), querySlice);
_ast->variables()->fromVelocyPack(querySlice);
// creating the plan may have produced some collections
// we need to add them to the transaction now (otherwise the query will
// fail)
enterState(LOADING_COLLECTIONS);
int res = trx->addCollections(*_collections.collections());
if (res == TRI_ERROR_NO_ERROR) {
res = _trx->begin();
}
if (res != TRI_ERROR_NO_ERROR) {
THROW_ARANGO_EXCEPTION_MESSAGE(res, buildErrorMessage(res));
}
enterState(PLAN_INSTANTIATION);
// we have an execution plan in VelocyPack format
plan.reset(ExecutionPlan::instantiateFromVelocyPack(_ast.get(), _queryBuilder->slice()));
if (plan.get() == nullptr) {
// oops
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "could not create plan from vpack");
}
}
TRI_ASSERT(plan != nullptr);
// varsUsedLater and varsValid are unordered_sets and so their orders
// are not the same in the serialized and deserialized plans
// return the V8 context if we are in one
exitContext();
return plan.release();
}
/// @brief execute an AQL query
@ -625,20 +656,17 @@ QueryResult Query::execute(QueryRegistry* registry) {
try {
bool useQueryCache = canUseQueryCache();
uint64_t queryStringHash = 0;
uint64_t queryStringHash = hash();
if (useQueryCache) {
// hash the query
queryStringHash = hash();
// check the query cache for an existing result
auto cacheEntry = arangodb::aql::QueryCache::instance()->lookup(
_vocbase, queryStringHash, _queryString, _queryLength);
_vocbase, queryStringHash, _queryString, _queryStringLength);
arangodb::aql::QueryCacheResultEntryGuard guard(cacheEntry);
if (cacheEntry != nullptr) {
// got a result from the query cache
QueryResult res(TRI_ERROR_NO_ERROR);
QueryResult res;
// we don't have yet a transaction when we're here, so let's create
// a mimimal context to build the result
res.context = std::make_shared<StandaloneTransactionContext>(_vocbase);
@ -651,18 +679,15 @@ QueryResult Query::execute(QueryRegistry* registry) {
}
}
QueryResult result = prepare(registry);
if (result.code != TRI_ERROR_NO_ERROR) {
return result;
}
// will throw if it fails
prepare(registry, queryStringHash);
if (_queryString == nullptr) {
// we don't have query string... now pass query id to WorkMonitor
work.reset(new AqlWorkStack(_vocbase, _id));
} else {
// we do have a query string... pass query to WorkMonitor
work.reset(new AqlWorkStack(_vocbase, _id, _queryString, _queryLength));
work.reset(new AqlWorkStack(_vocbase, _id, _queryString, _queryStringLength));
}
log();
@ -672,20 +697,22 @@ QueryResult Query::execute(QueryRegistry* registry) {
useQueryCache = false;
}
AqlItemBlock* value = nullptr;
VPackOptions options = VPackOptions::Defaults;
options.buildUnindexedArrays = true;
options.buildUnindexedObjects = true;
TRI_ASSERT(_engine != nullptr);
auto resultBuilder = std::make_shared<VPackBuilder>(&options);
resultBuilder->buffer()->reserve(
16 * 1024); // reserve some space in Builder to avoid frequent reallocs
TRI_ASSERT(_engine != nullptr);
// this is the RegisterId our results can be found in
auto const resultRegister = _engine->resultRegister();
AqlItemBlock* value = nullptr;
try {
resultBuilder->openArray();
// this is the RegisterId our results can be found in
auto const resultRegister = _engine->resultRegister();
if (useQueryCache) {
// iterate over result, return it and store it in query cache
@ -712,7 +739,7 @@ QueryResult Query::execute(QueryRegistry* registry) {
if (_warnings.empty()) {
// finally store the generated result in the query cache
auto result = QueryCache::instance()->store(
_vocbase, queryStringHash, _queryString, _queryLength,
_vocbase, queryStringHash, _queryString, _queryStringLength,
resultBuilder, _trx->state()->collectionNames());
if (result == nullptr) {
@ -742,9 +769,19 @@ QueryResult Query::execute(QueryRegistry* registry) {
delete value;
throw;
}
LOG_TOPIC(DEBUG, Logger::QUERIES) << TRI_microtime() - _startTime << " "
<< "Query::execute: before _trx->commit"
<< " this: " << (uintptr_t) this;
_trx->commit();
LOG_TOPIC(DEBUG, Logger::QUERIES)
<< TRI_microtime() - _startTime << " "
<< "Query::execute: before cleanupPlanAndEngine"
<< " this: " << (uintptr_t) this;
QueryResult result;
result.context = _trx->transactionContext();
_engine->_stats.setExecutionTime(TRI_microtime() - _startTime);
@ -752,7 +789,7 @@ QueryResult Query::execute(QueryRegistry* registry) {
cleanupPlanAndEngine(TRI_ERROR_NO_ERROR, stats.get());
enterState(FINALIZATION);
result.warnings = warningsToVelocyPack();
result.result = resultBuilder;
result.stats = stats;
@ -797,46 +834,48 @@ QueryResultV8 Query::executeV8(v8::Isolate* isolate, QueryRegistry* registry) {
LOG_TOPIC(DEBUG, Logger::QUERIES) << TRI_microtime() - _startTime << " "
<< "Query::executeV8"
<< " this: " << (uintptr_t) this;
TRI_ASSERT(registry != nullptr);
std::unique_ptr<AqlWorkStack> work;
try {
bool useQueryCache = canUseQueryCache();
uint64_t queryStringHash = 0;
uint64_t queryStringHash = hash();
if (useQueryCache) {
// hash the query
queryStringHash = hash();
// check the query cache for an existing result
auto cacheEntry = arangodb::aql::QueryCache::instance()->lookup(
_vocbase, queryStringHash, _queryString, _queryLength);
_vocbase, queryStringHash, _queryString, _queryStringLength);
arangodb::aql::QueryCacheResultEntryGuard guard(cacheEntry);
if (cacheEntry != nullptr) {
// got a result from the query cache
QueryResultV8 res(TRI_ERROR_NO_ERROR);
QueryResultV8 result;
// we don't have yet a transaction when we're here, so let's create
// a mimimal context to build the result
res.context = std::make_shared<StandaloneTransactionContext>(_vocbase);
result.context = std::make_shared<StandaloneTransactionContext>(_vocbase);
v8::Handle<v8::Value> values =
TRI_VPackToV8(isolate, cacheEntry->_queryResult->slice(),
res.context->getVPackOptions());
result.context->getVPackOptions());
TRI_ASSERT(values->IsArray());
res.result = v8::Handle<v8::Array>::Cast(values);
res.cached = true;
return res;
result.result = v8::Handle<v8::Array>::Cast(values);
result.cached = true;
return result;
}
}
QueryResultV8 result = prepare(registry);
if (result.code != TRI_ERROR_NO_ERROR) {
return result;
// will throw if it fails
prepare(registry, queryStringHash);
if (_queryString == nullptr) {
// we don't have query string... now pass query id to WorkMonitor
work.reset(new AqlWorkStack(_vocbase, _id));
} else {
// we do have a query string... pass query to WorkMonitor
work.reset(new AqlWorkStack(_vocbase, _id, _queryString, _queryStringLength));
}
work.reset(new AqlWorkStack(_vocbase, _id, _queryString, _queryLength));
log();
if (useQueryCache && (_isModificationQuery || !_warnings.empty() ||
@ -844,6 +883,7 @@ QueryResultV8 Query::executeV8(v8::Isolate* isolate, QueryRegistry* registry) {
useQueryCache = false;
}
QueryResultV8 result;
result.result = v8::Array::New(isolate);
TRI_ASSERT(_engine != nullptr);
@ -884,7 +924,7 @@ QueryResultV8 Query::executeV8(v8::Isolate* isolate, QueryRegistry* registry) {
if (_warnings.empty()) {
// finally store the generated result in the query cache
QueryCache::instance()->store(_vocbase, queryStringHash, _queryString,
_queryLength, builder,
_queryStringLength, builder,
_trx->state()->collectionNames());
}
} else {
@ -930,6 +970,7 @@ QueryResultV8 Query::executeV8(v8::Isolate* isolate, QueryRegistry* registry) {
<< " this: " << (uintptr_t) this;
result.context = _trx->transactionContext();
_engine->_stats.setExecutionTime(TRI_microtime() - _startTime);
auto stats = std::make_shared<VPackBuilder>();
cleanupPlanAndEngine(TRI_ERROR_NO_ERROR, stats.get());
@ -946,6 +987,10 @@ QueryResultV8 Query::executeV8(v8::Isolate* isolate, QueryRegistry* registry) {
// patch executionTime stats value in place
// we do this because "executionTime" should include the whole span of the execution and we have to set it at the very end
basics::VelocyPackHelper::patchDouble(result.stats->slice().get("executionTime"), TRI_microtime() - _startTime);
LOG_TOPIC(DEBUG, Logger::QUERIES) << TRI_microtime() - _startTime << " "
<< "Query::executeV8:returning"
<< " this: " << (uintptr_t) this;
return result;
} catch (arangodb::basics::Exception const& ex) {
@ -1017,7 +1062,7 @@ QueryResult Query::explain() {
int res = _trx->begin();
if (res != TRI_ERROR_NO_ERROR) {
return transactionError(res);
THROW_ARANGO_EXCEPTION_MESSAGE(res, buildErrorMessage(res));
}
enterState(PLAN_INSTANTIATION);
@ -1066,7 +1111,7 @@ QueryResult Query::explain() {
result.result = bestPlan->toVelocyPack(parser.ast(), verbosePlans());
// cacheability
result.cached = (_queryString != nullptr && _queryLength > 0 &&
result.cached = (_queryString != nullptr && _queryStringLength > 0 &&
!_isModificationQuery && _warnings.empty() &&
_ast->root()->isCacheable());
}
@ -1091,16 +1136,20 @@ QueryResult Query::explain() {
TRI_errno_string(TRI_ERROR_INTERNAL) + getStateString());
}
}
void Query::engine(ExecutionEngine* engine) {
_engine.reset(engine);
}
/// @brief get v8 executor
Executor* Query::executor() {
if (_executor == nullptr) {
// the executor is a singleton per query
_executor = new Executor(literalSizeThreshold());
_executor.reset(new Executor(literalSizeThreshold()));
}
TRI_ASSERT(_executor != nullptr);
return _executor;
return _executor.get();
}
/// @brief enter a V8 context
@ -1226,16 +1275,15 @@ void Query::init() {
}
TRI_ASSERT(_id == 0);
TRI_ASSERT(_ast == nullptr);
_id = Query::NextId();
TRI_ASSERT(_id != 0);
TRI_ASSERT(_profile == nullptr);
_profile = new Profile(this);
_profile.reset(new Profile(this));
enterState(INITIALIZATION);
TRI_ASSERT(_ast == nullptr);
_ast = new Ast(this);
_ast.reset(new Ast(this));
}
/// @brief log a query
@ -1245,16 +1293,20 @@ void Query::log() {
LOG_TOPIC(TRACE, Logger::QUERIES)
<< "executing query " << _id << ": '"
<< std::string(_queryString, (std::min)(_queryLength, MaxLength))
.append(_queryLength > MaxLength ? "..." : "") << "'";
<< std::string(_queryString, (std::min)(_queryStringLength, MaxLength))
.append(_queryStringLength > MaxLength ? "..." : "") << "'";
}
}
/// @brief calculate a hash value for the query and bind parameters
uint64_t Query::hash() const {
if (_queryString == nullptr) {
return DontCache;
}
// hash the query string first
uint64_t hash = arangodb::aql::QueryCache::instance()->hashQueryString(
_queryString, _queryLength);
_queryString, _queryStringLength);
// handle "fullCount" option. if this option is set, the query result will
// be different to when it is not set!
@ -1270,6 +1322,14 @@ uint64_t Query::hash() const {
} else {
hash = fasthash64(TRI_CHAR_LENGTH_PAIR("count:false"), hash);
}
// also hash "optimizer" options
VPackSlice options = basics::VelocyPackHelper::EmptyObjectValue();
if (_options != nullptr && _options->slice().isObject()) {
options = _options->slice().get("optimizer");
}
hash ^= options.hash();
// blend query hash with bind parameters
return hash ^ _bindParameters.hash();
@ -1277,7 +1337,7 @@ uint64_t Query::hash() const {
/// @brief whether or not the query cache can be used for the query
bool Query::canUseQueryCache() const {
if (_queryString == nullptr || _queryLength < 8) {
if (_queryString == nullptr || _queryStringLength < 8) {
return false;
}
@ -1302,16 +1362,17 @@ bool Query::canUseQueryCache() const {
return false;
}
/// @brief neatly format transaction error to the user.
QueryResult Query::transactionError(int errorCode) const {
/// @brief neatly format exception messages for the users
std::string Query::buildErrorMessage(int errorCode) const {
std::string err(TRI_errno_string(errorCode));
if (_queryString != nullptr && verboseErrors()) {
err +=
std::string("\nwhile executing:\n") + _queryString + std::string("\n");
err += "\nwhile executing:\n";
err.append(_queryString, _queryStringLength);
err += "\n";
}
return QueryResult(errorCode, err);
return err;
}
/// @brief read the "optimizer.inspectSimplePlans" section from the options
@ -1401,8 +1462,7 @@ void Query::cleanupPlanAndEngine(int errorCode, VPackBuilder* statsBuilder) {
// shutdown may fail but we must not throw here
// (we're also called from the destructor)
}
delete _engine;
_engine = nullptr;
_engine.reset();
}
if (_trx != nullptr) {
@ -1411,17 +1471,11 @@ void Query::cleanupPlanAndEngine(int errorCode, VPackBuilder* statsBuilder) {
_trx = nullptr;
}
if (_parser != nullptr) {
delete _parser;
_parser = nullptr;
}
_plan.reset();
}
/// @brief create a TransactionContext
std::shared_ptr<TransactionContext>
Query::createTransactionContext() {
std::shared_ptr<TransactionContext> Query::createTransactionContext() {
if (_contextOwnedByExterior) {
// we can use v8
return arangodb::V8TransactionContext::Create(_vocbase, true);
@ -1430,7 +1484,7 @@ Query::createTransactionContext() {
return arangodb::StandaloneTransactionContext::Create(_vocbase);
}
/// @brief look up a graph either from our cache list or from the _graphs
/// @brief look up a graph either from our cache list or from the _graphs
/// collection
Graph const* Query::lookupGraphByName(std::string const& name) {
auto it = _graphs.find(name);
@ -1440,7 +1494,7 @@ Graph const* Query::lookupGraphByName(std::string const& name) {
}
std::unique_ptr<arangodb::aql::Graph> g(
arangodb::lookupGraphByName(_vocbase, name));
arangodb::lookupGraphByName(createTransactionContext(), name));
if (g == nullptr) {
return nullptr;
@ -1450,7 +1504,7 @@ Graph const* Query::lookupGraphByName(std::string const& name) {
return g.release();
}
/// @brief returns the next query id
TRI_voc_tick_t Query::NextId() {
return NextQueryId.fetch_add(1, std::memory_order_seq_cst);

View File

@ -59,7 +59,6 @@ class Ast;
class ExecutionEngine;
class ExecutionPlan;
class Executor;
class Parser;
class Query;
class QueryRegistry;
@ -164,10 +163,12 @@ class Query {
char const* queryString() const { return _queryString; }
/// @brief get the length of the query string
size_t queryLength() const { return _queryLength; }
size_t queryLength() const { return _queryStringLength; }
/// @brief getter for _ast
Ast* ast() const { return _ast; }
Ast* ast() const {
return _ast.get();
}
/// @brief should we return verbose plans?
bool verbosePlans() const { return getBooleanOption("verbosePlans", false); }
@ -238,12 +239,8 @@ class Query {
/// @brief register a warning
void registerWarning(int, char const* = nullptr);
/// @brief prepare an AQL query, this is a preparation for execute, but
/// execute calls it internally. The purpose of this separate method is
/// to be able to only prepare a query from VelocyPack and then store it in the
/// QueryRegistry.
QueryResult prepare(QueryRegistry*);
void prepare(QueryRegistry*, uint64_t queryStringHash);
/// @brief execute an AQL query
QueryResult execute(QueryRegistry*);
@ -262,10 +259,10 @@ class Query {
Executor* executor();
/// @brief return the engine, if prepared
ExecutionEngine* engine() { return _engine; }
ExecutionEngine* engine() const { return _engine.get(); }
/// @brief inject the engine
void engine(ExecutionEngine* engine) { _engine = engine; }
void engine(ExecutionEngine* engine);
/// @brief return the transaction, if prepared
inline transaction::Methods* trx() { return _trx; }
@ -333,6 +330,12 @@ class Query {
/// @brief initializes the query
void init();
/// @brief prepare an AQL query, this is a preparation for execute, but
/// execute calls it internally. The purpose of this separate method is
/// to be able to only prepare a query from VelocyPack and then store it in the
/// QueryRegistry.
ExecutionPlan* prepare();
void setExecutionTime();
/// @brief log a query
@ -371,8 +374,8 @@ class Query {
/// @brief read the "optimizer.rules" section from the options
std::vector<std::string> getRulesFromOptions() const;
/// @brief neatly format transaction errors to the user.
QueryResult transactionError(int errorCode) const;
/// @brief neatly format exception messages for the users
std::string buildErrorMessage(int errorCode) const;
/// @brief enter a new state
void enterState(ExecutionState);
@ -400,7 +403,7 @@ class Query {
TRI_vocbase_t* _vocbase;
/// @brief V8 code executor
Executor* _executor;
std::unique_ptr<Executor> _executor;
/// @brief the currently used V8 context
V8Context* _context;
@ -412,7 +415,7 @@ class Query {
char const* _queryString;
/// @brief length of the query string in bytes
size_t const _queryLength;
size_t const _queryStringLength;
/// @brief query in a VelocyPack structure
std::shared_ptr<arangodb::velocypack::Builder> const _queryBuilder;
@ -428,20 +431,17 @@ class Query {
/// @brief _ast, we need an ast to manage the memory for AstNodes, even
/// if we do not have a parser, because AstNodes occur in plans and engines
Ast* _ast;
std::unique_ptr<Ast> _ast;
/// @brief query execution profile
Profile* _profile;
std::unique_ptr<Profile> _profile;
/// @brief current state the query is in (used for profiling and error
/// messages)
ExecutionState _state;
/// @brief the ExecutionPlan object, if the query is prepared
std::unique_ptr<ExecutionPlan> _plan;
/// @brief the Parser object, if the query is prepared
Parser* _parser;
std::shared_ptr<ExecutionPlan> _plan;
/// @brief the transaction object, in a distributed query every part of
/// the query has its own transaction object. The transaction object is
@ -449,7 +449,7 @@ class Query {
transaction::Methods* _trx;
/// @brief the ExecutionEngine object, if the query is prepared
ExecutionEngine* _engine;
std::unique_ptr<ExecutionEngine> _engine;
/// @brief maximum number of warnings
size_t _maxWarningCount;

View File

@ -21,7 +21,7 @@
/// @author Jan Steemann
////////////////////////////////////////////////////////////////////////////////
#include "Aql/QueryCache.h"
#include "QueryCache.h"
#include "Basics/fasthash.h"
#include "Basics/Exceptions.h"
#include "Basics/MutexLocker.h"

View File

@ -55,6 +55,12 @@ QueryResources::~QueryResources() {
_resourceMonitor->decreaseMemoryUsage(_nodes.size() * sizeof(AstNode) + _nodes.capacity() * sizeof(AstNode*));
}
// TODO: FIXME
void QueryResources::steal() {
_strings.clear();
_nodes.clear();
}
/// @brief add a node to the list of nodes
void QueryResources::addNode(AstNode* node) {

View File

@ -41,7 +41,9 @@ class QueryResources {
explicit QueryResources(ResourceMonitor*);
~QueryResources();
void steal();
/// @brief add a node to the list of nodes
void addNode(AstNode*);

View File

@ -44,6 +44,7 @@ struct QueryResultV8 : public QueryResult {
QueryResultV8(int code, std::string const& details)
: QueryResult(code, details), result() {}
QueryResultV8() : QueryResult(TRI_ERROR_NO_ERROR) {}
explicit QueryResultV8(int code) : QueryResult(code, ""), result() {}
v8::Handle<v8::Array> result;

View File

@ -26,6 +26,7 @@
#include "Aql/ClusterBlocks.h"
#include "Aql/ExecutionBlock.h"
#include "Aql/ExecutionEngine.h"
#include "Aql/Query.h"
#include "Basics/StaticStrings.h"
#include "Basics/StringUtils.h"
#include "Basics/VPackStringBufferAdapter.h"
@ -95,14 +96,18 @@ void RestAqlHandler::createQueryFromVelocyPack() {
VelocyPackHelper::getStringValue(querySlice, "part", "");
auto planBuilder = std::make_shared<VPackBuilder>(VPackBuilder::clone(plan));
auto query = new Query(false, _vocbase, planBuilder, options,
(part == "main" ? PART_MAIN : PART_DEPENDENT));
QueryResult res = query->prepare(_queryRegistry);
if (res.code != TRI_ERROR_NO_ERROR) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "failed to instantiate the query: " << res.details;
generateError(rest::ResponseCode::BAD,
TRI_ERROR_QUERY_BAD_JSON_PLAN, res.details);
delete query;
auto query = std::make_unique<Query>(false, _vocbase, planBuilder, options,
(part == "main" ? PART_MAIN : PART_DEPENDENT));
try {
query->prepare(_queryRegistry, 0);
} catch (std::exception const& ex) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "failed to instantiate the query: " << ex.what();
generateError(rest::ResponseCode::BAD, TRI_ERROR_QUERY_BAD_JSON_PLAN, ex.what());
return;
} catch (...) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "failed to instantiate the query";
generateError(rest::ResponseCode::BAD, TRI_ERROR_QUERY_BAD_JSON_PLAN);
return;
}
@ -116,14 +121,15 @@ void RestAqlHandler::createQueryFromVelocyPack() {
}
_qId = TRI_NewTickServer();
auto transactionContext = query->trx()->transactionContext().get();
try {
_queryRegistry->insert(_qId, query, ttl);
_queryRegistry->insert(_qId, query.get(), ttl);
query.release();
} catch (...) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "could not keep query in registry";
generateError(rest::ResponseCode::BAD, TRI_ERROR_INTERNAL,
"could not keep query in registry");
delete query;
return;
}
@ -139,8 +145,7 @@ void RestAqlHandler::createQueryFromVelocyPack() {
return;
}
sendResponse(rest::ResponseCode::ACCEPTED, answerBody.slice(),
query->trx()->transactionContext().get());
sendResponse(rest::ResponseCode::ACCEPTED, answerBody.slice(), transactionContext);
}
// POST method for /_api/aql/parse (internal)
@ -165,14 +170,12 @@ void RestAqlHandler::parseQuery() {
return;
}
auto query =
new Query(false, _vocbase, queryString.c_str(), queryString.size(),
auto query = std::make_unique<Query>(false, _vocbase, queryString.c_str(), queryString.size(),
std::shared_ptr<VPackBuilder>(), nullptr, PART_MAIN);
QueryResult res = query->parse();
if (res.code != TRI_ERROR_NO_ERROR) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "failed to instantiate the Query: " << res.details;
generateError(rest::ResponseCode::BAD, res.code, res.details);
delete query;
return;
}
@ -306,15 +309,19 @@ void RestAqlHandler::createQueryFromString() {
auto options = std::make_shared<VPackBuilder>(
VPackBuilder::clone(querySlice.get("options")));
auto query = new Query(false, _vocbase, queryString.c_str(),
auto query = std::make_unique<Query>(false, _vocbase, queryString.c_str(),
queryString.size(), bindVars, options,
(part == "main" ? PART_MAIN : PART_DEPENDENT));
QueryResult res = query->prepare(_queryRegistry);
if (res.code != TRI_ERROR_NO_ERROR) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "failed to instantiate the query: " << res.details;
generateError(rest::ResponseCode::BAD,
TRI_ERROR_QUERY_BAD_JSON_PLAN, res.details);
delete query;
try {
query->prepare(_queryRegistry, 0);
} catch (std::exception const& ex) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "failed to instantiate the query: " << ex.what();
generateError(rest::ResponseCode::BAD, TRI_ERROR_QUERY_BAD_JSON_PLAN, ex.what());
return;
} catch (...) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "failed to instantiate the query";
generateError(rest::ResponseCode::BAD, TRI_ERROR_QUERY_BAD_JSON_PLAN);
return;
}
@ -327,15 +334,16 @@ void RestAqlHandler::createQueryFromString() {
ttl = arangodb::basics::StringUtils::doubleDecimal(ttlstring);
}
auto transactionContext = query->trx()->transactionContext().get();
_qId = TRI_NewTickServer();
try {
_queryRegistry->insert(_qId, query, ttl);
_queryRegistry->insert(_qId, query.get(), ttl);
query.release();
} catch (...) {
LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "could not keep query in registry";
generateError(rest::ResponseCode::BAD, TRI_ERROR_INTERNAL,
"could not keep query in registry");
delete query;
return;
}
@ -351,8 +359,7 @@ void RestAqlHandler::createQueryFromString() {
return;
}
sendResponse(rest::ResponseCode::ACCEPTED, answerBody.slice(),
query->trx()->transactionContext().get());
sendResponse(rest::ResponseCode::ACCEPTED, answerBody.slice(), transactionContext);
}
// PUT method for /_api/aql/<operation>/<queryId>, (internal)

View File

@ -25,6 +25,7 @@
#include "Aql/AqlItemBlock.h"
#include "Aql/ExecutionEngine.h"
#include "Aql/ExecutionPlan.h"
#include "Aql/Query.h"
#include "Utils/OperationCursor.h"
#include "Transaction/Methods.h"
#include "VocBase/EdgeCollectionInfo.h"

View File

@ -28,6 +28,7 @@
#include "Aql/Ast.h"
#include "Aql/Collection.h"
#include "Aql/ExecutionPlan.h"
#include "Aql/Query.h"
#include "Cluster/ClusterComm.h"
#include "Indexes/Index.h"
#include "Utils/CollectionNameResolver.h"
@ -178,6 +179,12 @@ ShortestPathNode::ShortestPathNode(ExecutionPlan* plan, size_t id,
} else {
addEdgeColl(eColName, dir);
}
if (dir == TRI_EDGE_ANY) {
// collection with direction ANY must be added again
_graphInfo.add(VPackValue(eColName));
}
}
_graphInfo.close();
} else {
@ -337,9 +344,17 @@ ShortestPathNode::ShortestPathNode(ExecutionPlan* plan,
THROW_ARANGO_EXCEPTION(TRI_ERROR_GRAPH_NOT_FOUND);
}
auto eColls = _graphObj->edgeCollections();
for (auto const& n : eColls) {
_edgeColls.push_back(n);
auto const& eColls = _graphObj->edgeCollections();
for (auto const& it : eColls) {
_edgeColls.push_back(it);
// if there are twice as many directions as collections, this means we
// have a shortest path with direction ANY. we must add each collection
// twice then
if (_directions.size() == 2 * eColls.size()) {
// add collection again
_edgeColls.push_back(it);
}
}
} else {
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_QUERY_BAD_JSON_PLAN,

View File

@ -26,22 +26,23 @@
using namespace arangodb::aql;
ShortestPathOptions::ShortestPathOptions(VPackSlice const& slice) {
VPackSlice obj = slice.get("shortestpathFlags");
weightAttribute = "";
if (obj.hasKey("weightAttribute")) {
VPackSlice v = obj.get("weightAttribute");
if (v.isString()) {
weightAttribute = v.copyString();
ShortestPathOptions::ShortestPathOptions(VPackSlice const& slice)
: weightAttribute(), defaultWeight(1) {
VPackSlice obj = slice.get("shortestPathFlags");
if (obj.isObject()) {
if (obj.hasKey("weightAttribute")) {
VPackSlice v = obj.get("weightAttribute");
if (v.isString()) {
weightAttribute = v.copyString();
}
}
}
defaultWeight = 1;
if (obj.hasKey("defaultWeight")) {
VPackSlice v = obj.get("defaultWeight");
if (v.isNumber()) {
defaultWeight = v.getNumericValue<double>();
if (obj.hasKey("defaultWeight")) {
VPackSlice v = obj.get("defaultWeight");
if (v.isNumber()) {
defaultWeight = v.getNumericValue<double>();
}
}
}
}

View File

@ -39,7 +39,7 @@ struct ShortestPathOptions {
/// @brief constructor, using default values
ShortestPathOptions()
: weightAttribute(""),
: weightAttribute(),
defaultWeight(1) {}
void toVelocyPack(arangodb::velocypack::Builder&) const;

View File

@ -85,6 +85,10 @@ SortCondition::SortCondition(
if (node->type == NODE_TYPE_REFERENCE) {
handled = true;
if (fieldNames.size() > 1) {
std::reverse(fieldNames.begin(), fieldNames.end());
}
_fields.emplace_back(std::make_pair(
static_cast<Variable const*>(node->getData()), fieldNames));
@ -146,7 +150,7 @@ size_t SortCondition::coveredAttributes(
}
auto const& field = _fields[fieldsPosition];
// ...and check if the field is present in the index definition too
if (reference == field.first &&
arangodb::basics::AttributeName::isIdentical(field.second, indexAttributes[i], false)) {

View File

@ -27,6 +27,7 @@
#include "Aql/ExecutionNode.h"
#include "Aql/ExecutionPlan.h"
#include "Aql/Functions.h"
#include "Aql/Query.h"
#include "Basics/ScopeGuard.h"
#include "Basics/StringRef.h"
#include "Cluster/ClusterComm.h"

View File

@ -103,7 +103,57 @@ static AstNode* BuildExpansionReplacement(Ast* ast, AstNode const* condition, As
return ast->createNodeBinaryOperator(type, lhs, rhs);
}
static inline bool IsSupportedNode(AstNode const* node) {
static bool IsSupportedNode(Variable const* pathVar, AstNode const* node) {
// do a quick first check for all comparisons
switch (node->type) {
case NODE_TYPE_OPERATOR_BINARY_ARRAY_EQ:
case NODE_TYPE_OPERATOR_BINARY_ARRAY_NE:
case NODE_TYPE_OPERATOR_BINARY_ARRAY_LT:
case NODE_TYPE_OPERATOR_BINARY_ARRAY_LE:
case NODE_TYPE_OPERATOR_BINARY_ARRAY_GT:
case NODE_TYPE_OPERATOR_BINARY_ARRAY_GE:
case NODE_TYPE_OPERATOR_BINARY_ARRAY_IN:
case NODE_TYPE_OPERATOR_BINARY_ARRAY_NIN:
case NODE_TYPE_OPERATOR_BINARY_EQ:
case NODE_TYPE_OPERATOR_BINARY_NE:
case NODE_TYPE_OPERATOR_BINARY_LT:
case NODE_TYPE_OPERATOR_BINARY_LE:
case NODE_TYPE_OPERATOR_BINARY_GT:
case NODE_TYPE_OPERATOR_BINARY_GE:
case NODE_TYPE_OPERATOR_BINARY_IN:
case NODE_TYPE_OPERATOR_BINARY_NIN: {
// the following types of expressions are not supported
// p.edges[0]._from op whatever attribute access
// whatever attribute access op p.edges[0]._from
AstNode const* lhs = node->getMember(0);
AstNode const* rhs = node->getMember(1);
if (lhs->isAttributeAccessForVariable(pathVar, true)) {
// p.xxx op whatever
if (rhs->type != NODE_TYPE_VALUE &&
rhs->type != NODE_TYPE_ARRAY &&
rhs->type != NODE_TYPE_OBJECT &&
rhs->type != NODE_TYPE_REFERENCE) {
return false;
}
} else if (rhs->isAttributeAccessForVariable(pathVar, true)) {
// whatever op p.xxx
if (lhs->type != NODE_TYPE_VALUE &&
lhs->type != NODE_TYPE_ARRAY &&
lhs->type != NODE_TYPE_OBJECT &&
lhs->type != NODE_TYPE_REFERENCE) {
return false;
}
}
break;
}
default: {
// intentionally no other cases defined...
// we'll simply fall through to the next switch..case statement
break;
}
}
switch (node->type) {
case NODE_TYPE_VARIABLE:
case NODE_TYPE_OPERATOR_UNARY_PLUS:
@ -169,7 +219,7 @@ static bool checkPathVariableAccessFeasible(Ast* ast, AstNode* parent,
Variable const* pathVar,
bool& conditionIsImpossible) {
AstNode* node = parent->getMemberUnchecked(testIndex);
if (!IsSupportedNode(node)) {
if (!IsSupportedNode(pathVar, node)) {
return false;
}
// We need to walk through each branch and validate:
@ -193,11 +243,11 @@ static bool checkPathVariableAccessFeasible(Ast* ast, AstNode* parent,
// We define that patternStep >= 6 is complete Match.
unsigned char patternStep = 0;
auto supportedGuard = [&notSupported](AstNode const* n, void*) -> bool {
auto supportedGuard = [&notSupported, pathVar](AstNode const* n, void*) -> bool {
if (notSupported) {
return false;
}
if (!IsSupportedNode(n)) {
if (!IsSupportedNode(pathVar, n)) {
notSupported = true;
return false;
}

Some files were not shown because too many files have changed in this diff Show More