mirror of https://gitee.com/bigwinds/arangodb

commit 59b49485b5: Merge branch 'devel' into catch_tests_ng
@@ -24,8 +24,8 @@ addons:
       - ubuntu-toolchain-r-test
       - george-edison55-precise-backports
     packages:
-      - g++-4.9
-      - gcc-4.9
+      - g++-5
+      - gcc-5
       - binutils-gold
      - gdb
      - cmake-data
@@ -89,10 +89,15 @@
 #include <conio.h>
 #include <windows.h>
 #include <io.h>
-#if _MSC_VER < 1900
+
+#if defined(_MSC_VER) && _MSC_VER < 1900
 #define snprintf _snprintf  // Microsoft headers use underscores in some names
 #endif
+
+#if !defined __GNUC__
+#define strcasecmp _stricmp
+#endif
 
 #define strdup _strdup
 #define isatty _isatty
 #define write _write
@@ -49,6 +49,9 @@ struct HexDump {
       : HexDump(*slice, valuesPerLine, separator) {}
 
+  static std::string toHex(uint8_t value);
+  std::string toString() const;
+
   friend std::ostream& operator<<(std::ostream&, HexDump const&);
 
   Slice const slice;
   int valuesPerLine;
@@ -58,8 +61,4 @@ struct HexDump {
 }  // namespace arangodb::velocypack
 }  // namespace arangodb
 
-std::ostream& operator<<(std::ostream&, arangodb::velocypack::HexDump const*);
-
-std::ostream& operator<<(std::ostream&, arangodb::velocypack::HexDump const&);
-
 #endif
@@ -855,6 +855,7 @@ class Slice {
     return Slice(left).equals(Slice(right));
   }
 
+  std::string toHex() const;
  std::string toJson(Options const* options = &Options::Defaults) const;
  std::string toString(Options const* options = &Options::Defaults) const;
  std::string hexType() const;
@@ -42,14 +42,41 @@ std::string HexDump::toHex(uint8_t value) {
   return result;
 }
 
-std::ostream& operator<<(std::ostream& stream, HexDump const* hexdump) {
+std::string HexDump::toString() const {
+  ValueLength length = slice.byteSize();
+  std::string result;
+  result.reserve(4 * (length + separator.size()) + (length / valuesPerLine) + 1);
+
   int current = 0;
 
-  for (uint8_t it : hexdump->slice) {
+  for (uint8_t it : slice) {
     if (current != 0) {
-      stream << hexdump->separator;
+      result.append(separator);
 
-      if (hexdump->valuesPerLine > 0 && current == hexdump->valuesPerLine) {
-        stream << std::endl;
+      if (valuesPerLine > 0 && current == valuesPerLine) {
+        result.push_back('\n');
         current = 0;
       }
+    }
+
+    result.append(HexDump::toHex(it));
+    ++current;
+  }
+
+  return result;
+}
+
+namespace arangodb {
+namespace velocypack {
+
+std::ostream& operator<<(std::ostream& stream, HexDump const& hexdump) {
+  int current = 0;
+
+  for (uint8_t it : hexdump.slice) {
+    if (current != 0) {
+      stream << hexdump.separator;
+
+      if (hexdump.valuesPerLine > 0 && current == hexdump.valuesPerLine) {
+        stream << std::endl;
+        current = 0;
+      }
@@ -62,6 +89,5 @@ std::ostream& operator<<(std::ostream& stream, HexDump const* hexdump) {
   return stream;
 }
 
-std::ostream& operator<<(std::ostream& stream, HexDump const& hexdump) {
-  return operator<<(stream, &hexdump);
-}
+}
+}
@@ -390,6 +390,11 @@ Slice Slice::translateUnchecked() const {
   return Slice();
 }
 
+std::string Slice::toHex() const {
+  HexDump dump(this);
+  return dump.toString();
+}
+
 std::string Slice::toJson(Options const* options) const {
   std::string buffer;
   StringSink sink(&buffer);
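The refactoring above splits the hex formatting into a reusable `HexDump::toString()` and adds the `Slice::toHex()` convenience wrapper. A minimal usage sketch (illustrative only; it assumes the public velocypack `Builder`/`Slice` API, which is not part of this diff):

```cpp
#include <iostream>
#include <string>

#include <velocypack/Builder.h>
#include <velocypack/HexDump.h>
#include <velocypack/Slice.h>
#include <velocypack/Value.h>

using namespace arangodb::velocypack;

int main() {
  // build a trivial VPack value so there is something to dump
  Builder builder;
  builder.add(Value("foobar"));
  Slice slice = builder.slice();

  // new in this change: hex output as a plain std::string
  std::string hex = slice.toHex();
  std::cout << hex << std::endl;

  // the stream operator still works and now shares the same formatting logic
  std::cout << HexDump(slice) << std::endl;
  return 0;
}
```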
CHANGELOG
@@ -1,6 +1,11 @@
 devel
 -----
 
+* added input file type `auto` for arangoimp so it can automatically detect the
+  type of the input file from the filename extension
+
 * fixed variables parsing in GraphQL
 
+* added `--translate` option for arangoimp to translate attribute names from
+  the input files to attribute names expected by ArangoDB
 
@@ -12,6 +17,8 @@ devel
 
   `--translate` works for CSV and TSV inputs only.
 
+* changed default value for `--server.max-packet-size` from 128 MB to 256 MB
+
 * fixed issue #2350
 
 * fixed issue #2349
@@ -26,11 +33,11 @@ devel
   for debugging.
 
 * always validate incoming JSON HTTP requests for duplicate attribute names
 
   Incoming JSON data with duplicate attribute names will now be rejected as
   invalid. Previous versions of ArangoDB only validated the uniqueness of
   attribute names inside incoming JSON for some API endpoints, but not
-  consistently for all APIs. 
+  consistently for all APIs.
 
 * don't let read-only transactions block the WAL collector
 
@@ -40,7 +47,6 @@ devel
 
 * arangoimp: fixed issue #2214
 
-
 v3.2.alpha2 (2017-02-20)
 ------------------------
 
@@ -112,6 +118,10 @@ v3.2.alpha1 (2017-02-05)
 v3.1.13 (XXXX-XX-XX)
 --------------------
 
+* fixed variables parsing in GraphQL
+
+* fixed issue #2214
+
 * fixed issue #2342
 
 * changed thread handling to queue only user requests on coordinator
@@ -919,8 +919,8 @@ list(INSERT SYSTEM_LIBRARIES 0
   ${OPENSSL_LIBRARIES}
   libcurl
   ${BASE_LIBS}
-  ${CMAKE_DL_LIBS}
   ${SYS_LIBS}
+  ${CMAKE_DL_LIBS}
   ${CMAKE_THREAD_LIBS_INIT}
 )
 
@@ -34,7 +34,7 @@ example user records to import:
 To import these records, all you need to do is to put them into a file (with one
 line for each record to import) and run the following command:
 
-    > arangoimp --file "data.json" --type json --collection "users"
+    > arangoimp --file "data.json" --type jsonl --collection "users"
 
 This will transfer the data to the server, import the records, and print a
 status summary. To show the intermediate progress during the import process, the
@@ -96,8 +96,9 @@ use the *--server.database* option when invoking _arangoimp_.
 ### JSON input file formats
 
 **Note**: *arangoimp* supports two formats when importing JSON data from
-a file. The first format (also used above) requires the input file to contain one
-complete JSON document in each line, e.g.
+a file. The first format, which we also used above, is commonly known as [jsonl](http://jsonlines.org).
+However, in contrast to the JSONL specification, it requires the input file to contain
+one complete JSON document in each line, e.g.
 
 ```js
 { "_key": "one", "value": 1 }
@@ -106,6 +107,8 @@ complete JSON document in each line, e.g.
 ...
 ```
 
+So one could argue that this is only a subset of JSONL.
+
 The above format can be imported sequentially by _arangoimp_. It will read data
 from the input file in chunks and send it in batches to the server. Each batch
 will be about as big as specified in the command-line parameter *--batch-size*.
@@ -25,7 +25,7 @@ The name of the edge collection to be used.
 One or many vertex collections that can contain source vertices.
 
 @RESTBODYPARAM{to,array,required,string}
-One or many edge collections that can contain target vertices.
+One or many vertex collections that can contain target vertices.
 
 @RESTRETURNCODES
 
@@ -22,7 +22,7 @@ The name of the edge collection to be used.
 One or many vertex collections that can contain source vertices.
 
 @RESTBODYPARAM{to,array,required,string}
-One or many edge collections that can contain target vertices.
+One or many vertex collections that can contain target vertices.
 
 @RESTRETURNCODES
 
@@ -7,10 +7,10 @@ mkdir -p $HOME/bin/gold
 chmod a+x $HOME/bin/gold/ld
 
 # prepare CCACHE
-(echo '#!/bin/bash'; echo 'ccache /usr/bin/gcc-4.9 "$@"') > $HOME/bin/gcc
+(echo '#!/bin/bash'; echo 'ccache /usr/bin/gcc-5 "$@"') > $HOME/bin/gcc
 chmod a+x $HOME/bin/gcc
 
-(echo '#!/bin/bash'; echo 'ccache /usr/bin/g++-4.9 "$@"') > $HOME/bin/g++
+(echo '#!/bin/bash'; echo 'ccache /usr/bin/g++-5 "$@"') > $HOME/bin/g++
 chmod a+x $HOME/bin/g++
 
 # prepare files for unit test
@@ -34,6 +34,7 @@
 #include "Aql/PlanCache.h"
 #include "Aql/QueryCache.h"
 #include "Aql/QueryList.h"
+#include "Aql/QueryProfile.h"
 #include "Basics/Exceptions.h"
 #include "Basics/VelocyPackHelper.h"
 #include "Basics/WorkMonitor.h"
@@ -68,75 +69,6 @@ static std::atomic<TRI_voc_tick_t> NextQueryId(1);
 constexpr uint64_t DontCache = 0;
 }
 
-/// @brief names of query phases / states
-static std::string StateNames[] = {
-    "initializing",         // INITIALIZATION
-    "parsing",              // PARSING
-    "optimizing ast",       // AST_OPTIMIZATION
-    "loading collections",  // LOADING_COLLECTIONS
-    "instantiating plan",   // PLAN_INSTANTIATION
-    "optimizing plan",      // PLAN_OPTIMIZATION
-    "executing",            // EXECUTION
-    "finalizing",           // FINALIZATION
-
-    "invalid"               // INVALID
-};
-
-// make sure the state strings and the actual states match
-static_assert(sizeof(StateNames) / sizeof(std::string) ==
-                  static_cast<size_t>(ExecutionState::INVALID_STATE) + 1,
-              "invalid number of ExecutionState values");
-
-/// @brief create a profile
-Profile::Profile(Query* query)
-    : query(query), results(), stamp(query->startTime()), tracked(false) {
-  auto queryList = query->vocbase()->queryList();
-
-  try {
-    tracked = queryList->insert(query, stamp);
-  } catch (...) {
-  }
-}
-
-/// @brief destroy a profile
-Profile::~Profile() {
-  // only remove from list when the query was inserted into it...
-  if (tracked) {
-    auto queryList = query->vocbase()->queryList();
-
-    try {
-      queryList->remove(query, stamp);
-    } catch (...) {
-    }
-  }
-}
-
-/// @brief sets a state to done
-void Profile::setDone(ExecutionState state) {
-  double const now = TRI_microtime();
-
-  if (state != ExecutionState::INVALID_STATE) {
-    // record duration of state
-    results.emplace_back(state, now - stamp);
-  }
-
-  // set timestamp
-  stamp = now;
-}
-
-/// @brief convert the profile to VelocyPack
-std::shared_ptr<VPackBuilder> Profile::toVelocyPack() {
-  auto result = std::make_shared<VPackBuilder>();
-  {
-    VPackObjectBuilder b(result.get());
-    for (auto const& it : results) {
-      result->add(StateNames[static_cast<int>(it.first)],
-                  VPackValue(it.second));
-    }
-  }
-  return result;
-}
-
 /// @brief global memory limit for AQL queries
 uint64_t Query::MemoryLimitValue = 0;
 
@@ -162,7 +94,7 @@ Query::Query(bool contextOwnedByExterior, TRI_vocbase_t* vocbase,
       _bindParameters(bindParameters),
       _options(options),
       _collections(vocbase),
-      _state(INVALID_STATE),
+      _state(QueryExecutionState::ValueType::INVALID_STATE),
       _trx(nullptr),
       _maxWarningCount(10),
      _warnings(),
@@ -229,7 +161,7 @@ Query::Query(bool contextOwnedByExterior, TRI_vocbase_t* vocbase,
       _queryBuilder(queryStruct),
       _options(options),
       _collections(vocbase),
-      _state(INVALID_STATE),
+      _state(QueryExecutionState::ValueType::INVALID_STATE),
       _trx(nullptr),
       _maxWarningCount(10),
      _warnings(),
@@ -458,7 +390,7 @@ void Query::prepare(QueryRegistry* registry, uint64_t queryStringHash) {
   TRI_ASSERT(registry != nullptr);
 
   init();
-  enterState(PARSING);
+  enterState(QueryExecutionState::ValueType::PARSING);
 
   std::unique_ptr<ExecutionPlan> plan;
 
@@ -487,7 +419,7 @@ void Query::prepare(QueryRegistry* registry, uint64_t queryStringHash) {
       ExecutionPlan::getCollectionsFromVelocyPack(_ast.get(), slice);
   _ast->variables()->fromVelocyPack(slice);
 
-  enterState(LOADING_COLLECTIONS);
+  enterState(QueryExecutionState::ValueType::LOADING_COLLECTIONS);
 
   int res = trx->addCollections(*_collections.collections());
 
@@ -499,7 +431,7 @@ void Query::prepare(QueryRegistry* registry, uint64_t queryStringHash) {
     THROW_ARANGO_EXCEPTION_MESSAGE(res, buildErrorMessage(res));
   }
 
-  enterState(PLAN_INSTANTIATION);
+  enterState(QueryExecutionState::ValueType::PLAN_INSTANTIATION);
 
   plan.reset(ExecutionPlan::instantiateFromVelocyPack(_ast.get(), slice));
 
@@ -525,7 +457,7 @@ void Query::prepare(QueryRegistry* registry, uint64_t queryStringHash) {
 #endif
   }
 
-  enterState(EXECUTION);
+  enterState(QueryExecutionState::ValueType::EXECUTION);
 
   TRI_ASSERT(_engine == nullptr);
   // note that the engine returned here may already be present in our
@@ -575,11 +507,11 @@ ExecutionPlan* Query::prepare() {
   if (_queryString != nullptr) {
     // we have an AST
     // optimize the ast
-    enterState(AST_OPTIMIZATION);
+    enterState(QueryExecutionState::ValueType::AST_OPTIMIZATION);
 
     _ast->validateAndOptimize();
 
-    enterState(LOADING_COLLECTIONS);
+    enterState(QueryExecutionState::ValueType::LOADING_COLLECTIONS);
 
     int res = _trx->begin();
 
@@ -587,7 +519,7 @@ ExecutionPlan* Query::prepare() {
       THROW_ARANGO_EXCEPTION_MESSAGE(res, buildErrorMessage(res));
     }
 
-    enterState(PLAN_INSTANTIATION);
+    enterState(QueryExecutionState::ValueType::PLAN_INSTANTIATION);
     plan.reset(ExecutionPlan::instantiateFromAst(_ast.get()));
 
     if (plan.get() == nullptr) {
@@ -596,7 +528,7 @@ ExecutionPlan* Query::prepare() {
     }
 
     // Run the query optimizer:
-    enterState(PLAN_OPTIMIZATION);
+    enterState(QueryExecutionState::ValueType::PLAN_OPTIMIZATION);
     arangodb::aql::Optimizer opt(maxNumberOfPlans());
     // get enabled/disabled rules
     opt.createPlans(plan.release(), getRulesFromOptions(),
@@ -612,7 +544,7 @@ ExecutionPlan* Query::prepare() {
     // we need to add them to the transaction now (otherwise the query will
     // fail)
 
-    enterState(LOADING_COLLECTIONS);
+    enterState(QueryExecutionState::ValueType::LOADING_COLLECTIONS);
 
     int res = trx->addCollections(*_collections.collections());
 
@@ -624,7 +556,7 @@ ExecutionPlan* Query::prepare() {
       THROW_ARANGO_EXCEPTION_MESSAGE(res, buildErrorMessage(res));
     }
 
-    enterState(PLAN_INSTANTIATION);
+    enterState(QueryExecutionState::ValueType::PLAN_INSTANTIATION);
 
     // we have an execution plan in VelocyPack format
     plan.reset(ExecutionPlan::instantiateFromVelocyPack(_ast.get(), _queryBuilder->slice()));
@@ -788,7 +720,7 @@ QueryResult Query::execute(QueryRegistry* registry) {
     auto stats = std::make_shared<VPackBuilder>();
     cleanupPlanAndEngine(TRI_ERROR_NO_ERROR, stats.get());
 
-    enterState(FINALIZATION);
+    enterState(QueryExecutionState::ValueType::FINALIZATION);
 
     result.warnings = warningsToVelocyPack();
     result.result = resultBuilder;
@@ -800,7 +732,7 @@ QueryResult Query::execute(QueryRegistry* registry) {
 
     // patch stats in place
     // we do this because "executionTime" should include the whole span of the execution and we have to set it at the very end
-    basics::VelocyPackHelper::patchDouble(result.stats->slice().get("executionTime"), TRI_microtime() - _startTime);
+    basics::VelocyPackHelper::patchDouble(result.stats->slice().get("executionTime"), runTime());
 
     LOG_TOPIC(DEBUG, Logger::QUERIES) << TRI_microtime() - _startTime << " "
                                       << "Query::execute:returning"
@@ -810,22 +742,22 @@ QueryResult Query::execute(QueryRegistry* registry) {
   } catch (arangodb::basics::Exception const& ex) {
     setExecutionTime();
     cleanupPlanAndEngine(ex.code());
-    return QueryResult(ex.code(), ex.message() + getStateString());
+    return QueryResult(ex.code(), ex.message() + QueryExecutionState::toStringWithPrefix(_state));
   } catch (std::bad_alloc const&) {
     setExecutionTime();
     cleanupPlanAndEngine(TRI_ERROR_OUT_OF_MEMORY);
     return QueryResult(
         TRI_ERROR_OUT_OF_MEMORY,
-        TRI_errno_string(TRI_ERROR_OUT_OF_MEMORY) + getStateString());
+        TRI_errno_string(TRI_ERROR_OUT_OF_MEMORY) + QueryExecutionState::toStringWithPrefix(_state));
   } catch (std::exception const& ex) {
     setExecutionTime();
     cleanupPlanAndEngine(TRI_ERROR_INTERNAL);
-    return QueryResult(TRI_ERROR_INTERNAL, ex.what() + getStateString());
+    return QueryResult(TRI_ERROR_INTERNAL, ex.what() + QueryExecutionState::toStringWithPrefix(_state));
   } catch (...) {
     setExecutionTime();
     cleanupPlanAndEngine(TRI_ERROR_INTERNAL);
     return QueryResult(TRI_ERROR_INTERNAL,
-                       TRI_errno_string(TRI_ERROR_INTERNAL) + getStateString());
+                       TRI_errno_string(TRI_ERROR_INTERNAL) + QueryExecutionState::toStringWithPrefix(_state));
   }
 }
 
@@ -975,7 +907,7 @@ QueryResultV8 Query::executeV8(v8::Isolate* isolate, QueryRegistry* registry) {
     auto stats = std::make_shared<VPackBuilder>();
     cleanupPlanAndEngine(TRI_ERROR_NO_ERROR, stats.get());
 
-    enterState(FINALIZATION);
+    enterState(QueryExecutionState::ValueType::FINALIZATION);
 
     result.warnings = warningsToVelocyPack();
     result.stats = stats;
@@ -986,7 +918,7 @@ QueryResultV8 Query::executeV8(v8::Isolate* isolate, QueryRegistry* registry) {
 
     // patch executionTime stats value in place
     // we do this because "executionTime" should include the whole span of the execution and we have to set it at the very end
-    basics::VelocyPackHelper::patchDouble(result.stats->slice().get("executionTime"), TRI_microtime() - _startTime);
+    basics::VelocyPackHelper::patchDouble(result.stats->slice().get("executionTime"), runTime());
 
     LOG_TOPIC(DEBUG, Logger::QUERIES) << TRI_microtime() - _startTime << " "
                                       << "Query::executeV8:returning"
@@ -996,22 +928,22 @@ QueryResultV8 Query::executeV8(v8::Isolate* isolate, QueryRegistry* registry) {
   } catch (arangodb::basics::Exception const& ex) {
     setExecutionTime();
     cleanupPlanAndEngine(ex.code());
-    return QueryResultV8(ex.code(), ex.message() + getStateString());
+    return QueryResultV8(ex.code(), ex.message() + QueryExecutionState::toStringWithPrefix(_state));
   } catch (std::bad_alloc const&) {
     setExecutionTime();
     cleanupPlanAndEngine(TRI_ERROR_OUT_OF_MEMORY);
     return QueryResultV8(
         TRI_ERROR_OUT_OF_MEMORY,
-        TRI_errno_string(TRI_ERROR_OUT_OF_MEMORY) + getStateString());
+        TRI_errno_string(TRI_ERROR_OUT_OF_MEMORY) + QueryExecutionState::toStringWithPrefix(_state));
   } catch (std::exception const& ex) {
     setExecutionTime();
     cleanupPlanAndEngine(TRI_ERROR_INTERNAL);
-    return QueryResultV8(TRI_ERROR_INTERNAL, ex.what() + getStateString());
+    return QueryResultV8(TRI_ERROR_INTERNAL, ex.what() + QueryExecutionState::toStringWithPrefix(_state));
   } catch (...) {
     setExecutionTime();
     cleanupPlanAndEngine(TRI_ERROR_INTERNAL);
     return QueryResult(TRI_ERROR_INTERNAL,
-                       TRI_errno_string(TRI_ERROR_INTERNAL) + getStateString());
+                       TRI_errno_string(TRI_ERROR_INTERNAL) + QueryExecutionState::toStringWithPrefix(_state));
   }
 }
 
@@ -1040,7 +972,7 @@ QueryResult Query::parse() {
 QueryResult Query::explain() {
   try {
     init();
-    enterState(PARSING);
+    enterState(QueryExecutionState::ValueType::PARSING);
 
     Parser parser(this);
 
@@ -1048,11 +980,11 @@ QueryResult Query::explain() {
     // put in bind parameters
     parser.ast()->injectBindParameters(_bindParameters);
 
-    enterState(AST_OPTIMIZATION);
+    enterState(QueryExecutionState::ValueType::AST_OPTIMIZATION);
     // optimize and validate the ast
     parser.ast()->validateAndOptimize();
 
-    enterState(LOADING_COLLECTIONS);
+    enterState(QueryExecutionState::ValueType::LOADING_COLLECTIONS);
 
     // create the transaction object, but do not start it yet
     _trx = new AqlTransaction(createTransactionContext(),
@@ -1065,7 +997,7 @@ QueryResult Query::explain() {
       THROW_ARANGO_EXCEPTION_MESSAGE(res, buildErrorMessage(res));
     }
 
-    enterState(PLAN_INSTANTIATION);
+    enterState(QueryExecutionState::ValueType::PLAN_INSTANTIATION);
     ExecutionPlan* plan = ExecutionPlan::instantiateFromAst(parser.ast());
 
     if (plan == nullptr) {
@@ -1074,12 +1006,12 @@ QueryResult Query::explain() {
     }
 
     // Run the query optimizer:
-    enterState(PLAN_OPTIMIZATION);
+    enterState(QueryExecutionState::ValueType::PLAN_OPTIMIZATION);
     arangodb::aql::Optimizer opt(maxNumberOfPlans());
     // get enabled/disabled rules
     opt.createPlans(plan, getRulesFromOptions(), inspectSimplePlans());
 
-    enterState(FINALIZATION);
+    enterState(QueryExecutionState::ValueType::FINALIZATION);
 
     QueryResult result(TRI_ERROR_NO_ERROR);
 
@@ -1124,16 +1056,16 @@ QueryResult Query::explain() {
 
     return result;
   } catch (arangodb::basics::Exception const& ex) {
-    return QueryResult(ex.code(), ex.message() + getStateString());
+    return QueryResult(ex.code(), ex.message() + QueryExecutionState::toStringWithPrefix(_state));
   } catch (std::bad_alloc const&) {
     return QueryResult(
         TRI_ERROR_OUT_OF_MEMORY,
-        TRI_errno_string(TRI_ERROR_OUT_OF_MEMORY) + getStateString());
+        TRI_errno_string(TRI_ERROR_OUT_OF_MEMORY) + QueryExecutionState::toStringWithPrefix(_state));
   } catch (std::exception const& ex) {
-    return QueryResult(TRI_ERROR_INTERNAL, ex.what() + getStateString());
+    return QueryResult(TRI_ERROR_INTERNAL, ex.what() + QueryExecutionState::toStringWithPrefix(_state));
   } catch (...) {
     return QueryResult(TRI_ERROR_INTERNAL,
-                       TRI_errno_string(TRI_ERROR_INTERNAL) + getStateString());
+                       TRI_errno_string(TRI_ERROR_INTERNAL) + QueryExecutionState::toStringWithPrefix(_state));
   }
 }
 
@@ -1279,8 +1211,8 @@ void Query::init() {
   TRI_ASSERT(_id != 0);
 
   TRI_ASSERT(_profile == nullptr);
-  _profile.reset(new Profile(this));
-  enterState(INITIALIZATION);
+  _profile.reset(new QueryProfile(this));
+  enterState(QueryExecutionState::ValueType::INITIALIZATION);
 
   TRI_ASSERT(_ast == nullptr);
   _ast.reset(new Ast(this));
@@ -1432,7 +1364,7 @@ std::vector<std::string> Query::getRulesFromOptions() const {
 }
 
 /// @brief enter a new state
-void Query::enterState(ExecutionState state) {
+void Query::enterState(QueryExecutionState::ValueType state) {
   LOG_TOPIC(DEBUG, Logger::QUERIES) << TRI_microtime() - _startTime << " "
                                     << "Query::enterState: " << state
                                     << " this: " << (uintptr_t) this;
@@ -1445,11 +1377,6 @@ void Query::enterState(ExecutionState state) {
   _state = state;
 }
 
-/// @brief get a description of the query's current state
-std::string Query::getStateString() const {
-  return std::string(" (while " + StateNames[_state] + ")");
-}
-
 /// @brief cleanup plan and engine for current query
 void Query::cleanupPlanAndEngine(int errorCode, VPackBuilder* statsBuilder) {
   if (_engine != nullptr) {
@@ -31,6 +31,7 @@
 #include "Aql/BindParameters.h"
 #include "Aql/Collections.h"
 #include "Aql/Graphs.h"
+#include "Aql/QueryExecutionState.h"
 #include "Aql/QueryResources.h"
 #include "Aql/QueryResultV8.h"
 #include "Aql/ResourceUsage.h"
@@ -60,44 +61,12 @@ class ExecutionEngine;
 class ExecutionPlan;
 class Executor;
 class Query;
+struct QueryProfile;
 class QueryRegistry;
 
 /// @brief query part
 enum QueryPart { PART_MAIN, PART_DEPENDENT };
 
-/// @brief execution states
-enum ExecutionState {
-  INITIALIZATION = 0,
-  PARSING,
-  AST_OPTIMIZATION,
-  LOADING_COLLECTIONS,
-  PLAN_INSTANTIATION,
-  PLAN_OPTIMIZATION,
-  EXECUTION,
-  FINALIZATION,
-
-  INVALID_STATE
-};
-
-struct Profile {
-  Profile(Profile const&) = delete;
-  Profile& operator=(Profile const&) = delete;
-
-  explicit Profile(Query*);
-
-  ~Profile();
-
-  void setDone(ExecutionState);
-
-  /// @brief convert the profile to VelocyPack
-  std::shared_ptr<arangodb::velocypack::Builder> toVelocyPack();
-
-  Query* query;
-  std::vector<std::pair<ExecutionState, double>> results;
-  double stamp;
-  bool tracked;
-};
-
 /// @brief an AQL query
 class Query {
  private:
@@ -135,6 +104,9 @@ class Query {
   /// @brief return the start timestamp of the query
   double startTime() const { return _startTime; }
 
+  /// @brief return the current runtime of the query
+  double runTime() const { return TRI_microtime() - _startTime; }
+
   /// @brief whether or not the query is killed
   inline bool killed() const { return _killed; }
 
@@ -366,6 +338,8 @@ class Query {
 
     return value.getNumericValue<T>();
   }
 
+  QueryExecutionState::ValueType state() const { return _state; }
+
  private:
  /// @brief read the "optimizer.inspectSimplePlans" section from the options
@@ -378,7 +352,7 @@ class Query {
   std::string buildErrorMessage(int errorCode) const;
 
   /// @brief enter a new state
-  void enterState(ExecutionState);
+  void enterState(QueryExecutionState::ValueType);
 
   /// @brief cleanup plan and engine for current query
   void cleanupPlanAndEngine(int, VPackBuilder* statsBuilder = nullptr);
@@ -434,11 +408,11 @@ class Query {
   std::unique_ptr<Ast> _ast;
 
   /// @brief query execution profile
-  std::unique_ptr<Profile> _profile;
+  std::unique_ptr<QueryProfile> _profile;
 
   /// @brief current state the query is in (used for profiling and error
   /// messages)
-  ExecutionState _state;
+  QueryExecutionState::ValueType _state;
 
   /// @brief the ExecutionPlan object, if the query is prepared
   std::shared_ptr<ExecutionPlan> _plan;
@@ -0,0 +1,61 @@
+////////////////////////////////////////////////////////////////////////////////
+/// DISCLAIMER
+///
+/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
+/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
+///
+/// Licensed under the Apache License, Version 2.0 (the "License");
+/// you may not use this file except in compliance with the License.
+/// You may obtain a copy of the License at
+///
+///     http://www.apache.org/licenses/LICENSE-2.0
+///
+/// Unless required by applicable law or agreed to in writing, software
+/// distributed under the License is distributed on an "AS IS" BASIS,
+/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+/// See the License for the specific language governing permissions and
+/// limitations under the License.
+///
+/// Copyright holder is ArangoDB GmbH, Cologne, Germany
+///
+/// @author Jan Steemann
+////////////////////////////////////////////////////////////////////////////////
+
+#include "QueryExecutionState.h"
+
+using namespace arangodb::aql;
+
+/// @brief names of query phases / states
+static std::string const StateNames[] = {
+    "initializing",         // INITIALIZATION
+    "parsing",              // PARSING
+    "optimizing ast",       // AST_OPTIMIZATION
+    "loading collections",  // LOADING_COLLECTIONS
+    "instantiating plan",   // PLAN_INSTANTIATION
+    "optimizing plan",      // PLAN_OPTIMIZATION
+    "executing",            // EXECUTION
+    "finalizing",           // FINALIZATION
+    "finished",             // FINISHED
+
+    "invalid"               // INVALID
+};
+
+// make sure the state strings and the actual states match
+static_assert(sizeof(StateNames) / sizeof(std::string) ==
+                  static_cast<size_t>(QueryExecutionState::ValueType::INVALID_STATE) + 1,
+              "invalid number of ExecutionState values");
+
+/// @brief get a description of the query's current state
+std::string QueryExecutionState::toString(QueryExecutionState::ValueType state) {
+  return StateNames[static_cast<int>(state)];
+}
+
+/// @brief get a description of the query's current state, prefixed with " (while "
+std::string QueryExecutionState::toStringWithPrefix(QueryExecutionState::ValueType state) {
+  return std::string(" (while " + StateNames[static_cast<int>(state)] + ")");
+}
+
+std::ostream& operator<<(std::ostream& stream, arangodb::aql::QueryExecutionState::ValueType state) {
+  stream << StateNames[static_cast<int>(state)];
+  return stream;
+}
@@ -0,0 +1,59 @@
+////////////////////////////////////////////////////////////////////////////////
+/// DISCLAIMER
+///
+/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
+/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
+///
+/// Licensed under the Apache License, Version 2.0 (the "License");
+/// you may not use this file except in compliance with the License.
+/// You may obtain a copy of the License at
+///
+///     http://www.apache.org/licenses/LICENSE-2.0
+///
+/// Unless required by applicable law or agreed to in writing, software
+/// distributed under the License is distributed on an "AS IS" BASIS,
+/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+/// See the License for the specific language governing permissions and
+/// limitations under the License.
+///
+/// Copyright holder is ArangoDB GmbH, Cologne, Germany
+///
+/// @author Jan Steemann
+////////////////////////////////////////////////////////////////////////////////
+
+#ifndef ARANGOD_AQL_QUERY_EXECUTION_STATE_H
+#define ARANGOD_AQL_QUERY_EXECUTION_STATE_H 1
+
+#include "Basics/Common.h"
+
+#include <iosfwd>
+
+namespace arangodb {
+namespace aql {
+namespace QueryExecutionState {
+
+/// @brief execution states
+enum class ValueType {
+  INITIALIZATION = 0,
+  PARSING,
+  AST_OPTIMIZATION,
+  LOADING_COLLECTIONS,
+  PLAN_INSTANTIATION,
+  PLAN_OPTIMIZATION,
+  EXECUTION,
+  FINALIZATION,
+  FINISHED,
+
+  INVALID_STATE
+};
+
+std::string toString(QueryExecutionState::ValueType state);
+std::string toStringWithPrefix(QueryExecutionState::ValueType state);
+
+}
+}
+}
+
+std::ostream& operator<<(std::ostream&, arangodb::aql::QueryExecutionState::ValueType);
+
+#endif
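A short sketch of how the new `QueryExecutionState` helpers are meant to be called (illustrative only; it assumes the header above is on the include path):

```cpp
#include <iostream>

#include "Aql/QueryExecutionState.h"

using arangodb::aql::QueryExecutionState::ValueType;

int main() {
  ValueType state = ValueType::PARSING;

  // plain name, e.g. "parsing"
  std::cout << arangodb::aql::QueryExecutionState::toString(state) << "\n";

  // the form appended to error messages, e.g. " (while parsing)"
  std::cout << arangodb::aql::QueryExecutionState::toStringWithPrefix(state) << "\n";

  // the free operator<< streams the state name directly
  std::cout << state << std::endl;
  return 0;
}
```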
@@ -23,22 +23,20 @@
 
 #include "Aql/QueryList.h"
 #include "Aql/Query.h"
-#include "Logger/Logger.h"
 #include "Basics/ReadLocker.h"
+#include "Basics/StringRef.h"
 #include "Basics/WriteLocker.h"
 #include "Basics/Exceptions.h"
+#include "Logger/Logger.h"
 #include "VocBase/vocbase.h"
 
 using namespace arangodb::aql;
 
-QueryEntry::QueryEntry(arangodb::aql::Query const* query, double started)
-    : query(query), started(started) {}
-
 QueryEntryCopy::QueryEntryCopy(TRI_voc_tick_t id,
-                               std::string const& queryString, double started,
-                               double runTime, std::string const& queryState)
-    : id(id), queryString(queryString), started(started), runTime(runTime),
-      queryState(queryState) {}
+                               std::string&& queryString, double started,
+                               double runTime, QueryExecutionState::ValueType state)
+    : id(id), queryString(std::move(queryString)), started(started), runTime(runTime),
+      state(state) {}
 
 double const QueryList::DefaultSlowQueryThreshold = 10.0;
 size_t const QueryList::DefaultMaxSlowQueries = 64;
@@ -62,32 +60,26 @@ QueryList::QueryList(TRI_vocbase_t*)
 QueryList::~QueryList() {
   WRITE_LOCKER(writeLocker, _lock);
 
-  for (auto& it : _current) {
-    delete it.second;
-  }
   _current.clear();
   _slow.clear();
 }
 
 /// @brief insert a query
-bool QueryList::insert(Query const* query, double stamp) {
+bool QueryList::insert(Query const* query) {
   // not enabled or no query string
   if (!_enabled || query == nullptr || query->queryString() == nullptr) {
     return false;
   }
 
   try {
-    auto entry = std::make_unique<QueryEntry>(query, stamp);
-
     WRITE_LOCKER(writeLocker, _lock);
 
     TRI_IF_FAILURE("QueryList::insert") {
       THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
     }
 
-    auto it = _current.emplace(query->id(), entry.get());
+    auto it = _current.emplace(query->id(), query);
     if (it.second) {
-      entry.release();
       return true;
     }
   } catch (...) {
@@ -97,7 +89,7 @@ bool QueryList::insert(Query const* query, double stamp) {
 }
 
 /// @brief remove a query
-void QueryList::remove(Query const* query, double now) {
+void QueryList::remove(Query const* query) {
   // we're intentionally not checking _enabled here...
 
   // note: there is the possibility that a query got inserted when the
@@ -111,103 +103,65 @@ void QueryList::remove(Query const* query, double now) {
   }
 
   size_t const maxLength = _maxQueryStringLength;
-  QueryEntry* entry = nullptr;
 
-  {
-    WRITE_LOCKER(writeLocker, _lock);
-    auto it = _current.find(query->id());
+  WRITE_LOCKER(writeLocker, _lock);
+  auto it = _current.find(query->id());
 
-    if (it != _current.end()) {
-      entry = (*it).second;
-      _current.erase(it);
+  if (it != _current.end()) {
+    Query const* query = (*it).second;
+    _current.erase(it);
 
-      TRI_ASSERT(entry != nullptr);
+    TRI_ASSERT(query != nullptr);
+    double const started = query->startTime();
+    double const now = TRI_microtime();
 
-      try {
-        // check if we need to push the query into the list of slow queries
-        if (_trackSlowQueries && _slowQueryThreshold >= 0.0 &&
-            now - entry->started >= _slowQueryThreshold) {
-          // yes.
+    try {
+      // check if we need to push the query into the list of slow queries
+      if (_trackSlowQueries && _slowQueryThreshold >= 0.0 &&
+          now - started >= _slowQueryThreshold) {
+        // yes.
 
-          char const* queryString = entry->query->queryString();
-          size_t const originalLength = entry->query->queryLength();
-          size_t length = originalLength;
-
-          if (length > maxLength) {
-            length = maxLength;
-            TRI_ASSERT(length <= originalLength);
-
-            // do not create invalid UTF-8 sequences
-            while (length > 0) {
-              uint8_t c = queryString[length - 1];
-              if ((c & 128) == 0) {
-                // single-byte character
-                break;
-              }
-              --length;
-
-              // start of a multi-byte sequence
-              if ((c & 192) == 192) {
-                // decrease length by one more, so the string contains the
-                // last part of the previous (multi-byte?) sequence
-                break;
-              }
-            }
-          }
-
-          TRI_IF_FAILURE("QueryList::remove") {
-            THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
-          }
-
-          std::string q(queryString, length);
-          q.append(originalLength > maxLength ? "..." : "");
-
-          LOG_TOPIC(WARN, Logger::QUERIES) << "slow query: '" << q << "', took: " << Logger::FIXED(now - entry->started);
-
-          _slow.emplace_back(QueryEntryCopy(
-              entry->query->id(),
-              std::move(q),
-              entry->started, now - entry->started,
-              std::string(" (while finished)")));
-
-          if (++_slowCount > _maxSlowQueries) {
-            // free first element
-            _slow.pop_front();
-            --_slowCount;
-          }
-        }
-      } catch (...) {
-      }
-    }
-  }
+        TRI_IF_FAILURE("QueryList::remove") {
+          THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
+        }
 
-  if (entry != nullptr) {
-    delete entry;
+        std::string q = extractQueryString(query, maxLength);
+
+        LOG_TOPIC(WARN, Logger::QUERIES) << "slow query: '" << q << "', took: " << Logger::FIXED(now - started);
+
+        _slow.emplace_back(QueryEntryCopy(
+            query->id(),
+            std::move(q),
+            started, now - started,
+            QueryExecutionState::ValueType::FINISHED));
+
+        if (++_slowCount > _maxSlowQueries) {
+          // free first element
+          _slow.pop_front();
+          --_slowCount;
+        }
+      }
+    } catch (...) {
+    }
   }
 }
 
 /// @brief kills a query
 int QueryList::kill(TRI_voc_tick_t id) {
-  std::string queryString;
+  WRITE_LOCKER(writeLocker, _lock);
 
-  {
-    WRITE_LOCKER(writeLocker, _lock);
-    auto it = _current.find(id);
+  auto it = _current.find(id);
 
-    if (it == _current.end()) {
-      return TRI_ERROR_QUERY_NOT_FOUND;
-    }
-
-    auto entry = (*it).second;
-    queryString.assign(entry->query->queryString(),
-                       entry->query->queryLength());
-    const_cast<arangodb::aql::Query*>(entry->query)->killed(true);
+  if (it == _current.end()) {
+    return TRI_ERROR_QUERY_NOT_FOUND;
   }
 
-  // log outside the lock
+  Query const* query = (*it).second;
+  StringRef queryString(query->queryString(), query->queryLength());
+
   LOG_TOPIC(WARN, arangodb::Logger::FIXME) << "killing AQL query " << id << " '" << queryString << "'";
+
+  const_cast<arangodb::aql::Query*>(query)->killed(true);
   return TRI_ERROR_NO_ERROR;
 }
@@ -218,20 +172,18 @@ uint64_t QueryList::killAll(bool silent) {
   WRITE_LOCKER(writeLocker, _lock);
 
   for (auto& it : _current) {
-    auto entry = it.second;
-    const_cast<arangodb::aql::Query*>(entry->query)->killed(true);
-
-    ++killed;
-
-    std::string queryString(entry->query->queryString(),
-                            entry->query->queryLength());
+    Query const* query = it.second;
+
+    StringRef queryString(query->queryString(), query->queryLength());
 
     if (silent) {
-      LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "killing AQL query " << entry->query->id() << " '" << queryString << "'";
+      LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "killing AQL query " << query->id() << " '" << queryString << "'";
     } else {
-      LOG_TOPIC(WARN, arangodb::Logger::FIXME) << "killing AQL query " << entry->query->id() << " '" << queryString << "'";
+      LOG_TOPIC(WARN, arangodb::Logger::FIXME) << "killing AQL query " << query->id() << " '" << queryString << "'";
     }
 
+    const_cast<arangodb::aql::Query*>(query)->killed(true);
+    ++killed;
   }
 
   return killed;
@@ -249,44 +201,19 @@ std::vector<QueryEntryCopy> QueryList::listCurrent() {
     result.reserve(_current.size());
 
     for (auto const& it : _current) {
-      auto entry = it.second;
+      Query const* query = it.second;
 
-      if (entry == nullptr || entry->query->queryString() == nullptr) {
+      if (query == nullptr || query->queryString() == nullptr) {
         continue;
       }
 
-      char const* queryString = entry->query->queryString();
-      size_t const originalLength = entry->query->queryLength();
-      size_t length = originalLength;
-
-      if (length > maxLength) {
-        length = maxLength;
-        TRI_ASSERT(length <= originalLength);
-
-        // do not create invalid UTF-8 sequences
-        while (length > 0) {
-          uint8_t c = queryString[length - 1];
-          if ((c & 128) == 0) {
-            // single-byte character
-            break;
-          }
-          --length;
-
-          // start of a multi-byte sequence
-          if ((c & 192) == 192) {
-            // decrease length by one more, so the string contains the
-            // last part of the previous (multi-byte?) sequence
-            break;
-          }
-        }
-      }
+      double const started = query->startTime();
 
       result.emplace_back(
-          QueryEntryCopy(entry->query->id(),
-                         std::string(queryString, length)
-                             .append(originalLength > maxLength ? "..." : ""),
-                         entry->started, now - entry->started,
-                         entry->query->getStateString()));
+          QueryEntryCopy(query->id(),
+                         extractQueryString(query, maxLength),
+                         started, now - started,
+                         query->state()));
     }
   }
 
@@ -312,3 +239,40 @@ void QueryList::clearSlow() {
   _slow.clear();
   _slowCount = 0;
 }
+
+std::string QueryList::extractQueryString(Query const* query, size_t maxLength) const {
+  char const* queryString = query->queryString();
+  size_t length = query->queryLength();
+
+  if (length > maxLength) {
+    std::string q;
+
+    // query string needs truncation
+    length = maxLength;
+
+    // do not create invalid UTF-8 sequences
+    while (length > 0) {
+      uint8_t c = queryString[length - 1];
+      if ((c & 128) == 0) {
+        // single-byte character
+        break;
+      }
+      --length;
+
+      // start of a multi-byte sequence
+      if ((c & 192) == 192) {
+        // decrease length by one more, so the string contains the
+        // last part of the previous (multi-byte?) sequence
+        break;
+      }
+    }
+
+    q.reserve(length + 3);
+    q.append(queryString, length);
+    q.append("...", 3);
+    return q;
+  }
+
+  // no truncation
+  return std::string(queryString, length);
+}
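The truncation loop in `extractQueryString()` walks backwards so it never cuts inside a multi-byte UTF-8 sequence. The same technique as a standalone function (hypothetical name, plain `std::string` instead of the raw pointer/length pair used above):

```cpp
#include <cstddef>
#include <cstdint>
#include <string>

// UTF-8-safe truncation: drop any partial multi-byte sequence at the cut point.
std::string truncateUtf8(std::string const& s, size_t maxLength) {
  if (s.size() <= maxLength) {
    return s;  // short enough, nothing to do
  }
  size_t length = maxLength;
  while (length > 0) {
    uint8_t c = static_cast<uint8_t>(s[length - 1]);
    if ((c & 0x80) == 0) {
      break;  // last byte is a single-byte (ASCII) character, safe to cut here
    }
    --length;  // drop one byte of the trailing multi-byte sequence
    if ((c & 0xC0) == 0xC0) {
      break;  // that byte was the sequence's lead byte, sequence fully removed
    }
  }
  return s.substr(0, length) + "...";
}
```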
@@ -25,6 +25,7 @@
 #define ARANGOD_AQL_QUERY_LIST_H 1
 
 #include "Basics/Common.h"
+#include "Aql/QueryExecutionState.h"
 #include "Basics/ReadWriteLock.h"
 #include "VocBase/voc-types.h"
 
|
|||
// --SECTION-- struct QueryEntry
|
||||
// -----------------------------------------------------------------------------
|
||||
|
||||
struct QueryEntry {
|
||||
QueryEntry(arangodb::aql::Query const*, double);
|
||||
|
||||
arangodb::aql::Query const* query;
|
||||
double const started;
|
||||
};
|
||||
|
||||
struct QueryEntryCopy {
|
||||
QueryEntryCopy (TRI_voc_tick_t,
|
||||
std::string const&,
|
||||
double,
|
||||
double,
|
||||
std::string const&);
|
||||
QueryEntryCopy (TRI_voc_tick_t id,
|
||||
std::string&& queryString,
|
||||
double started,
|
||||
double runTime,
|
||||
QueryExecutionState::ValueType state);
|
||||
|
||||
TRI_voc_tick_t id;
|
||||
std::string queryString;
|
||||
double started;
|
||||
double runTime;
|
||||
std::string queryState;
|
||||
QueryExecutionState::ValueType state;
|
||||
};
|
||||
|
||||
class QueryList {
|
||||
|
@@ -143,10 +137,10 @@ class QueryList {
   }
 
   /// @brief enter a query
-  bool insert(Query const*, double);
+  bool insert(Query const*);
 
   /// @brief remove a query
-  void remove(Query const*, double);
+  void remove(Query const*);
 
   /// @brief kills a query
   int kill(TRI_voc_tick_t);
@@ -163,12 +157,15 @@ class QueryList {
   /// @brief clear the list of slow queries
   void clearSlow();
 
+ private:
+  std::string extractQueryString(Query const* query, size_t maxLength) const;
+
  private:
   /// @brief r/w lock for the list
   arangodb::basics::ReadWriteLock _lock;
 
   /// @brief list of current queries
-  std::unordered_map<TRI_voc_tick_t, QueryEntry*> _current;
+  std::unordered_map<TRI_voc_tick_t, Query const*> _current;
 
   /// @brief list of slow queries
   std::list<QueryEntryCopy> _slow;
@@ -0,0 +1,84 @@
+////////////////////////////////////////////////////////////////////////////////
+/// DISCLAIMER
+///
+/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
+/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
+///
+/// Licensed under the Apache License, Version 2.0 (the "License");
+/// you may not use this file except in compliance with the License.
+/// You may obtain a copy of the License at
+///
+///     http://www.apache.org/licenses/LICENSE-2.0
+///
+/// Unless required by applicable law or agreed to in writing, software
+/// distributed under the License is distributed on an "AS IS" BASIS,
+/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+/// See the License for the specific language governing permissions and
+/// limitations under the License.
+///
+/// Copyright holder is ArangoDB GmbH, Cologne, Germany
+///
+/// @author Jan Steemann
+////////////////////////////////////////////////////////////////////////////////
+
+#include "QueryProfile.h"
+
+#include "Aql/Query.h"
+#include "Aql/QueryList.h"
+#include "VocBase/vocbase.h"
+
+#include <velocypack/Builder.h>
+#include <velocypack/velocypack-aliases.h>
+
+using namespace arangodb;
+using namespace arangodb::aql;
+
+/// @brief create a profile
+QueryProfile::QueryProfile(Query* query)
+    : query(query), results(), stamp(query->startTime()), tracked(false) {
+  auto queryList = query->vocbase()->queryList();
+
+  try {
+    tracked = queryList->insert(query);
+  } catch (...) {
+  }
+}
+
+/// @brief destroy a profile
+QueryProfile::~QueryProfile() {
+  // only remove from list when the query was inserted into it...
+  if (tracked) {
+    auto queryList = query->vocbase()->queryList();
+
+    try {
+      queryList->remove(query);
+    } catch (...) {
+    }
+  }
+}
+
+/// @brief sets a state to done
+void QueryProfile::setDone(QueryExecutionState::ValueType state) {
+  double const now = TRI_microtime();
+
+  if (state != QueryExecutionState::ValueType::INVALID_STATE) {
+    // record duration of state
+    results.emplace_back(state, now - stamp);
+  }
+
+  // set timestamp
+  stamp = now;
+}
+
+/// @brief convert the profile to VelocyPack
+std::shared_ptr<VPackBuilder> QueryProfile::toVelocyPack() {
+  auto result = std::make_shared<VPackBuilder>();
+  {
+    VPackObjectBuilder b(result.get());
+    for (auto const& it : results) {
+      result->add(QueryExecutionState::toString(it.first),
+                  VPackValue(it.second));
+    }
+  }
+  return result;
+}
@@ -0,0 +1,63 @@
+////////////////////////////////////////////////////////////////////////////////
+/// DISCLAIMER
+///
+/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
+/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
+///
+/// Licensed under the Apache License, Version 2.0 (the "License");
+/// you may not use this file except in compliance with the License.
+/// You may obtain a copy of the License at
+///
+///     http://www.apache.org/licenses/LICENSE-2.0
+///
+/// Unless required by applicable law or agreed to in writing, software
+/// distributed under the License is distributed on an "AS IS" BASIS,
+/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+/// See the License for the specific language governing permissions and
+/// limitations under the License.
+///
+/// Copyright holder is ArangoDB GmbH, Cologne, Germany
+///
+/// @author Jan Steemann
+////////////////////////////////////////////////////////////////////////////////
+
+#ifndef ARANGOD_AQL_QUERY_PROFILE_H
+#define ARANGOD_AQL_QUERY_PROFILE_H 1
+
+#include "Basics/Common.h"
+#include "Aql/QueryExecutionState.h"
+
+#include <velocypack/Builder.h>
+
+namespace arangodb {
+
+namespace velocypack {
+class Builder;
+}
+
+namespace aql {
+class Query;
+
+struct QueryProfile {
+  QueryProfile(QueryProfile const&) = delete;
+  QueryProfile& operator=(QueryProfile const&) = delete;
+
+  explicit QueryProfile(Query*);
+
+  ~QueryProfile();
+
+  void setDone(QueryExecutionState::ValueType);
+
+  /// @brief convert the profile to VelocyPack
+  std::shared_ptr<arangodb::velocypack::Builder> toVelocyPack();
+
+  Query* query;
+  std::vector<std::pair<QueryExecutionState::ValueType, double>> results;
+  double stamp;
+  bool tracked;
+};
+
+}
+}
+
+#endif
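QueryProfile records, per state, how long the query spent in it: every `setDone()` call stores `now - stamp` for the state that just ended and resets the stamp. A simplified, self-contained illustration of that pattern (not the actual class; it takes timestamps as arguments instead of calling `TRI_microtime()`):

```cpp
#include <utility>
#include <vector>

enum class State { PARSING, EXECUTION, FINALIZATION, INVALID };

struct MiniProfile {
  std::vector<std::pair<State, double>> results;  // (state, duration) pairs
  double stamp;                                   // time of last transition

  explicit MiniProfile(double start) : stamp(start) {}

  // record the duration of the state that just finished
  void setDone(State state, double now) {
    if (state != State::INVALID) {
      results.emplace_back(state, now - stamp);
    }
    stamp = now;  // the next state is measured from here
  }
};
```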
@@ -144,6 +144,8 @@ SET(ARANGOD_SOURCES
   Aql/OptimizerRules.cpp
   Aql/Parser.cpp
   Aql/PlanCache.cpp
+  Aql/QueryExecutionState.cpp
+  Aql/QueryProfile.cpp
   Aql/Quantifier.cpp
   Aql/Query.cpp
   Aql/QueryCache.cpp
@@ -180,9 +182,10 @@ SET(ARANGOD_SOURCES
   Cache/PlainCache.cpp
   Cache/Rebalancer.cpp
   Cache/State.cpp
+  Cache/Transaction.cpp
   Cache/TransactionalBucket.cpp
   Cache/TransactionalCache.cpp
-  Cache/TransactionWindow.cpp
+  Cache/TransactionManager.cpp
   Cluster/AgencyCallback.cpp
   Cluster/AgencyCallbackRegistry.cpp
   Cluster/ClusterComm.cpp
@@ -397,9 +400,9 @@ target_link_libraries(arangoserver
   ${MSVC_LIBS}
   ${V8_LIBS}
   ${ROCKSDB_LIBS}
-  ${SYSTEM_LIBRARIES}
   boost_boost
   boost_system
+  ${SYSTEM_LIBRARIES}
 )
 
 add_executable(${BIN_ARANGOD}
@@ -38,6 +38,11 @@
 
 using namespace arangodb::cache;
 
+uint64_t Cache::_evictionStatsCapacity = 1024;
+uint64_t Cache::_findStatsCapacity = 16384;
+
+Cache::ConstructionGuard::ConstructionGuard() {}
+
 Cache::Finding::Finding(CachedValue* v) : _value(v) {
   if (_value != nullptr) {
     _value->lease();
@@ -111,9 +116,30 @@ CachedValue* Cache::Finding::copy() const {
   return ((_value == nullptr) ? nullptr : _value->copy());
 }
 
-void Cache::destroy(std::shared_ptr<Cache> cache) {
-  if (cache.get() != nullptr) {
-    cache->shutdown();
+Cache::Cache(ConstructionGuard guard, Manager* manager,
+             Manager::MetadataItr metadata, bool allowGrowth,
+             bool enableWindowedStats)
+    : _state(),
+      _allowGrowth(allowGrowth),
+      _evictionStats(_evictionStatsCapacity),
+      _insertionCount(0),
+      _enableWindowedStats(enableWindowedStats),
+      _findStats(nullptr),
+      _findHits(0),
+      _findMisses(0),
+      _manager(manager),
+      _metadata(metadata),
+      _openOperations(0),
+      _migrateRequestTime(std::chrono::steady_clock::now()),
+      _resizeRequestTime(std::chrono::steady_clock::now()),
+      _lastResizeRequestStatus(true) {
+  if (_enableWindowedStats) {
+    try {
+      _findStats.reset(new StatBuffer(_findStatsCapacity));
+    } catch (std::bad_alloc) {
+      _findStats.reset(nullptr);
+      _enableWindowedStats = false;
+    }
   }
 }
 
@@ -147,8 +173,10 @@ std::pair<double, double> Cache::hitRates() {
 
   uint64_t currentMisses = _findMisses.load();
   uint64_t currentHits = _findHits.load();
-  lifetimeRate = 100 * (static_cast<double>(currentHits) /
-                        static_cast<double>(currentHits + currentMisses));
+  if (currentMisses + currentHits > 0) {
+    lifetimeRate = 100 * (static_cast<double>(currentHits) /
+                          static_cast<double>(currentHits + currentMisses));
+  }
 
   if (_enableWindowedStats && _findStats.get() != nullptr) {
     auto stats = _findStats->getFrequencies();
@@ -166,8 +194,10 @@ std::pair<double, double> Cache::hitRates() {
       currentHits = (*stats)[1].second;
       currentMisses = (*stats)[0].second;
     }
-    windowedRate = 100 * (static_cast<double>(currentHits) /
-                          static_cast<double>(currentHits + currentMisses));
+    if (currentHits + currentMisses > 0) {
+      windowedRate = 100 * (static_cast<double>(currentHits) /
+                            static_cast<double>(currentHits + currentMisses));
+    }
   }
 }
 
@@ -224,38 +254,9 @@ bool Cache::isResizing() {
   return resizing;
 }
 
-Cache::Cache(Manager* manager, uint64_t requestedLimit, bool allowGrowth,
-             bool enableWindowedStats, std::function<void(Cache*)> deleter,
-             uint64_t size)
-    : _state(),
-      _allowGrowth(allowGrowth),
-      _evictionStats(1024),
-      _insertionCount(0),
-      _enableWindowedStats(enableWindowedStats),
-      _findStats(nullptr),
-      _findHits(0),
-      _findMisses(0),
-      _manager(manager),
-      _openOperations(0),
-      _migrateRequestTime(std::chrono::steady_clock::now()),
-      _resizeRequestTime(std::chrono::steady_clock::now()) {
-  try {
-    uint64_t fullSize =
-        size + _evictionStats.memoryUsage() +
-        ((_findStats.get() == nullptr) ? 0 : _findStats->memoryUsage());
-    _metadata =
-        _manager->registerCache(this, requestedLimit, deleter, fullSize);
-  } catch (std::bad_alloc) {
-    // could not register, mark as non-operational
-    if (!_state.isSet(State::Flag::shutdown)) {
-      _state.toggleFlag(State::Flag::shutdown);
-    }
-  }
-  try {
-    _findStats.reset(new StatBuffer(16384));
-  } catch (std::bad_alloc) {
-    _findStats.reset(nullptr);
-    _enableWindowedStats = false;
+void Cache::destroy(std::shared_ptr<Cache> cache) {
+  if (cache.get() != nullptr) {
+    cache->shutdown();
   }
 }
 
@@ -283,7 +284,13 @@ bool Cache::requestResize(uint64_t requestedLimit, bool internal) {
           _resizeRequestTime))) {
     _metadata->lock();
     uint64_t newLimit =
-        (requestedLimit > 0) ? requestedLimit : (_metadata->hardLimit() * 2);
+        (requestedLimit > 0)
+            ? requestedLimit
+            : (_lastResizeRequestStatus
+                   ? (_metadata->hardLimit() * 2)
+                   : (static_cast<uint64_t>(
+                          static_cast<double>(_metadata->hardLimit()) *
+                          1.25)));
     _metadata->unlock();
     auto result = _manager->requestResize(_metadata, newLimit);
     _resizeRequestTime = result.second;
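The new `_lastResizeRequestStatus` flag makes the growth request adaptive: double the limit while the manager keeps granting requests, fall back to a 25% increase after a rejection. The policy in isolation (hypothetical helper, same arithmetic as above):

```cpp
#include <cstdint>

// next limit to request: aggressive 2x after a success, gentle 1.25x after
// the previous request was denied
uint64_t nextRequestedLimit(uint64_t hardLimit, bool lastRequestSucceeded) {
  if (lastRequestSucceeded) {
    return hardLimit * 2;
  }
  return static_cast<uint64_t>(static_cast<double>(hardLimit) * 1.25);
}
```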
@@ -385,6 +392,11 @@ void Cache::beginShutdown() {
 
 void Cache::shutdown() {
   _state.lock();
+  _metadata->lock();
+  auto handle = _metadata->cache();  // hold onto self-reference to prevent
+                                     // premature shared_ptr destruction
+  TRI_ASSERT(handle.get() == this);
+  _metadata->unlock();
   if (!_state.isSet(State::Flag::shutdown)) {
     if (!_state.isSet(State::Flag::shuttingDown)) {
       _state.toggleFlag(State::Flag::shuttingDown);
@@ -39,13 +39,27 @@
 namespace arangodb {
 namespace cache {
 
+class PlainCache;          // forward declaration
+class TransactionalCache;  // forward declaration
+
 ////////////////////////////////////////////////////////////////////////////////
 /// @brief The common structure of all caches managed by Manager.
 ///
 /// Any pure virtual functions are documented in derived classes implementing
 /// them.
 ////////////////////////////////////////////////////////////////////////////////
-class Cache {
+class Cache : public std::enable_shared_from_this<Cache> {
+ protected:
+  //////////////////////////////////////////////////////////////////////////////
+  /// @brief A dummy class to restrict constructor access.
+  //////////////////////////////////////////////////////////////////////////////
+  class ConstructionGuard {
+   private:
+    ConstructionGuard();
+    friend class PlainCache;
+    friend class TransactionalCache;
+  };
+
  public:
  typedef FrequencyBuffer<uint8_t> StatBuffer;
 
@ -93,10 +107,16 @@ class Cache {
|
|||
};
|
||||
|
||||
public:
|
||||
Cache(ConstructionGuard guard, Manager* manager,
|
||||
Manager::MetadataItr metadata, bool allowGrowth,
|
||||
bool enableWindowedStats);
|
||||
virtual ~Cache() = default;
|
||||
|
||||
// primary functionality; documented in derived classes
|
||||
virtual Finding find(void const* key, uint32_t keySize) = 0;
|
||||
virtual bool insert(CachedValue* value) = 0;
|
||||
virtual bool remove(void const* key, uint32_t keySize) = 0;
|
||||
virtual bool blacklist(void const* key, uint32_t keySize) = 0;
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief Returns the limit on memory usage for this cache in bytes.
|
||||
|
@ -157,8 +177,10 @@ class Cache {
|
|||
insertEviction = 3,
|
||||
insertNoEviction = 4
|
||||
};
|
||||
static uint64_t _evictionStatsCapacity;
|
||||
StatBuffer _evictionStats;
|
||||
std::atomic<uint64_t> _insertionCount;
|
||||
static uint64_t _findStatsCapacity;
|
||||
bool _enableWindowedStats;
|
||||
std::unique_ptr<StatBuffer> _findStats;
|
||||
std::atomic<uint64_t> _findHits;
|
||||
|
@ -174,6 +196,7 @@ class Cache {
|
|||
// times to wait until requesting is allowed again
|
||||
Manager::time_point _migrateRequestTime;
|
||||
Manager::time_point _resizeRequestTime;
|
||||
bool _lastResizeRequestStatus;
|
||||
|
||||
// friend class manager and tasks
|
||||
friend class FreeMemoryTask;
|
||||
|
@ -184,12 +207,6 @@ class Cache {
|
|||
// shutdown cache and let its memory be reclaimed
|
||||
static void destroy(std::shared_ptr<Cache> cache);
|
||||
|
||||
Cache(Manager* manager, uint64_t requestedLimit, bool allowGrowth,
|
||||
bool enableWindowedStats, std::function<void(Cache*)> deleter,
|
||||
uint64_t size);
|
||||
|
||||
virtual ~Cache() = default;
|
||||
|
||||
bool isOperational() const;
|
||||
void startOperation();
|
||||
void endOperation();
|
||||
|
|
|
@ -46,7 +46,8 @@ namespace cache {
|
|||
/// which over-writes itself after it fills up (thus only maintaining a recent
|
||||
/// window on the records).
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
template <class T>
|
||||
template <class T, class Comparator = std::equal_to<T>,
|
||||
class Hasher = std::hash<T>>
|
||||
class FrequencyBuffer {
|
||||
public:
|
||||
typedef std::vector<std::pair<T, uint64_t>> stats_t;
|
||||
|
@ -55,43 +56,54 @@ class FrequencyBuffer {
|
|||
std::atomic<uint64_t> _current;
|
||||
uint64_t _capacity;
|
||||
uint64_t _mask;
|
||||
std::unique_ptr<T[]> _buffer;
|
||||
std::unique_ptr<std::vector<T>> _buffer;
|
||||
Comparator _cmp;
|
||||
T _empty;
|
||||
|
||||
public:
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief Initialize with the given capacity.
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
FrequencyBuffer(uint64_t capacity) : _current(0) {
|
||||
size_t i = 0;
|
||||
for (; (1ULL << i) < capacity; i++) {
|
||||
FrequencyBuffer(uint64_t capacity) : _current(0), _cmp(), _empty() {
|
||||
uint64_t i = 0;
|
||||
for (; (static_cast<uint64_t>(1) << i) < capacity; i++) {
|
||||
}
|
||||
_capacity = (1ULL << i);
|
||||
_capacity = (static_cast<uint64_t>(1) << i);
|
||||
_mask = _capacity - 1;
|
||||
_buffer.reset(new T[_capacity]());
|
||||
_buffer.reset(new std::vector<T>(_capacity));
|
||||
TRI_ASSERT(_buffer->capacity() == _capacity);
|
||||
TRI_ASSERT(_buffer->size() == _capacity);
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief Reports the hidden allocation size (not captured by sizeof).
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
static uint64_t allocationSize(uint64_t capacity) {
|
||||
return sizeof(std::vector<T>) + (capacity * sizeof(T));
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief Reports the memory usage in bytes.
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
uint64_t memoryUsage() {
|
||||
return ((_capacity * sizeof(T)) + sizeof(FrequencyBuffer<T>));
|
||||
return ((_capacity * sizeof(T)) + sizeof(FrequencyBuffer<T>) +
|
||||
sizeof(std::vector<T>));
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief Insert an individual event record.
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
void insertRecord(T const& record) {
|
||||
++_current;
|
||||
_buffer[_current & _mask] = record;
|
||||
void insertRecord(T record) {
|
||||
(*_buffer)[_current++ & _mask] = record;
|
||||
}
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief Remove all occurrences of the specified event record.
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
void purgeRecord(T const& record) {
|
||||
void purgeRecord(T record) {
|
||||
for (size_t i = 0; i < _capacity; i++) {
|
||||
if (_buffer[i] == record) {
|
||||
_buffer[i] = T();
|
||||
if (_cmp((*_buffer)[i], record)) {
|
||||
(*_buffer)[i] = _empty;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -100,12 +112,12 @@ class FrequencyBuffer {
|
|||
/// @brief Return a list of (event, count) pairs for each recorded event in
|
||||
/// ascending order.
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
std::shared_ptr<FrequencyBuffer::stats_t> getFrequencies() const {
|
||||
std::shared_ptr<typename FrequencyBuffer::stats_t> getFrequencies() const {
|
||||
// calculate frequencies
|
||||
std::unordered_map<T, uint64_t> frequencies;
|
||||
std::unordered_map<T, uint64_t, Hasher, Comparator> frequencies;
|
||||
for (size_t i = 0; i < _capacity; i++) {
|
||||
T entry = _buffer[i];
|
||||
if (entry != T()) {
|
||||
T const entry = (*_buffer)[i];
|
||||
if (!_cmp(entry, _empty)) {
|
||||
frequencies[entry]++;
|
||||
}
|
||||
}
|
||||
|
@ -129,7 +141,7 @@ class FrequencyBuffer {
|
|||
//////////////////////////////////////////////////////////////////////////////
|
||||
void clear() {
|
||||
for (size_t i = 0; i < _capacity; i++) {
|
||||
_buffer[i] = T();
|
||||
(*_buffer)[i] = T();
|
||||
}
|
||||
}
|
||||
};
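
FrequencyBuffer now accepts a Comparator and a Hasher, so it can track element types that lack operator== and a std::hash specialization; the Manager uses this below to switch its access statistics from shared_ptr to weak_ptr. A minimal usage sketch under that assumption, with illustrative functor names:

#include <functional>
#include <memory>

// Equivalence and hashing for weak_ptr, mirroring the cmp_weak_ptr and
// hash_weak_ptr functors that Manager defines in the next file.
struct WeakEq {
  bool operator()(std::weak_ptr<int> const& a,
                  std::weak_ptr<int> const& b) const {
    return !a.owner_before(b) && !b.owner_before(a);
  }
};
struct WeakHash {
  size_t operator()(std::weak_ptr<int> const& wp) const {
    return std::hash<std::shared_ptr<int>>()(wp.lock());
  }
};

// FrequencyBuffer<std::weak_ptr<int>, WeakEq, WeakHash> buffer(1024);
// buffer.insertRecord(someWeakPtr);  // no operator== or std::hash needed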

@ -31,6 +31,7 @@
#include "Cache/Metadata.h"
#include "Cache/PlainCache.h"
#include "Cache/State.h"
#include "Cache/Transaction.h"
#include "Cache/TransactionalCache.h"

#include <stdint.h>

@ -57,6 +58,17 @@ static constexpr uint64_t CACHE_RECORD_OVERHEAD = sizeof(Metadata) + 16;
static constexpr uint64_t TABLE_LISTS_OVERHEAD = 32 * 16 * 8;
static constexpr int64_t TRIES_FAST = 100;

bool Manager::cmp_weak_ptr::operator()(
    std::weak_ptr<Cache> const& left, std::weak_ptr<Cache> const& right) const {
  return !left.owner_before(right) && !right.owner_before(left);
}

size_t Manager::hash_weak_ptr::operator()(
    const std::weak_ptr<Cache>& wp) const {
  auto sp = wp.lock();
  return std::hash<decltype(sp)>()(sp);
}
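
Two properties of these functors are worth noting: owner_before gives owner-based equivalence (two weak_ptrs sharing a control block compare equal even after expiry), while the hash goes through lock(), so every expired entry hashes like a null shared_ptr. That collision is harmless here because expired entries are filtered out when the statistics are read. An illustrative check, not part of the patch:

#include <cassert>
#include <functional>
#include <memory>

int main() {
  std::weak_ptr<int> wp;
  {
    auto sp = std::make_shared<int>(42);
    wp = sp;
  }  // sp destroyed; wp is now expired
  assert(wp.lock() == nullptr);
  // expired entries all hash to the hash of a null shared_ptr
  assert(std::hash<std::shared_ptr<int>>()(wp.lock()) ==
         std::hash<std::shared_ptr<int>>()(std::shared_ptr<int>()));
  return 0;
}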

Manager::Manager(boost::asio::io_service* ioService, uint64_t globalLimit,
                 bool enableWindowedStats)
    : _state(),

@ -98,21 +110,38 @@ std::shared_ptr<Cache> Manager::createCache(Manager::CacheType type,
  std::shared_ptr<Cache> result(nullptr);
  _state.lock();
  bool allowed = isOperational();
  MetadataItr metadata = _caches.end();
  _state.unlock();

  if (allowed) {
    uint64_t fixedSize = 0;
    switch (type) {
      case CacheType::Plain:
        fixedSize = PlainCache::allocationSize(enableWindowedStats);
        break;
      case CacheType::Transactional:
        fixedSize = TransactionalCache::allocationSize(enableWindowedStats);
        break;
      default:
        break;
    }
    std::tie(allowed, metadata) = registerCache(requestedLimit, fixedSize);
  }

  if (allowed) {
    switch (type) {
      case CacheType::Plain:
        result = PlainCache::create(this, requestedLimit, allowGrowth,
        result = PlainCache::create(this, metadata, allowGrowth,
                                    enableWindowedStats);
        break;
      case CacheType::Transactional:
        result = TransactionalCache::create(this, requestedLimit, allowGrowth,
        result = TransactionalCache::create(this, metadata, allowGrowth,
                                            enableWindowedStats);
        break;
      default:
        break;
    }
    metadata->link(result);
  }

  return result;
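
The construction protocol is now two-phase: the manager first reserves memory and a metadata slot via registerCache, then the concrete cache is built (gated by Cache::ConstructionGuard), and finally metadata->link ties the record back to the live object. A condensed sketch of that sequencing with simplified stand-in types, not the patch's API:

#include <memory>

struct Widget;  // forward declaration

struct Record {
  std::shared_ptr<Widget> owner;
};

struct Widget : std::enable_shared_from_this<Widget> {
  struct Guard {};  // restricts construction to factory code, like ConstructionGuard
  explicit Widget(Guard) {}
};

std::shared_ptr<Widget> createWidget(Record& record, bool reservationOk) {
  std::shared_ptr<Widget> result;
  if (reservationOk) {                                   // 1. reservation granted
    result = std::make_shared<Widget>(Widget::Guard{});  // 2. construct via guard
    record.owner = result;                               // 3. link the record to it
  }
  return result;
}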

@ -200,10 +229,12 @@ std::pair<double, double> Manager::globalHitRates() {
  double lifetimeRate = std::nan("");
  double windowedRate = std::nan("");

  uint64_t currentMisses = _findMisses.load();
  uint64_t currentHits = _findHits.load();
  lifetimeRate = 100 * (static_cast<double>(currentHits) /
                        static_cast<double>(currentHits + currentMisses));
  uint64_t currentMisses = _findMisses.load();
  if (currentHits + currentMisses > 0) {
    lifetimeRate = 100 * (static_cast<double>(currentHits) /
                          static_cast<double>(currentHits + currentMisses));
  }

  if (_enableWindowedStats && _findStats.get() != nullptr) {
    auto stats = _findStats->getFrequencies();

@ -221,22 +252,25 @@ std::pair<double, double> Manager::globalHitRates() {
      currentHits = (*stats)[1].second;
      currentMisses = (*stats)[0].second;
    }
    windowedRate = 100 * (static_cast<double>(currentHits) /
                          static_cast<double>(currentHits + currentMisses));
    if (currentHits + currentMisses > 0) {
      windowedRate = 100 * (static_cast<double>(currentHits) /
                            static_cast<double>(currentHits + currentMisses));
    }
  }

  return std::pair<double, double>(lifetimeRate, windowedRate);
}

void Manager::startTransaction() { _transactions.start(); }
Transaction* Manager::beginTransaction(bool readOnly) {
  return _transactions.begin(readOnly);
}

void Manager::endTransaction() { _transactions.end(); }
void Manager::endTransaction(Transaction* tx) { _transactions.end(tx); }

Manager::MetadataItr Manager::registerCache(Cache* cache,
                                            uint64_t requestedLimit,
                                            std::function<void(Cache*)> deleter,
                                            uint64_t fixedSize) {
std::pair<bool, Manager::MetadataItr> Manager::registerCache(
    uint64_t requestedLimit, uint64_t fixedSize) {
  bool ok = true;
  uint32_t logSize = 0;
  uint32_t tableLogSize = MIN_TABLE_LOG_SIZE;
  for (; (1ULL << logSize) < requestedLimit; logSize++) {

@ -248,39 +282,42 @@ Manager::MetadataItr Manager::registerCache(Cache* cache,

  _state.lock();
  if (!isOperational()) {
    _state.unlock();
    throw std::bad_alloc();
    ok = false;
  }

  while (logSize >= MIN_LOG_SIZE) {
    uint64_t tableAllocation =
        _tables[tableLogSize].empty() ? tableSize(tableLogSize) : 0;
    if (increaseAllowed(grantedLimit + tableAllocation + CACHE_RECORD_OVERHEAD +
                        fixedSize)) {
      break;
  if (ok) {
    while (logSize >= MIN_LOG_SIZE) {
      uint64_t tableAllocation =
          _tables[tableLogSize].empty() ? tableSize(tableLogSize) : 0;
      if (increaseAllowed(grantedLimit + tableAllocation +
                          CACHE_RECORD_OVERHEAD + fixedSize)) {
        break;
      }

      grantedLimit >>= 1U;
      logSize--;
      if (tableLogSize > MIN_TABLE_LOG_SIZE) {
        tableLogSize--;
      }
    }

    grantedLimit >>= 1U;
    logSize--;
    if (tableLogSize > MIN_TABLE_LOG_SIZE) {
      tableLogSize--;
    if (logSize < MIN_LOG_SIZE) {
      ok = false;
    }
  }

  if (logSize < MIN_LOG_SIZE) {
    _state.unlock();
    throw std::bad_alloc();
  MetadataItr metadata = _caches.end();
  if (ok) {
    _globalAllocation += (grantedLimit + CACHE_RECORD_OVERHEAD + fixedSize);
    _caches.emplace_front(grantedLimit);
    metadata = _caches.begin();
    metadata->lock();
    leaseTable(metadata, tableLogSize);
    metadata->unlock();
  }

  _globalAllocation += (grantedLimit + CACHE_RECORD_OVERHEAD + fixedSize);
  _caches.emplace_front(std::shared_ptr<Cache>(cache, deleter), grantedLimit);
  MetadataItr metadata = _caches.begin();
  metadata->lock();
  leaseTable(metadata, tableLogSize);
  metadata->unlock();
  _state.unlock();

  return metadata;
  return std::pair<bool, MetadataItr>(ok, metadata);
}
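
registerCache no longer throws std::bad_alloc from inside the manager's lock scope; failure travels back through the returned pair. The size negotiation itself is unchanged: halve the power-of-two grant until the budget accepts it or it falls below the minimum. A self-contained sketch of that loop; the constant and the callback are stand-ins for the manager's real members:

#include <cstdint>

static constexpr uint32_t MIN_LOG_SIZE = 10;  // stand-in for the real constant

uint64_t negotiateLimit(uint64_t requestedLimit,
                        bool (*increaseAllowed)(uint64_t), bool& ok) {
  uint32_t logSize = 0;
  for (; (1ULL << logSize) < requestedLimit; logSize++) {
  }
  uint64_t grantedLimit = 1ULL << logSize;
  while (logSize >= MIN_LOG_SIZE) {
    if (increaseAllowed(grantedLimit)) {
      break;  // the current grant fits the global budget
    }
    grantedLimit >>= 1U;  // otherwise halve and retry
    logSize--;
  }
  ok = (logSize >= MIN_LOG_SIZE);  // failure reported by value, not by throwing
  return ok ? grantedLimit : 0;
}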

void Manager::unregisterCache(Manager::MetadataItr& metadata) {

@ -304,7 +341,7 @@ void Manager::unregisterCache(Manager::MetadataItr& metadata) {

std::pair<bool, Manager::time_point> Manager::requestResize(
    Manager::MetadataItr& metadata, uint64_t requestedLimit) {
  Manager::time_point nextRequest = futureTime(30);
  Manager::time_point nextRequest = futureTime(100);
  bool allowed = false;

  bool ok = _state.lock(TRIES_FAST);

@ -334,7 +371,7 @@ std::pair<bool, Manager::time_point> Manager::requestResize(

std::pair<bool, Manager::time_point> Manager::requestMigrate(
    Manager::MetadataItr& metadata, uint32_t requestedLogSize) {
  Manager::time_point nextRequest = futureTime(30);
  Manager::time_point nextRequest = futureTime(100);
  bool allowed = false;

  bool ok = _state.lock(TRIES_FAST);

@ -363,9 +400,10 @@ std::pair<bool, Manager::time_point> Manager::requestMigrate(
}

void Manager::reportAccess(std::shared_ptr<Cache> cache) {
  if (((++_accessCounter) & 0x7FULL) == 0) {  // record 1 in 128
    _accessStats.insertRecord(cache);
  }
  // if (((++_accessCounter) & static_cast<uint64_t>(7)) == 0) {  // record 1 in
  // 8
  _accessStats.insertRecord(cache);
  //}
}

void Manager::recordHitStat(Manager::Stat stat) {

@ -457,26 +495,35 @@ bool Manager::rebalance() {

  // allow background tasks if more than 7/8ths full
  bool allowTasks =
      _globalAllocation > (_globalHardLimit - (_globalHardLimit >> 3));
      _globalAllocation >
      static_cast<uint64_t>(0.875 * static_cast<double>(_globalHardLimit));

  // be aggressive if more than 3/4ths full
  bool beAggressive =
      _globalAllocation > (_globalHardLimit - (_globalHardLimit >> 2));
      _globalAllocation >
      static_cast<uint64_t>(0.75 * static_cast<double>(_globalHardLimit));

  // aim for 1/4th with background tasks, 1/8th if no tasks but aggressive, no
  // aim for 3/8th with background tasks, 1/4th if no tasks but aggressive, no
  // goal otherwise
  uint64_t goal = beAggressive ? (allowTasks ? (_globalAllocation >> 2)
                                             : (_globalAllocation >> 3))
                               : 0;
  uint64_t goal =
      beAggressive
          ? (allowTasks ? static_cast<uint64_t>(
                              0.375 * static_cast<double>(_globalHardLimit))
                        : static_cast<uint64_t>(
                              0.25 * static_cast<double>(_globalHardLimit)))
          : 0;

  // get stats on cache access to prioritize freeing from less frequently used
  // caches first, so more frequently used ones stay large
  std::shared_ptr<PriorityList> cacheList = priorityList();
  if (goal > 0) {
    // get stats on cache access to prioritize freeing from less frequently used
    // caches first, so more frequently used ones stay large
    std::shared_ptr<PriorityList> cacheList = priorityList();

    // just adjust limits
    uint64_t reclaimed = resizeAllCaches(TaskEnvironment::rebalancing, cacheList,
                                         allowTasks, beAggressive, goal);
    _globalAllocation -= reclaimed;
    // just adjust limits
    uint64_t reclaimed =
        resizeAllCaches(TaskEnvironment::rebalancing, cacheList, allowTasks,
                        beAggressive, goal);
    _globalAllocation -= reclaimed;
  }

  if (_rebalancingTasks.load() == 0) {
    _state.toggleFlag(State::Flag::rebalancing);
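
The rebalance hunk above swaps the shift-based thresholds for explicit fractions and only computes the priority list when there is actually a reclamation goal. A sketch of the new threshold arithmetic, using the values from the hunk (illustrative free function, not the patch itself):

#include <cstdint>

// Tasks allowed above 7/8 full, aggressive above 3/4; the goal is 3/8 or 1/4
// of the hard limit, now expressed via double math instead of bit shifts.
uint64_t rebalanceGoal(uint64_t allocation, uint64_t hardLimit) {
  bool allowTasks =
      allocation > static_cast<uint64_t>(0.875 * static_cast<double>(hardLimit));
  bool beAggressive =
      allocation > static_cast<uint64_t>(0.75 * static_cast<double>(hardLimit));
  return beAggressive
             ? (allowTasks
                    ? static_cast<uint64_t>(0.375 * static_cast<double>(hardLimit))
                    : static_cast<uint64_t>(0.25 * static_cast<double>(hardLimit)))
             : 0;
}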

@ -520,8 +567,9 @@ void Manager::internalResize(uint64_t newGlobalLimit, bool firstAttempt) {
    cacheList = priorityList();

    // first just adjust limits down to usage
    uint64_t reclaimed = resizeAllCaches(TaskEnvironment::resizing, cacheList, true,
                                         true, _globalAllocation - _globalSoftLimit);
    uint64_t reclaimed =
        resizeAllCaches(TaskEnvironment::resizing, cacheList, true, true,
                        _globalAllocation - _globalSoftLimit);
    _globalAllocation -= reclaimed;
    done = adjustGlobalLimitsIfAllowed(newGlobalLimit);
  }

@ -563,11 +611,10 @@ uint64_t Manager::resizeAllCaches(Manager::TaskEnvironment environment,
    if (aggressive) {
      newLimit =
          (noTasks ? metadata->usage()
                   : (std::min)(metadata->usage(), metadata->hardLimit() / 4));
    } else {
      newLimit =
          (noTasks ? (std::max)(metadata->usage(), metadata->hardLimit() / 2)
                   : (std::min)(metadata->usage(), metadata->hardLimit() / 2));
    } else {
      newLimit = (std::max)(metadata->usage(),
                            (metadata->hardLimit() + metadata->usage()) / 2);
    }
    newLimit = (std::max)(newLimit, MIN_CACHE_SIZE);

@ -763,9 +810,11 @@ std::shared_ptr<Manager::PriorityList> Manager::priorityList() {

  // catalog accessed caches
  auto stats = _accessStats.getFrequencies();
  std::set<Cache*> accessed;
  std::set<std::shared_ptr<Cache>> accessed;
  for (auto s : *stats) {
    accessed.emplace(s.first.get());
    if (auto cache = s.first.lock()) {
      accessed.emplace(cache);
    }
  }

  // gather all unaccessed caches at beginning of list

@ -774,7 +823,7 @@ std::shared_ptr<Manager::PriorityList> Manager::priorityList() {
    std::shared_ptr<Cache> cache = m->cache();
    m->unlock();

    auto found = accessed.find(cache.get());
    auto found = accessed.find(cache);
    if (found == accessed.end()) {
      list->emplace_back(cache);
    }

@ -782,13 +831,15 @@ std::shared_ptr<Manager::PriorityList> Manager::priorityList() {

  // gather all accessed caches in order
  for (auto s : *stats) {
    list->emplace_back(s.first);
    if (auto cache = s.first.lock()) {
      list->emplace_back(cache);
    }
  }

  return list;
}

Manager::time_point Manager::futureTime(uint64_t secondsFromNow) {
Manager::time_point Manager::futureTime(uint64_t millisecondsFromNow) {
  return (std::chrono::steady_clock::now() +
          std::chrono::seconds(secondsFromNow));
          std::chrono::milliseconds(millisecondsFromNow));
}
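
futureTime now measures milliseconds; together with the requestResize/requestMigrate hunks earlier, the retry backoff drops from 30 seconds to 100 milliseconds. A standalone rendering of the helper:

#include <chrono>
#include <cstdint>

using time_point = std::chrono::time_point<std::chrono::steady_clock>;

time_point futureTime(uint64_t millisecondsFromNow) {
  return std::chrono::steady_clock::now() +
         std::chrono::milliseconds(millisecondsFromNow);
}

// futureTime(100) now yields a retry point 100 ms out, not 30 s.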

@ -30,7 +30,8 @@
#include "Cache/FrequencyBuffer.h"
#include "Cache/Metadata.h"
#include "Cache/State.h"
#include "Cache/TransactionWindow.h"
#include "Cache/Transaction.h"
#include "Cache/TransactionManager.h"

#include <stdint.h>
#include <atomic>

@ -67,9 +68,19 @@ class Rebalancer;  // forward declaration
/// need a different instance.
////////////////////////////////////////////////////////////////////////////////
class Manager {
 protected:
  struct cmp_weak_ptr {
    bool operator()(std::weak_ptr<Cache> const& left,
                    std::weak_ptr<Cache> const& right) const;
  };
  struct hash_weak_ptr {
    size_t operator()(const std::weak_ptr<Cache>& wp) const;
  };

 public:
  static uint64_t MINIMUM_SIZE;
  typedef FrequencyBuffer<std::shared_ptr<Cache>> AccessStatBuffer;
  typedef FrequencyBuffer<std::weak_ptr<Cache>, cmp_weak_ptr, hash_weak_ptr>
      AccessStatBuffer;
  typedef FrequencyBuffer<uint8_t> FindStatBuffer;
  typedef std::vector<std::shared_ptr<Cache>> PriorityList;
  typedef std::chrono::time_point<std::chrono::steady_clock> time_point;

@ -144,14 +155,18 @@ class Manager {
  std::pair<double, double> globalHitRates();

  //////////////////////////////////////////////////////////////////////////////
  /// @brief Signal the beginning of a transaction.
  /// @brief Open a new transaction.
  ///
  /// The transaction is considered read-only if it is guaranteed not to write
  /// to the backing store. A read-only transaction may, however, write to the
  /// cache.
  //////////////////////////////////////////////////////////////////////////////
  void startTransaction();
  Transaction* beginTransaction(bool readOnly);

  //////////////////////////////////////////////////////////////////////////////
  /// @brief Signal the end of a transaction.
  /// @brief Signal the end of a transaction. Deletes the passed Transaction.
  //////////////////////////////////////////////////////////////////////////////
  void endTransaction();
  void endTransaction(Transaction* tx);

 private:
  // simple state variable for locking and other purposes

@ -180,7 +195,7 @@ class Manager {
  uint64_t _globalAllocation;

  // transaction management
  TransactionWindow _transactions;
  TransactionManager _transactions;

  // task management
  enum TaskEnvironment { none, rebalancing, resizing };

@ -200,9 +215,8 @@ class Manager {

 private:  // used by caches
  // register and unregister individual caches
  Manager::MetadataItr registerCache(Cache* cache, uint64_t requestedLimit,
                                     std::function<void(Cache*)> deleter,
                                     uint64_t fixedSize);
  std::pair<bool, Manager::MetadataItr> registerCache(uint64_t requestedLimit,
                                                      uint64_t fixedSize);
  void unregisterCache(Manager::MetadataItr& metadata);

  // allow individual caches to request changes to their allocations

@ -259,7 +273,7 @@ class Manager {
  std::shared_ptr<PriorityList> priorityList();

  // helper for wait times
  Manager::time_point futureTime(uint64_t secondsFromNow);
  Manager::time_point futureTime(uint64_t millisecondsFromNow);
};

};  // end namespace cache

@ -30,16 +30,15 @@

using namespace arangodb::cache;

Metadata::Metadata(std::shared_ptr<Cache> cache, uint64_t limit, uint8_t* table,
                   uint32_t logSize)
Metadata::Metadata(uint64_t limit)
    : _state(),
      _cache(cache),
      _cache(nullptr),
      _usage(0),
      _softLimit(limit),
      _hardLimit(limit),
      _table(table),
      _table(nullptr),
      _auxiliaryTable(nullptr),
      _logSize(logSize),
      _logSize(0),
      _auxiliaryLogSize(0) {}

Metadata::Metadata(Metadata const& other)

@ -53,6 +52,12 @@ Metadata::Metadata(Metadata const& other)
      _logSize(other._logSize),
      _auxiliaryLogSize(other._auxiliaryLogSize) {}

void Metadata::link(std::shared_ptr<Cache> cache) {
  lock();
  _cache = cache;
  unlock();
}

void Metadata::lock() { _state.lock(); }

void Metadata::unlock() {

@ -44,14 +44,18 @@ class Metadata {
  //////////////////////////////////////////////////////////////////////////////
  /// @brief Initializes record with given information.
  //////////////////////////////////////////////////////////////////////////////
  Metadata(std::shared_ptr<Cache> cache, uint64_t limit,
           uint8_t* table = nullptr, uint32_t logSize = 0);
  Metadata(uint64_t limit);

  //////////////////////////////////////////////////////////////////////////////
  /// @brief Initializes record from an existing record.
  //////////////////////////////////////////////////////////////////////////////
  Metadata(Metadata const& other);

  //////////////////////////////////////////////////////////////////////////////
  /// @brief Links the metadata object to an actual cache.
  //////////////////////////////////////////////////////////////////////////////
  void link(std::shared_ptr<Cache> cache);

  //////////////////////////////////////////////////////////////////////////////
  /// @brief Locks the record.
  //////////////////////////////////////////////////////////////////////////////

@ -114,14 +114,14 @@ CachedValue* PlainBucket::remove(uint32_t hash, void const* key,
  return value;
}

CachedValue* PlainBucket::evictionCandidate() const {
CachedValue* PlainBucket::evictionCandidate(bool ignoreRefCount) const {
  TRI_ASSERT(isLocked());
  for (size_t i = 0; i < SLOTS_DATA; i++) {
    size_t slot = SLOTS_DATA - (i + 1);
    if (_cachedHashes[slot] == 0) {
      continue;
    }
    if (_cachedData[slot]->isFreeable()) {
    if (ignoreRefCount || _cachedData[slot]->isFreeable()) {
      return _cachedData[slot];
    }
  }

@ -166,8 +166,6 @@ void PlainBucket::moveSlot(size_t slot, bool moveToFront) {
      _cachedData[i] = _cachedData[i + 1];
    }
  }
  if (i != slot) {
    _cachedHashes[i] = hash;
    _cachedData[i] = value;
  }
  _cachedHashes[i] = hash;
  _cachedData[i] = value;
}

@ -128,11 +128,12 @@ struct alignas(64) PlainBucket {
  /// @brief Searches for the best candidate in the bucket to evict. Requires
  /// state to be locked.
  ///
  /// Returns a pointer to least recently used freeable value. If the bucket
  /// contains no values or all have outstanding references, then it returns
  /// nullptr.
  /// Usually returns a pointer to least recently used freeable value. If the
  /// bucket contains no values or all have outstanding references, then it
  /// returns nullptr. In the case that ignoreRefCount is set to true, then it
  /// simply returns the least recently used value, regardless of freeability.
  //////////////////////////////////////////////////////////////////////////////
  CachedValue* evictionCandidate() const;
  CachedValue* evictionCandidate(bool ignoreRefCount = false) const;

  //////////////////////////////////////////////////////////////////////////////
  /// @brief Evicts the given value from the bucket. Requires state to be

@ -71,36 +71,46 @@ bool PlainCache::insert(CachedValue* value) {
  std::tie(ok, bucket) = getBucket(hash, TRIES_FAST);

  if (ok) {
    bool allowed = true;
    bool eviction = false;
    int64_t change = value->size();
    CachedValue* candidate = bucket->find(hash, value->key(), value->keySize);

    if (candidate == nullptr && bucket->isFull()) {
      candidate = bucket->evictionCandidate();
      if (candidate == nullptr) {
        allowed = false;
      } else {
        eviction = true;
      }
    }
    if (candidate != nullptr) {
      change -= candidate->size();
    }

    _metadata->lock();
    bool allowed = _metadata->adjustUsageIfAllowed(change);
    _metadata->unlock();

    if (allowed) {
      if (candidate != nullptr) {
        bucket->evict(candidate, true);
        freeValue(candidate);
        recordStat(Stat::insertEviction);
      } else {
        recordStat(Stat::insertNoEviction);
        change -= candidate->size();
      }

      _metadata->lock();
      allowed = _metadata->adjustUsageIfAllowed(change);
      _metadata->unlock();

      if (allowed) {
        if (candidate != nullptr) {
          bucket->evict(candidate, true);
          freeValue(candidate);
        }
        recordStat(eviction ? Stat::insertEviction : Stat::insertNoEviction);
        bucket->insert(hash, value);
        inserted = true;
      } else {
        requestResize();  // let function do the hard work
      }
      bucket->insert(hash, value);
      inserted = true;
    } else {
      requestResize();  // let function do the hard work
    }

    bucket->unlock();
    requestMigrate();  // let function do the hard work
    if (inserted) {
      requestMigrate();  // let function do the hard work
    }
    endOperation();
  }
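
The reworked insert path distinguishes replacing a same-key entry from evicting an unrelated value: only the latter is recorded as insertEviction, and a migration is requested only after a successful insert. Distilled into a tiny sketch with illustrative names, not the patch's API:

enum class InsertStat { insertEviction, insertNoEviction };

// Replacing an existing entry for the same key is not an eviction; only
// displacing an unrelated value from a full bucket counts as one.
InsertStat classifyInsert(bool foundSameKey, bool displacedOther) {
  return (!foundSameKey && displacedOther) ? InsertStat::insertEviction
                                           : InsertStat::insertNoEviction;
}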

@ -138,29 +148,29 @@ bool PlainCache::remove(void const* key, uint32_t keySize) {
  return removed;
}

std::shared_ptr<Cache> PlainCache::create(Manager* manager,
                                          uint64_t requestedSize,
                                          bool allowGrowth,
                                          bool enableWindowedStats) {
  PlainCache* cache =
      new PlainCache(manager, requestedSize, allowGrowth, enableWindowedStats);
bool PlainCache::blacklist(void const* key, uint32_t keySize) { return false; }

  if (cache == nullptr) {
    return std::shared_ptr<Cache>(nullptr);
  }

  cache->metadata()->lock();
  std::shared_ptr<Cache> result = cache->metadata()->cache();
  cache->metadata()->unlock();

  return result;
uint64_t PlainCache::allocationSize(bool enableWindowedStats) {
  return sizeof(PlainCache) +
         StatBuffer::allocationSize(_evictionStatsCapacity) +
         (enableWindowedStats ? (sizeof(StatBuffer) +
                                 StatBuffer::allocationSize(_findStatsCapacity))
                              : 0);
}

PlainCache::PlainCache(Manager* manager, uint64_t requestedLimit,
                       bool allowGrowth, bool enableWindowedStats)
    : Cache(manager, requestedLimit, allowGrowth, enableWindowedStats,
            [](Cache* p) -> void { delete reinterpret_cast<PlainCache*>(p); },
            sizeof(PlainCache)),
std::shared_ptr<Cache> PlainCache::create(Manager* manager,
                                          Manager::MetadataItr metadata,
                                          bool allowGrowth,
                                          bool enableWindowedStats) {
  return std::make_shared<PlainCache>(Cache::ConstructionGuard(), manager,
                                      metadata, allowGrowth,
                                      enableWindowedStats);
}

PlainCache::PlainCache(Cache::ConstructionGuard guard, Manager* manager,
                       Manager::MetadataItr metadata, bool allowGrowth,
                       bool enableWindowedStats)
    : Cache(guard, manager, metadata, allowGrowth, enableWindowedStats),
      _table(nullptr),
      _logSize(0),
      _tableSize(1),

@ -186,7 +196,7 @@ PlainCache::PlainCache(Manager* manager, uint64_t requestedLimit,

PlainCache::~PlainCache() {
  _state.lock();
  if (isOperational()) {
  if (!_state.isSet(State::Flag::shutdown)) {
    _state.unlock();
    shutdown();
  }

@ -295,8 +305,8 @@ bool PlainCache::migrate() {
      for (size_t j = 0; j < PlainBucket::SLOTS_DATA; j++) {
        size_t k = PlainBucket::SLOTS_DATA - (j + 1);
        if ((*bucket)._cachedHashes[k] != 0) {
          uint32_t hash = (*bucket)._cachedHashes[k];
          CachedValue* value = (*bucket)._cachedData[k];
          uint32_t hash = bucket->_cachedHashes[k];
          CachedValue* value = bucket->_cachedData[k];

          uint32_t targetIndex =
              (hash & _auxiliaryBucketMask) >> _auxiliaryMaskShift;

@ -316,11 +326,13 @@ bool PlainCache::migrate() {
          if (haveSpace) {
            targetBucket->insert(hash, value);
          } else {
            uint64_t size = value->size();
            freeValue(value);
            reclaimMemory(size);
          }

          (*bucket)._cachedHashes[k] = 0;
          (*bucket)._cachedData[k] = nullptr;
          bucket->_cachedHashes[k] = 0;
          bucket->_cachedData[k] = nullptr;
        }
      }

@ -418,15 +430,12 @@ void PlainCache::clearTable(PlainBucket* table, uint64_t tableSize) {
  for (uint64_t i = 0; i < tableSize; i++) {
    PlainBucket* bucket = &(table[i]);
    bucket->lock(-1LL);
    CachedValue* value = bucket->evictionCandidate();
    while (value != nullptr) {
      bucket->evict(value);
      _metadata->lock();
      _metadata->adjustUsageIfAllowed(-static_cast<int64_t>(value->size()));
      _metadata->unlock();
      freeValue(value);

      value = bucket->evictionCandidate();
    for (size_t j = 0; j < PlainBucket::SLOTS_DATA; j++) {
      if (bucket->_cachedData[j] != nullptr) {
        uint64_t size = bucket->_cachedData[j]->size();
        freeValue(bucket->_cachedData[j]);
        reclaimMemory(size);
      }
    }
    bucket->clear();
  }

@ -42,8 +42,6 @@
namespace arangodb {
namespace cache {

class Manager;  // forward declaration

////////////////////////////////////////////////////////////////////////////////
/// @brief A simple, LRU-ish cache.
///

@ -53,6 +51,11 @@ class Manager;  // forward declaration
////////////////////////////////////////////////////////////////////////////////
class PlainCache final : public Cache {
 public:
  PlainCache(Cache::ConstructionGuard guard, Manager* manager,
             Manager::MetadataItr metadata, bool allowGrowth,
             bool enableWindowedStats);
  ~PlainCache();

  PlainCache() = delete;
  PlainCache(PlainCache const&) = delete;
  PlainCache& operator=(PlainCache const&) = delete;

@ -86,6 +89,11 @@ class PlainCache final : public Cache {
  //////////////////////////////////////////////////////////////////////////////
  bool remove(void const* key, uint32_t keySize);

  //////////////////////////////////////////////////////////////////////////////
  /// @brief Does nothing; convenience method inheritance compliance
  //////////////////////////////////////////////////////////////////////////////
  bool blacklist(void const* key, uint32_t keySize);

 private:
  // main table info
  PlainBucket* _table;

@ -107,15 +115,11 @@ class PlainCache final : public Cache {
  friend class MigrateTask;

 private:
  // creator -- do not use constructor explicitly
  static std::shared_ptr<Cache> create(Manager* manager, uint64_t requestedSize,
  static uint64_t allocationSize(bool enableWindowedStats);
  static std::shared_ptr<Cache> create(Manager* manager,
                                       Manager::MetadataItr metadata,
                                       bool allowGrowth,
                                       bool enableWindowedStats);

  PlainCache(Manager* manager, uint64_t requestedLimit, bool allowGrowth,
             bool enableWindowedStats);
  ~PlainCache();

  // management
  bool freeMemory();
  bool migrate();

@ -21,25 +21,13 @@
/// @author Daniel H. Larkin
////////////////////////////////////////////////////////////////////////////////

#include "Cache/TransactionWindow.h"
#include "Cache/Transaction.h"

#include <stdint.h>
#include <atomic>

using namespace arangodb::cache;

TransactionWindow::TransactionWindow() : _open(0), _term(0) {}
Transaction::Transaction() : term(0), readOnly(true), sensitive(false) {}

void TransactionWindow::start() {
  if (++_open == 1) {
    _term++;
  }
}

void TransactionWindow::end() {
  if (--_open == 0) {
    _term++;
  }
}

uint64_t TransactionWindow::term() { return _term.load(); }
Transaction::Transaction(bool ro)
    : term(0), readOnly(ro), sensitive(!readOnly) {}

@ -0,0 +1,47 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2017 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Daniel H. Larkin
////////////////////////////////////////////////////////////////////////////////

#ifndef ARANGODB_CACHE_TRANSACTION_H
#define ARANGODB_CACHE_TRANSACTION_H

#include "Basics/Common.h"

#include <stdint.h>

namespace arangodb {
namespace cache {

////////////////////////////////////////////////////////////////////////////////
/// @brief Structure to maintain information about an individual transaction.
struct Transaction {
  uint64_t term;
  bool readOnly;
  bool sensitive;

  Transaction();
  Transaction(bool ro);
};

};  // end namespace cache
};  // end namespace arangodb

#endif

@ -0,0 +1,83 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2014-2017 ArangoDB GmbH, Cologne, Germany
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Daniel H. Larkin
////////////////////////////////////////////////////////////////////////////////

#include "Cache/TransactionManager.h"
#include "Cache/State.h"
#include "Cache/Transaction.h"

#include <stdint.h>
#include <atomic>

using namespace arangodb::cache;

TransactionManager::TransactionManager()
    : _state(), _openReads(0), _openSensitive(0), _openWrites(0), _term(0) {}

Transaction* TransactionManager::begin(bool readOnly) {
  _state.lock();
  Transaction* tx = new Transaction(readOnly);

  if (readOnly) {
    _openReads++;
    if (_openWrites.load() > 0) {
      tx->sensitive = true;
      _openSensitive++;
    }
  } else {
    tx->sensitive = true;
    _openWrites++;
    if (++_openSensitive == 1) {
      _term++;
      _openSensitive += _openReads.load();
    }
  }
  tx->term = _term;
  _state.unlock();

  return tx;
}

void TransactionManager::end(Transaction* tx) {
  TRI_ASSERT(tx != nullptr);
  _state.lock();
  // if currently in sensitive phase, and transaction term is old, it was
  // upgraded to sensitive status
  if (((_term & static_cast<uint64_t>(1)) > 0) && (_term > tx->term)) {
    tx->sensitive = true;
  }

  if (tx->readOnly) {
    _openReads--;
  } else {
    _openWrites--;
  }

  if (tx->sensitive && (--_openSensitive == 0)) {
    _term++;
  }

  _state.unlock();
  delete tx;
}

uint64_t TransactionManager::term() { return _term.load(); }
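
TransactionManager replaces the old TransactionWindow: the term is bumped when the first sensitive transaction opens and again when the last one closes, so an odd term means a sensitive window is currently open. The transactional buckets rely on exactly this parity test (see haveOpenTransaction below). A distilled sketch with an illustrative name:

#include <cstdint>

// Odd term => a sensitive window is open; even term => quiescent.
bool inSensitiveWindow(uint64_t term) { return (term & 1ULL) > 0; }

// e.g. term sequence: 0 (idle) -> 1 (first sensitive transaction begins)
// -> 2 (last sensitive transaction ends) -> 3 (next one begins) -> ...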

@ -25,6 +25,8 @@
#define ARANGODB_CACHE_TRANSACTION_WINDOW_H

#include "Basics/Common.h"
#include "Cache/State.h"
#include "Cache/Transaction.h"

#include <stdint.h>
#include <atomic>

@ -33,28 +35,35 @@ namespace arangodb {
namespace cache {

////////////////////////////////////////////////////////////////////////////////
/// @brief Manage windows in time when there are either no ongoing transactions
/// or some.
/// @brief Manage global cache transactions.
///
/// Allows clients to start a transaction, end a transaction, and query an
/// identifier for the current window.
/// identifier for the current window. If the identifier is even, there are no
/// ongoing sensitive transactions, and it is safe to store any values retrieved
/// from the backing store to transactional caches. If the identifier is odd,
/// then some values may be blacklisted by transactional caches (if they have
/// been written to the backing store in the current window).
////////////////////////////////////////////////////////////////////////////////
class TransactionWindow {
class TransactionManager {
 public:
  //////////////////////////////////////////////////////////////////////////////
  /// @brief Initialize state with no open transactions.
  //////////////////////////////////////////////////////////////////////////////
  TransactionWindow();
  TransactionManager();

  //////////////////////////////////////////////////////////////////////////////
  /// @brief Signal the beginning of a transaction.
  /// @brief Open a new transaction.
  ///
  /// The transaction is considered read-only if it is guaranteed not to write
  /// to the backing store. A read-only transaction may, however, write to the
  /// cache.
  //////////////////////////////////////////////////////////////////////////////
  void start();
  Transaction* begin(bool readOnly);

  //////////////////////////////////////////////////////////////////////////////
  /// @brief Signal the end of a transaction.
  /// @brief Signal the end of a transaction. Deletes the passed Transaction.
  //////////////////////////////////////////////////////////////////////////////
  void end();
  void end(Transaction* tx);

  //////////////////////////////////////////////////////////////////////////////
  /// @brief Return the current window identifier.

@ -62,7 +71,10 @@ class TransactionWindow {
  uint64_t term();

 private:
  std::atomic<uint64_t> _open;
  State _state;
  std::atomic<uint64_t> _openReads;
  std::atomic<uint64_t> _openSensitive;
  std::atomic<uint64_t> _openWrites;
  std::atomic<uint64_t> _term;
};

@ -35,12 +35,14 @@ size_t TransactionalBucket::SLOTS_DATA = 3;
size_t TransactionalBucket::SLOTS_BLACKLIST = 4;

TransactionalBucket::TransactionalBucket() {
  memset(this, 0, sizeof(TransactionalBucket));
  _state.lock();
  clear();
}

bool TransactionalBucket::lock(uint64_t transactionTerm, int64_t maxTries) {
  return _state.lock(maxTries,
                     [&]() -> void { updateBlacklistTerm(transactionTerm); });
  return _state.lock(maxTries, [this, transactionTerm]() -> void {
    updateBlacklistTerm(transactionTerm);
  });
}

void TransactionalBucket::unlock() {

@ -52,12 +54,12 @@ bool TransactionalBucket::isLocked() const { return _state.isLocked(); }

bool TransactionalBucket::isMigrated() const {
  TRI_ASSERT(isLocked());
  return _state.isSet(State::Flag::blacklisted);
  return _state.isSet(State::Flag::migrated);
}

bool TransactionalBucket::isFullyBlacklisted() const {
  TRI_ASSERT(isLocked());
  return _state.isSet(State::Flag::blacklisted);
  return (haveOpenTransaction() && _state.isSet(State::Flag::blacklisted));
}

bool TransactionalBucket::isFull() const {

@ -125,30 +127,39 @@ CachedValue* TransactionalBucket::remove(uint32_t hash, void const* key,
  return value;
}

void TransactionalBucket::blacklist(uint32_t hash, void const* key,
                                    uint32_t keySize) {
CachedValue* TransactionalBucket::blacklist(uint32_t hash, void const* key,
                                            uint32_t keySize) {
  TRI_ASSERT(isLocked());
  // remove key if it's here
  remove(hash, key, keySize);
  if (!haveOpenTransaction()) {
    return nullptr;
  }

  if (isFullyBlacklisted()) {
    return;
  // remove key if it's here
  CachedValue* value = (keySize == 0) ? nullptr : remove(hash, key, keySize);

  if (isBlacklisted(hash)) {
    return value;
  }

  for (size_t i = 0; i < SLOTS_BLACKLIST; i++) {
    if (_blacklistHashes[i] == 0) {
      // found an empty slot
      _blacklistHashes[i] = hash;
      return;
      return value;
    }
  }

  // no empty slot found, fully blacklist
  _state.toggleFlag(State::Flag::blacklisted);
  return value;
}

bool TransactionalBucket::isBlacklisted(uint32_t hash) const {
  TRI_ASSERT(isLocked());
  if (!haveOpenTransaction()) {
    return false;
  }

  if (isFullyBlacklisted()) {
    return true;
  }

@ -164,14 +175,14 @@ bool TransactionalBucket::isBlacklisted(uint32_t hash) const {
  return blacklisted;
}

CachedValue* TransactionalBucket::evictionCandidate() const {
CachedValue* TransactionalBucket::evictionCandidate(bool ignoreRefCount) const {
  TRI_ASSERT(isLocked());
  for (size_t i = 0; i < SLOTS_DATA; i++) {
    size_t slot = SLOTS_DATA - (i + 1);
    if (_cachedHashes[slot] == 0) {
      continue;
    }
    if (_cachedData[slot]->isFreeable()) {
    if (ignoreRefCount || _cachedData[slot]->isFreeable()) {
      return _cachedData[slot];
    }
  }

@ -193,6 +204,11 @@ void TransactionalBucket::evict(CachedValue* value, bool optimizeForInsertion) {
  }
}

void TransactionalBucket::clear() {
  TRI_ASSERT(isLocked());
  memset(this, 0, sizeof(TransactionalBucket));
}

void TransactionalBucket::updateBlacklistTerm(uint64_t term) {
  if (term > _blacklistTerm) {
    _blacklistTerm = term;

@ -206,6 +222,7 @@ void TransactionalBucket::updateBlacklistTerm(uint64_t term) {
  }
}

void TransactionalBucket::moveSlot(size_t slot, bool moveToFront) {
  TRI_ASSERT(isLocked());
  uint32_t hash = _cachedHashes[slot];
  CachedValue* value = _cachedData[slot];
  size_t i = slot;

@ -222,8 +239,12 @@ void TransactionalBucket::moveSlot(size_t slot, bool moveToFront) {
      _cachedData[i] = _cachedData[i + 1];
    }
  }
  if (i != slot) {
    _cachedHashes[i] = hash;
    _cachedData[i] = value;
  }
  _cachedHashes[i] = hash;
  _cachedData[i] = value;
}

bool TransactionalBucket::haveOpenTransaction() const {
  TRI_ASSERT(isLocked());
  // only have open transactions if term is odd
  return ((_blacklistTerm & 1ULL) > 0);
}

@ -34,6 +34,16 @@
namespace arangodb {
namespace cache {

////////////////////////////////////////////////////////////////////////////////
/// @brief Bucket structure for TransactionalCache.
///
/// Contains a State variable, three slots each for hashes and data pointers,
|
||||
/// four slots for blacklisted hashes, and the applicable transaction term. Most
|
||||
/// querying and manipulation can be handled via the exposed methods. Bucket
|
||||
/// must be locked before doing anything else to ensure proper synchronization.
|
||||
/// Data entries are carefully laid out to ensure the structure fits in a single
|
||||
/// cacheline.
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
struct alignas(64) TransactionalBucket {
|
||||
State _state;
|
||||
|
||||
|
@ -52,33 +62,136 @@ struct alignas(64) TransactionalBucket {
|
|||
uint32_t _padding[3];
|
||||
#endif
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief Initialize an empty bucket.
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
TransactionalBucket();
|
||||
|
||||
// must lock before using any other operations
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief Attempt to lock bucket (failing after maxTries attempts).
|
||||
///
|
||||
/// If the bucket is successfully locked, the transaction term is updated.
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
bool lock(uint64_t transactionTerm, int64_t maxTries);
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief Unlock the bucket. Requires bucket to be locked.
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
void unlock();
|
||||
|
||||
// state checkers
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief Checks whether the bucket is locked.
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
bool isLocked() const;
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief Checks whether the bucket has been migrated. Requires state to be
|
||||
/// locked.
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
bool isMigrated() const;
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief Checks whether bucket has been fully blacklisted. Requires state to
|
||||
/// be locked.
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
bool isFullyBlacklisted() const;
|
||||
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief Checks whether bucket is full. Requires state to be locked.
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
bool isFull() const;
|
||||
|
||||
// primary functions
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief Looks up a given key and returns associated value. Requires state
|
||||
/// to be locked.
|
||||
///
|
||||
/// Takes an input hash and key (specified by pointer and size), and searches
|
||||
/// the bucket for a matching entry. If a matching entry is found, it is
|
||||
/// returned. By default, a matching entry will be moved to the front of the
|
||||
/// bucket to allow basic LRU semantics. If no matching entry is found,
|
||||
/// nothing will be changed and a nullptr will be returned.
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
CachedValue* find(uint32_t hash, void const* key, uint32_t keySize,
|
||||
bool moveToFront = true);
|
||||
void insert(uint32_t hash, CachedValue* value);
|
||||
CachedValue* remove(uint32_t hash, void const* key, uint32_t keySize);
|
||||
void blacklist(uint32_t hash, void const* key, uint32_t keySize);
|
||||
|
||||
// auxiliary functions
|
||||
//////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief Inserts a given value if it is not blacklisted. Requires state to
|
||||
/// be locked.
|
||||
///
|
||||
/// Requires that the bucket is not full and does not already contain an item
|
||||
/// with the same key. If it is full, the item will not be inserted. If an
|
||||
/// item with the same key exists, this is not detected but it is likely to
|
||||
/// produce bugs later on down the line. If the item's hash has been
/// blacklisted, or the bucket is fully blacklisted, insertion will simply do
/// nothing. When inserting, the item is put into the first empty slot, then
/// moved to the front. If attempting to insert and the bucket is full, the
/// user should evict an item and specify the optimizeForInsertion flag to be
/// true.
//////////////////////////////////////////////////////////////////////////////
void insert(uint32_t hash, CachedValue* value);

//////////////////////////////////////////////////////////////////////////////
/// @brief Removes an item with the given key if one exists. Requires state to
/// be locked.
///
/// Search for a matching key. If none exists, do nothing and return a
/// nullptr. If one exists, remove it from the bucket and return the pointer
/// to the value. Upon removal, the empty slot generated is moved to the back
/// of the bucket (to remove the gap).
//////////////////////////////////////////////////////////////////////////////
CachedValue* remove(uint32_t hash, void const* key, uint32_t keySize);

//////////////////////////////////////////////////////////////////////////////
/// @brief Blacklists a key and removes it if it exists. Requires state to
/// be locked.
///
/// Search for a matching key. If one exists, remove it. Then blacklist the
/// hash associated with the key. If there are no empty blacklist slots, fully
/// blacklist the bucket.
//////////////////////////////////////////////////////////////////////////////
CachedValue* blacklist(uint32_t hash, void const* key, uint32_t keySize);

//////////////////////////////////////////////////////////////////////////////
/// @brief Checks whether a given hash is blacklisted. Requires state to be
/// locked.
//////////////////////////////////////////////////////////////////////////////
bool isBlacklisted(uint32_t hash) const;
CachedValue* evictionCandidate() const;
void evict(CachedValue* value, bool optimizeForInsertion);

//////////////////////////////////////////////////////////////////////////////
/// @brief Searches for the best candidate in the bucket to evict. Requires
/// state to be locked.
///
/// Usually returns a pointer to the least recently used freeable value. If
/// the bucket contains no values or all have outstanding references, then it
/// returns nullptr. In the case that ignoreRefCount is set to true, then it
/// simply returns the least recently used value, regardless of freeability.
//////////////////////////////////////////////////////////////////////////////
CachedValue* evictionCandidate(bool ignoreRefCount = false) const;

//////////////////////////////////////////////////////////////////////////////
/// @brief Evicts the given value from the bucket. Requires state to be
/// locked.
///
/// By default, it will move the empty slot to the back of the bucket. If
/// preparing an empty slot for insertion, specify the second parameter to be
/// true. This will move the empty slot to the front instead.
//////////////////////////////////////////////////////////////////////////////
void evict(CachedValue* value, bool optimizeForInsertion = false);

//////////////////////////////////////////////////////////////////////////////
/// @brief Updates the bucket's blacklist term. Requires state to be locked.
//////////////////////////////////////////////////////////////////////////////
void updateBlacklistTerm(uint64_t term);

//////////////////////////////////////////////////////////////////////////////
/// @brief Reinitializes a bucket to be completely empty and unlocked.
/// Requires state to be locked.
//////////////////////////////////////////////////////////////////////////////
void clear();

 private:
  void updateBlacklistTerm(uint64_t term);
  void moveSlot(size_t slot, bool moveToFront);
  bool haveOpenTransaction() const;
};

};  // end namespace cache
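The comments above describe the bucket's slot discipline: insert into the first empty slot and move the item to the front, and compact the gap toward the back on removal. A minimal self-contained sketch of that discipline (ToyBucket is an illustrative stand-in, not the actual arangodb::cache types):

```cpp
#include <algorithm>
#include <array>
#include <cstdint>
#include <iostream>

struct ToyBucket {
  static constexpr size_t SLOTS = 4;
  std::array<uint32_t, SLOTS> hashes{};  // 0 marks an empty slot

  void insert(uint32_t hash) {
    // find the first empty slot...
    auto it = std::find(hashes.begin(), hashes.end(), 0u);
    if (it == hashes.end()) return;  // full: caller must evict first
    *it = hash;
    // ...then move the new item to the front (most recently used)
    std::rotate(hashes.begin(), it, it + 1);
  }

  void remove(uint32_t hash) {
    auto it = std::find(hashes.begin(), hashes.end(), hash);
    if (it == hashes.end()) return;
    *it = 0;
    // move the empty slot to the back so the used prefix stays contiguous
    std::rotate(it, it + 1, hashes.end());
  }
};

int main() {
  ToyBucket b;
  b.insert(7); b.insert(9); b.insert(5);
  b.remove(9);
  for (uint32_t h : b.hashes) std::cout << h << ' ';  // prints: 5 7 0 0
}
```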
@@ -27,7 +27,9 @@
#include "Cache/CachedValue.h"
#include "Cache/FrequencyBuffer.h"
#include "Cache/Metadata.h"
#include "Cache/State.h"
#include "Cache/TransactionalBucket.h"
#include "Random/RandomGenerator.h"

#include <stdint.h>
#include <atomic>
@@ -36,69 +38,485 @@

using namespace arangodb::cache;

static constexpr int64_t TRIES_FAST = 50LL;
static constexpr int64_t TRIES_SLOW = 10000LL;
static constexpr int64_t TRIES_GUARANTEE = -1LL;

Cache::Finding TransactionalCache::find(void const* key, uint32_t keySize) {
  // TODO: implement this;
  return Cache::Finding(nullptr);
}
  TRI_ASSERT(key != nullptr);
  Finding result(nullptr);
  uint32_t hash = hashKey(key, keySize);

bool TransactionalCache::insert(CachedValue* value) {
  // TODO: implement this
  return false;
}
  bool ok;
  TransactionalBucket* bucket;
  std::tie(ok, bucket) = getBucket(hash, TRIES_FAST);

bool TransactionalCache::remove(void const* key, uint32_t keySize) {
  // TODO: implement this
  return false;
}

void TransactionalCache::blackList(void const* key, uint32_t keySize) {
  // TODO: implement this
}

std::shared_ptr<Cache> TransactionalCache::create(Manager* manager,
                                                  uint64_t requestedSize,
                                                  bool allowGrowth,
                                                  bool enableWindowedStats) {
  TransactionalCache* cache = new TransactionalCache(
      manager, requestedSize, allowGrowth, enableWindowedStats);

  if (cache == nullptr) {
    return std::shared_ptr<Cache>(nullptr);
  if (ok) {
    result.reset(bucket->find(hash, key, keySize));
    recordStat(result.found() ? Stat::findHit : Stat::findMiss);
    bucket->unlock();
    endOperation();
  }

  cache->metadata()->lock();
  auto result = cache->metadata()->cache();
  cache->metadata()->unlock();

  return result;
}

TransactionalCache::TransactionalCache(Manager* manager,
                                       uint64_t requestedLimit,
bool TransactionalCache::insert(CachedValue* value) {
  TRI_ASSERT(value != nullptr);
  bool inserted = false;
  uint32_t hash = hashKey(value->key(), value->keySize);

  bool ok;
  TransactionalBucket* bucket;
  std::tie(ok, bucket) = getBucket(hash, TRIES_FAST);

  if (ok) {
    bool allowed = !bucket->isBlacklisted(hash);
    if (allowed) {
      bool eviction = false;
      int64_t change = value->size();
      CachedValue* candidate = bucket->find(hash, value->key(), value->keySize);

      if (candidate == nullptr && bucket->isFull()) {
        candidate = bucket->evictionCandidate();
        if (candidate == nullptr) {
          allowed = false;
        } else {
          eviction = true;
        }
      }

      if (allowed) {
        if (candidate != nullptr) {
          change -= candidate->size();
        }

        _metadata->lock();
        allowed = _metadata->adjustUsageIfAllowed(change);
        _metadata->unlock();

        if (allowed) {
          if (candidate != nullptr) {
            bucket->evict(candidate, true);
            freeValue(candidate);
          }
          recordStat(eviction ? Stat::insertEviction : Stat::insertNoEviction);
          bucket->insert(hash, value);
          inserted = true;
        } else {
          requestResize();  // let function do the hard work
        }
      }
    }

    bucket->unlock();
    if (inserted) {
      requestMigrate();  // let function do the hard work
    }
    endOperation();
  }

  return inserted;
}

bool TransactionalCache::remove(void const* key, uint32_t keySize) {
  TRI_ASSERT(key != nullptr);
  bool removed = false;
  uint32_t hash = hashKey(key, keySize);

  bool ok;
  TransactionalBucket* bucket;
  std::tie(ok, bucket) = getBucket(hash, TRIES_SLOW);

  if (ok) {
    CachedValue* candidate = bucket->remove(hash, key, keySize);

    if (candidate != nullptr) {
      int64_t change = -static_cast<int64_t>(candidate->size());

      _metadata->lock();
      bool allowed = _metadata->adjustUsageIfAllowed(change);
      TRI_ASSERT(allowed);
      _metadata->unlock();

      freeValue(candidate);
    }

    removed = true;
    bucket->unlock();
    endOperation();
  }

  return removed;
}

bool TransactionalCache::blacklist(void const* key, uint32_t keySize) {
  TRI_ASSERT(key != nullptr);
  bool blacklisted = false;
  uint32_t hash = hashKey(key, keySize);

  bool ok;
  TransactionalBucket* bucket;
  std::tie(ok, bucket) = getBucket(hash, TRIES_SLOW);

  if (ok) {
    CachedValue* candidate = bucket->blacklist(hash, key, keySize);
    blacklisted = true;

    if (candidate != nullptr) {
      int64_t change = -static_cast<int64_t>(candidate->size());

      _metadata->lock();
      bool allowed = _metadata->adjustUsageIfAllowed(change);
      TRI_ASSERT(allowed);
      _metadata->unlock();

      freeValue(candidate);
    }

    bucket->unlock();
    endOperation();
  }

  return blacklisted;
}

uint64_t TransactionalCache::allocationSize(bool enableWindowedStats) {
  return sizeof(TransactionalCache) +
         StatBuffer::allocationSize(_evictionStatsCapacity) +
         (enableWindowedStats ? (sizeof(StatBuffer) +
                                 StatBuffer::allocationSize(_findStatsCapacity))
                              : 0);
}

std::shared_ptr<Cache> TransactionalCache::create(Manager* manager,
                                                  Manager::MetadataItr metadata,
                                                  bool allowGrowth,
                                                  bool enableWindowedStats) {
  return std::make_shared<TransactionalCache>(Cache::ConstructionGuard(),
                                              manager, metadata, allowGrowth,
                                              enableWindowedStats);
}

TransactionalCache::TransactionalCache(Cache::ConstructionGuard guard,
                                       Manager* manager,
                                       Manager::MetadataItr metadata,
                                       bool allowGrowth,
                                       bool enableWindowedStats)
    : Cache(manager, requestedLimit, allowGrowth, enableWindowedStats,
            [](Cache* p) -> void {
              delete reinterpret_cast<TransactionalCache*>(p);
            },
            sizeof(TransactionalCache)) {
  // TODO: implement this
    : Cache(guard, manager, metadata, allowGrowth, enableWindowedStats),
      _table(nullptr),
      _logSize(0),
      _tableSize(1),
      _maskShift(32),
      _bucketMask(0),
      _auxiliaryTable(nullptr),
      _auxiliaryLogSize(0),
      _auxiliaryTableSize(1),
      _auxiliaryMaskShift(32),
      _auxiliaryBucketMask(0) {
  _state.lock();
  if (isOperational()) {
    _metadata->lock();
    _table = reinterpret_cast<TransactionalBucket*>(_metadata->table());
    _logSize = _metadata->logSize();
    _tableSize = (1ULL << _logSize);
    _maskShift = 32 - _logSize;
    _bucketMask = (_tableSize - 1) << _maskShift;
    _metadata->unlock();
  }
  _state.unlock();
}

TransactionalCache::~TransactionalCache() {
  // TODO: implement this
  _state.lock();
  if (!_state.isSet(State::Flag::shutdown)) {
    _state.unlock();
    shutdown();
  }
  if (_state.isLocked()) {
    _state.unlock();
  }
}

bool TransactionalCache::freeMemory() {
  // TODO: implement this
  return false;
  _state.lock();
  if (!isOperational()) {
    _state.unlock();
    return false;
  }
  startOperation();
  _state.unlock();

  bool underLimit = reclaimMemory(0ULL);
  uint64_t failures = 0;
  while (!underLimit) {
    // pick a random bucket
    uint32_t randomHash = RandomGenerator::interval(UINT32_MAX);
    bool ok;
    TransactionalBucket* bucket;
    std::tie(ok, bucket) = getBucket(randomHash, TRIES_FAST, false);

    if (ok) {
      failures = 0;
      // evict LRU freeable value if exists
      CachedValue* candidate = bucket->evictionCandidate();

      if (candidate != nullptr) {
        uint64_t size = candidate->size();
        bucket->evict(candidate);
        freeValue(candidate);

        underLimit = reclaimMemory(size);
      }

      bucket->unlock();
    } else {
      failures++;
      if (failures > 100) {
        _state.lock();
        bool shouldQuit = !isOperational();
        _state.unlock();

        if (shouldQuit) {
          break;
        } else {
          failures = 0;
        }
      }
    }
  }

  endOperation();
  return true;
}

bool TransactionalCache::migrate() {
  // TODO: implement this
  return false;
  _state.lock();
  if (!isOperational()) {
    _state.unlock();
    return false;
  }
  startOperation();
  _metadata->lock();
  if (_metadata->table() == nullptr || _metadata->auxiliaryTable() == nullptr) {
    _metadata->unlock();
    _state.unlock();
    endOperation();
    return false;
  }
  _auxiliaryTable =
      reinterpret_cast<TransactionalBucket*>(_metadata->auxiliaryTable());
  _auxiliaryLogSize = _metadata->auxiliaryLogSize();
  _auxiliaryTableSize = (1ULL << _auxiliaryLogSize);
  _auxiliaryMaskShift = (32 - _auxiliaryLogSize);
  _auxiliaryBucketMask = (_auxiliaryTableSize - 1) << _auxiliaryMaskShift;
  _metadata->unlock();
  _state.toggleFlag(State::Flag::migrating);
  _state.unlock();

  uint64_t term = _manager->_transactions.term();

  for (uint32_t i = 0; i < _tableSize; i++) {
    // lock current bucket
    TransactionalBucket* bucket = &(_table[i]);
    bucket->lock(term, -1LL);
    term = std::max(term, bucket->_blacklistTerm);

    // collect target bucket(s)
    std::vector<TransactionalBucket*> targets;
    if (_logSize > _auxiliaryLogSize) {
      uint32_t targetIndex = (i << _maskShift) >> _auxiliaryMaskShift;
      targets.emplace_back(&(_auxiliaryTable[targetIndex]));
    } else {
      uint32_t baseIndex = (i << _maskShift) >> _auxiliaryMaskShift;
      for (size_t j = 0; j < (1U << (_auxiliaryLogSize - _logSize)); j++) {
        uint32_t targetIndex = baseIndex + j;
        targets.emplace_back(&(_auxiliaryTable[targetIndex]));
      }
    }
    // lock target bucket(s)
    for (TransactionalBucket* targetBucket : targets) {
      targetBucket->lock(term, TRIES_GUARANTEE);
      term = std::max(term, targetBucket->_blacklistTerm);
    }

    // update all buckets to maximum term found (guaranteed at most the current)
    bucket->updateBlacklistTerm(term);
    for (TransactionalBucket* targetBucket : targets) {
      targetBucket->updateBlacklistTerm(term);
    }
    // now actually migrate any relevant blacklist terms
    if (bucket->isFullyBlacklisted()) {
      for (TransactionalBucket* targetBucket : targets) {
        if (!targetBucket->isFullyBlacklisted()) {
          (*targetBucket)._state.toggleFlag(State::Flag::blacklisted);
        }
      }
    } else {
      for (size_t j = 0; j < TransactionalBucket::SLOTS_BLACKLIST; j++) {
        uint32_t hash = bucket->_blacklistHashes[j];
        if (hash == 0) {
          break;
        }
        uint32_t targetIndex = getIndex(hash, true);
        TransactionalBucket* targetBucket = &(_auxiliaryTable[targetIndex]);
        CachedValue* candidate = targetBucket->blacklist(hash, nullptr, 0);
        TRI_ASSERT(candidate == nullptr);
        bucket->_blacklistHashes[j] = 0;
      }
    }

    // migrate actual values
    for (size_t j = 0; j < TransactionalBucket::SLOTS_DATA; j++) {
      size_t k = TransactionalBucket::SLOTS_DATA - (j + 1);
      if (bucket->_cachedHashes[k] != 0) {
        uint32_t hash = bucket->_cachedHashes[k];
        CachedValue* value = bucket->_cachedData[k];

        uint32_t targetIndex = getIndex(hash, true);
        TransactionalBucket* targetBucket = &(_auxiliaryTable[targetIndex]);
        if (targetBucket->isBlacklisted(hash)) {
          uint64_t size = value->size();
          freeValue(value);
          reclaimMemory(size);
        } else {
          bool haveSpace = true;
          if (targetBucket->isFull()) {
            CachedValue* candidate = targetBucket->evictionCandidate();
            if (candidate != nullptr) {
              targetBucket->evict(candidate, true);
              uint64_t size = candidate->size();
              freeValue(candidate);
              reclaimMemory(size);
            } else {
              haveSpace = false;
            }
          }
          if (haveSpace) {
            targetBucket->insert(hash, value);
          } else {
            uint64_t size = value->size();
            freeValue(value);
            reclaimMemory(size);
          }
        }

        bucket->_cachedHashes[k] = 0;
        bucket->_cachedData[k] = nullptr;
      }
    }

    // unlock targets
    for (TransactionalBucket* targetBucket : targets) {
      targetBucket->unlock();
    }

    // finish up this bucket's migration
    bucket->_state.toggleFlag(State::Flag::migrated);
    bucket->unlock();
  }

  // swap tables and unmark local migrating flag
  _state.lock();
  std::swap(_table, _auxiliaryTable);
  std::swap(_logSize, _auxiliaryLogSize);
  std::swap(_tableSize, _auxiliaryTableSize);
  std::swap(_maskShift, _auxiliaryMaskShift);
  std::swap(_bucketMask, _auxiliaryBucketMask);
  _state.toggleFlag(State::Flag::migrating);
  _state.unlock();

  // clear out old table
  clearTable(_auxiliaryTable, _auxiliaryTableSize);

  // release references to old table
  _state.lock();
  _auxiliaryTable = nullptr;
  _auxiliaryLogSize = 0;
  _auxiliaryTableSize = 1;
  _auxiliaryMaskShift = 32;
  _auxiliaryBucketMask = 0;
  _state.unlock();

  // swap table in metadata
  _metadata->lock();
  _metadata->swapTables();
  _metadata->unlock();

  endOperation();
  return true;
}

void TransactionalCache::clearTables() {
  // TODO: implement this
  if (_table != nullptr) {
    clearTable(_table, _tableSize);
  }
  if (_auxiliaryTable != nullptr) {
    clearTable(_auxiliaryTable, _auxiliaryTableSize);
  }
}

std::pair<bool, TransactionalBucket*> TransactionalCache::getBucket(
    uint32_t hash, int64_t maxTries, bool singleOperation) {
  TransactionalBucket* bucket = nullptr;

  bool ok = _state.lock(maxTries);
  if (ok) {
    bool started = false;
    ok = isOperational();
    if (ok) {
      if (singleOperation) {
        startOperation();
        started = true;
        _metadata->lock();
        _manager->reportAccess(_metadata->cache());
        _metadata->unlock();
      }

      uint64_t term = _manager->_transactions.term();

      bucket = &(_table[getIndex(hash, false)]);
      ok = bucket->lock(term, maxTries);
      if (ok &&
          bucket->isMigrated()) {  // get bucket from auxiliary table instead
        bucket->unlock();
        bucket = &(_auxiliaryTable[getIndex(hash, true)]);
        ok = bucket->lock(term, maxTries);
        if (ok && bucket->isMigrated()) {
          ok = false;
          bucket->unlock();
        }
      }
    }
    if (!ok && started) {
      endOperation();
    }
    _state.unlock();
  }

  return std::pair<bool, TransactionalBucket*>(ok, bucket);
}

void TransactionalCache::clearTable(TransactionalBucket* table,
                                    uint64_t tableSize) {
  for (uint64_t i = 0; i < tableSize; i++) {
    TransactionalBucket* bucket = &(table[i]);
    bucket->lock(0, -1LL);  // term doesn't actually matter here
    for (size_t j = 0; j < TransactionalBucket::SLOTS_DATA; j++) {
      if (bucket->_cachedData[j] != nullptr) {
        uint64_t size = bucket->_cachedData[j]->size();
        freeValue(bucket->_cachedData[j]);
        reclaimMemory(size);
      }
    }
    bucket->clear();
  }
}

uint32_t TransactionalCache::getIndex(uint32_t hash, bool useAuxiliary) const {
  if (useAuxiliary) {
    return ((hash & _auxiliaryBucketMask) >> _auxiliaryMaskShift);
  }

  return ((hash & _bucketMask) >> _maskShift);
}
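getIndex() selects a bucket from the top logSize bits of the 32-bit hash, and migrate() remaps an old bucket index into the new table with `(i << _maskShift) >> _auxiliaryMaskShift`. A standalone check of that arithmetic (the log sizes here are illustrative, not ArangoDB defaults):

```cpp
#include <cstdint>
#include <iostream>

// Same arithmetic as getIndex(): the top logSize bits of the hash select
// the bucket.
static uint32_t index(uint32_t hash, uint32_t logSize) {
  uint32_t maskShift = 32 - logSize;
  uint32_t mask = ((1ULL << logSize) - 1) << maskShift;
  return (hash & mask) >> maskShift;
}

int main() {
  uint32_t hash = 0xDEADBEEF;
  uint32_t oldLog = 4, newLog = 6;  // table grows from 16 to 64 buckets
  uint32_t i = index(hash, oldLog);
  // remap as in migrate(): when the table grows, old bucket i covers the
  // contiguous new-index range starting at base
  uint32_t base = (i << (32 - oldLog)) >> (32 - newLog);
  std::cout << "old bucket " << i << " -> new buckets " << base << ".."
            << base + (1u << (newLog - oldLog)) - 1 << "\n";
  std::cout << "hash lands in new bucket " << index(hash, newLog) << "\n";
}
```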
@@ -28,7 +28,10 @@
#include "Cache/Cache.h"
#include "Cache/CachedValue.h"
#include "Cache/FrequencyBuffer.h"
#include "Cache/Manager.h"
#include "Cache/ManagerTasks.h"
#include "Cache/Metadata.h"
#include "Cache/State.h"
#include "Cache/TransactionalBucket.h"

#include <stdint.h>
@@ -39,19 +42,73 @@
namespace arangodb {
namespace cache {

class Manager;  // forward declaration

////////////////////////////////////////////////////////////////////////////////
/// @brief A transactional, LRU-ish cache.
///
/// To create a cache, see Manager class. Once created, the class has a simple
/// API mostly following that of the base Cache class. For any non-pure-virtual
/// functions, see Cache.h for documentation. The only additional functions
/// exposed on the API of the transactional cache are those dealing with the
/// blacklisting of keys.
///
/// To operate correctly, whenever a key is about to be written to the backing
/// store, it must be blacklisted in any corresponding transactional caches.
/// This will prevent the cache from serving stale or potentially incorrect
/// values and allow for clients to fall through to the backing transactional
/// store.
////////////////////////////////////////////////////////////////////////////////
class TransactionalCache final : public Cache {
 public:
  TransactionalCache(Cache::ConstructionGuard guard, Manager* manager,
                     Manager::MetadataItr metadata, bool allowGrowth,
                     bool enableWindowedStats);
  ~TransactionalCache();

  TransactionalCache() = delete;
  TransactionalCache(TransactionalCache const&) = delete;
  TransactionalCache& operator=(TransactionalCache const&) = delete;

 public:
  //////////////////////////////////////////////////////////////////////////////
  /// @brief Looks up the given key.
  ///
  /// May report a false negative if it fails to acquire a lock in a timely
  /// fashion. Should not block for long.
  //////////////////////////////////////////////////////////////////////////////
  Cache::Finding find(void const* key, uint32_t keySize);

  //////////////////////////////////////////////////////////////////////////////
  /// @brief Attempts to insert the given value.
  ///
  /// Returns true if inserted, false otherwise. Will not insert if the key
  /// (or its corresponding hash) is blacklisted. Will not insert the value if
  /// this would cause the total usage to exceed the limits. May also not
  /// insert the value if it fails to acquire a lock in a timely fashion.
  /// Should not block for long.
  //////////////////////////////////////////////////////////////////////////////
  bool insert(CachedValue* value);

  //////////////////////////////////////////////////////////////////////////////
  /// @brief Attempts to remove the given key.
  ///
  /// Returns true if the key is guaranteed not to be in the cache, false if
  /// the key may remain in the cache. May leave the key in the cache if it
  /// fails to acquire a lock in a timely fashion. Makes more attempts to
  /// acquire a lock before quitting, so may block for longer than find or
  /// insert. Client may re-try.
  //////////////////////////////////////////////////////////////////////////////
  bool remove(void const* key, uint32_t keySize);
  void blackList(void const* key, uint32_t keySize);

  //////////////////////////////////////////////////////////////////////////////
  /// @brief Attempts to blacklist the given key.
  ///
  /// Returns true if the key was blacklisted and is guaranteed not to be in
  /// the cache, false otherwise. May not blacklist the key if it fails to
  /// acquire a lock in a timely fashion. Makes more attempts to acquire a
  /// lock before quitting, so may block for longer than find or insert.
  /// Client should re-try.
  //////////////////////////////////////////////////////////////////////////////
  bool blacklist(void const* key, uint32_t keySize);

 private:
  // main table info

@@ -74,19 +131,22 @@ class TransactionalCache final : public Cache {
  friend class MigrateTask;

 private:
  // creator -- do not use constructor explicitly
  static std::shared_ptr<Cache> create(Manager* manager, uint64_t requestedSize,
  static uint64_t allocationSize(bool enableWindowedStats);
  static std::shared_ptr<Cache> create(Manager* manager,
                                       Manager::MetadataItr metadata,
                                       bool allowGrowth,
                                       bool enableWindowedStats);

  TransactionalCache(Manager* manager, uint64_t requestedLimit,
                     bool allowGrowth, bool enableWindowedStats);
  ~TransactionalCache();

  // management
  bool freeMemory();
  bool migrate();
  void clearTables();

  // helpers
  std::pair<bool, TransactionalBucket*> getBucket(uint32_t hash,
                                                  int64_t maxTries,
                                                  bool singleOperation = true);
  void clearTable(TransactionalBucket* table, uint64_t tableSize);
  uint32_t getIndex(uint32_t hash, bool useAuxiliary) const;
};

};  // end namespace cache
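The class comment above requires a writer to blacklist a key in the cache before touching the backing store, so no reader can be served a stale value in between. A minimal sketch of that ordering; ToyCache and ToyStore are illustrative stand-ins, not the real API (in ArangoDB, caching for the key resumes later via blacklist-term updates):

```cpp
#include <map>
#include <optional>
#include <string>
#include <unordered_set>

// Stand-in cache: blacklisting drops the entry and refuses re-insertion.
struct ToyCache {
  std::unordered_set<std::string> blacklisted;
  std::map<std::string, std::string> entries;

  void blacklist(std::string const& key) {
    blacklisted.insert(key);  // future inserts for this key are refused
    entries.erase(key);       // and any cached copy is dropped
  }
  std::optional<std::string> find(std::string const& key) const {
    auto it = entries.find(key);
    if (it == entries.end()) return std::nullopt;
    return it->second;
  }
};

struct ToyStore {
  std::map<std::string, std::string> data;
  ToyCache cache;

  void write(std::string const& key, std::string const& value) {
    // 1) blacklist first, so no reader can see a stale cached value
    cache.blacklist(key);
    // 2) then update the backing store; readers fall through to it
    data[key] = value;
  }
};

int main() {
  ToyStore store;
  store.cache.entries["k"] = "old";  // a stale cached copy
  store.write("k", "new");
  // the stale entry is gone; reads of "k" now fall through to the store
  return store.cache.find("k").has_value() ? 1 : 0;
}
```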
@@ -466,7 +466,7 @@ void ClusterInfo::loadPlan() {
  std::shared_ptr<LogicalCollection> newCollection;
#ifndef USE_ENTERPRISE
  newCollection = std::make_shared<LogicalCollection>(
      vocbase, collectionSlice, false);
      vocbase, collectionSlice);
#else
  VPackSlice isSmart = collectionSlice.get("isSmart");
  if (isSmart.isTrue()) {

@@ -480,7 +480,7 @@ void ClusterInfo::loadPlan() {
    }
  } else {
    newCollection = std::make_shared<LogicalCollection>(
        vocbase, collectionSlice, false);
        vocbase, collectionSlice);
  }
#endif
  std::string const collectionName = newCollection->name();
@@ -2167,7 +2167,7 @@ std::unique_ptr<LogicalCollection>
ClusterMethods::createCollectionOnCoordinator(TRI_col_type_e collectionType,
                                              TRI_vocbase_t* vocbase,
                                              VPackSlice parameters) {
  auto col = std::make_unique<LogicalCollection>(vocbase, parameters, false);
  auto col = std::make_unique<LogicalCollection>(vocbase, parameters);
  // Collection is a temporary collection object that undergoes sanity checks etc.
  // It is not used anywhere and will be cleaned up after this call.
  // Persist collection will return the real object.
@@ -570,7 +570,7 @@ void HttpCommTask::processRequest(std::unique_ptr<HttpRequest> request) {
      << "\"http-request-begin\",\"" << (void*)this << "\",\""
      << _connectionInfo.clientAddress << "\",\""
      << HttpRequest::translateMethod(_requestType) << "\",\""
      << HttpRequest::translateVersion(_protocolVersion) << "\"," << _fullUrl
      << HttpRequest::translateVersion(_protocolVersion) << "\",\"" << _fullUrl
      << "\"";

  std::string const& body = request->body();
@@ -27,8 +27,6 @@
#include <limits>
#include <stdexcept>

#include <velocypack/velocypack-aliases.h>

#include <boost/optional.hpp>

#include "Basics/HybridLogicalClock.h"

@@ -52,6 +50,35 @@ using namespace arangodb;
using namespace arangodb::basics;
using namespace arangodb::rest;

inline std::size_t validateAndCount(char const* vpStart,
                                    char const* vpEnd) {
  VPackOptions validationOptions = VPackOptions::Defaults;
  validationOptions.validateUtf8Strings = true;
  VPackValidator validator(&validationOptions);

  try {
    std::size_t numPayloads = 0;
    // check from the slice start to the end of the chunk;
    // isSubPart allows the slice to be shorter than the checked buffer.
    do {
      validator.validate(vpStart, std::distance(vpStart, vpEnd),
                         /*isSubPart =*/true);

      // get offset to the next slice
      VPackSlice tmp(vpStart);
      vpStart += tmp.byteSize();
      numPayloads++;
    } while (vpStart != vpEnd);
    return numPayloads - 1;
  } catch (std::exception const& e) {
    LOG_TOPIC(DEBUG, Logger::COMMUNICATION)
        << "len: " << std::distance(vpStart, vpEnd) << " - " << VPackSlice(vpStart).toHex();
    throw std::runtime_error(
        std::string("error during validation of incoming VPack: ") + e.what());
  }
}

VppCommTask::VppCommTask(EventLoop loop, GeneralServer* server,
                         std::unique_ptr<Socket> socket, ConnectionInfo&& info,
                         double timeout, bool skipInit)
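validateAndCount() walks a buffer of back-to-back VelocyPack slices, validating each one, and returns the slice count minus one (presumably because in VelocyStream framing the first slice is the message header, so the result is the number of payloads). A sketch of building such a buffer by hand, assuming only the public velocypack Builder/Slice API:

```cpp
#include <iostream>
#include <string>
#include <velocypack/Builder.h>
#include <velocypack/Value.h>

using namespace arangodb::velocypack;

int main() {
  // concatenate two slices: a "header" array and one payload object
  Builder header;
  header.openArray();
  header.add(Value(1));  // illustrative header content
  header.close();

  Builder payload;
  payload.openObject();
  payload.add("key", Value("value"));
  payload.close();

  std::string buffer;
  buffer.append(header.slice().startAs<char>(), header.slice().byteSize());
  buffer.append(payload.slice().startAs<char>(), payload.slice().byteSize());

  char const* begin = buffer.data();
  char const* end = buffer.data() + buffer.size();
  // validateAndCount(begin, end) would return 1 here:
  // two valid slices, minus one for the leading header slice.
  std::cout << buffer.size() << " bytes, 2 slices\n";
  (void)begin; (void)end;
}
```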
@@ -38,33 +38,6 @@

namespace arangodb {

inline std::size_t validateAndCount(char const* vpStart,
                                    char const* vpEnd) {
  VPackOptions validationOptions = VPackOptions::Defaults;
  validationOptions.validateUtf8Strings = true;
  VPackValidator validator(&validationOptions);

  std::size_t numPayloads = 0;

  try {
    // check from the slice start to the end of the chunk;
    // isSubPart allows the slice to be shorter than the checked buffer.
    do {
      validator.validate(vpStart, std::distance(vpStart, vpEnd),
                         /*isSubPart =*/true);

      // get offset to the next slice
      VPackSlice tmp(vpStart);
      vpStart += tmp.byteSize();
      numPayloads++;
    } while (vpStart != vpEnd);
    return numPayloads - 1;
  } catch (std::exception const& e) {
    throw std::runtime_error(
        std::string("error during validation of incoming VPack: ") + e.what());
  }
}

template <typename T>
std::size_t appendToBuffer(basics::StringBuffer* buffer, T& value) {
  constexpr std::size_t len = sizeof(T);
@@ -443,6 +443,10 @@ MMFilesCollection::MMFilesCollection(LogicalCollection* collection,
        "<properties>.journalSize too small");
  }

  auto pathSlice = info.get("path");
  if (pathSlice.isString()) {
    _path = pathSlice.copyString();
  }
  setCompactionStatus("compaction not yet started");
}

@@ -2310,7 +2314,7 @@ int MMFilesCollection::insert(transaction::Methods* trx,
  transaction::BuilderLeaser builder(trx);
  VPackSlice newSlice;
  int res = TRI_ERROR_NO_ERROR;
  if (options.recoveryMarker == nullptr) {
  if (options.recoveryData == nullptr) {
    TIMER_START(TRANSACTION_NEW_OBJECT_FOR_INSERT);
    res = newObjectForInsert(trx, slice, fromSlice, toSlice, isEdgeCollection,
                             *builder.get(), options.isRestore);

@@ -2332,10 +2336,10 @@ int MMFilesCollection::insert(transaction::Methods* trx,
      static_cast<MMFilesTransactionState*>(trx->state())->idForMarker(), newSlice);

  MMFilesWalMarker const* marker;
  if (options.recoveryMarker == nullptr) {
  if (options.recoveryData == nullptr) {
    marker = &insertMarker;
  } else {
    marker = options.recoveryMarker;
    marker = static_cast<MMFilesWalMarker*>(options.recoveryData);
  }

  // now insert into indexes

@@ -2742,7 +2746,7 @@ int MMFilesCollection::update(arangodb::transaction::Methods* trx,

  // merge old and new values
  transaction::BuilderLeaser builder(trx);
  if (options.recoveryMarker == nullptr) {
  if (options.recoveryData == nullptr) {
    mergeObjectsForUpdate(trx, oldDoc, newSlice, isEdgeCollection,
                          TRI_RidToString(revisionId), options.mergeObjects,
                          options.keepNull, *builder.get());

@@ -2764,10 +2768,10 @@ int MMFilesCollection::update(arangodb::transaction::Methods* trx,
      static_cast<MMFilesTransactionState*>(trx->state())->idForMarker(), builder->slice());

  MMFilesWalMarker const* marker;
  if (options.recoveryMarker == nullptr) {
  if (options.recoveryData == nullptr) {
    marker = &updateMarker;
  } else {
    marker = options.recoveryMarker;
    marker = static_cast<MMFilesWalMarker*>(options.recoveryData);
  }

  VPackSlice const newDoc(marker->vpack());

@@ -2889,10 +2893,10 @@ int MMFilesCollection::replace(
      static_cast<MMFilesTransactionState*>(trx->state())->idForMarker(), builder->slice());

  MMFilesWalMarker const* marker;
  if (options.recoveryMarker == nullptr) {
  if (options.recoveryData == nullptr) {
    marker = &replaceMarker;
  } else {
    marker = options.recoveryMarker;
    marker = static_cast<MMFilesWalMarker*>(options.recoveryData);
  }

  VPackSlice const newDoc(marker->vpack());

@@ -2969,10 +2973,10 @@ int MMFilesCollection::remove(arangodb::transaction::Methods* trx, VPackSlice co
      builder->slice());

  MMFilesWalMarker const* marker;
  if (options.recoveryMarker == nullptr) {
  if (options.recoveryData == nullptr) {
    marker = &removeMarker;
  } else {
    marker = options.recoveryMarker;
    marker = static_cast<MMFilesWalMarker*>(options.recoveryData);
  }

  TRI_IF_FAILURE("RemoveDocumentNoLock") {
@@ -130,7 +130,9 @@ MMFilesEngine::MMFilesEngine(application_features::ApplicationServer* server)
    : StorageEngine(server, EngineName, FeatureName, new MMFilesIndexFactory())
    , _isUpgrade(false)
    , _maxTick(0)
{}
{
  startsAfter("PersistentIndex");
}

MMFilesEngine::~MMFilesEngine() {
}

@@ -533,6 +535,10 @@ int MMFilesEngine::getCollectionsAndIndexes(TRI_vocbase_t* vocbase,

  return TRI_ERROR_NO_ERROR;
}

void MMFilesEngine::waitForSync(TRI_voc_tick_t tick) {
  MMFilesLogfileManager::instance()->slots()->waitForTick(tick);
}

TRI_vocbase_t* MMFilesEngine::openDatabase(arangodb::velocypack::Slice const& args, bool isUpgrade, int& status) {
  VPackSlice idSlice = args.get("id");

@@ -1276,7 +1282,7 @@ TRI_vocbase_t* MMFilesEngine::openExistingDatabase(TRI_voc_tick_t id, std::strin
  for (auto const& it : VPackArrayIterator(slice)) {
    // we found a collection that is still active
    TRI_ASSERT(!it.get("id").isNone() || !it.get("cid").isNone());
    auto uniqCol = std::make_unique<arangodb::LogicalCollection>(vocbase.get(), it, true);
    auto uniqCol = std::make_unique<arangodb::LogicalCollection>(vocbase.get(), it);
    auto collection = uniqCol.get();
    TRI_ASSERT(collection != nullptr);
    StorageEngine::registerCollection(vocbase.get(), uniqCol.get());
@@ -117,6 +117,8 @@ class MMFilesEngine final : public StorageEngine {
  std::string databasePath(TRI_vocbase_t const* vocbase) const override {
    return databaseDirectory(vocbase->id());
  }

  void waitForSync(TRI_voc_tick_t tick) override;

  virtual TRI_vocbase_t* openDatabase(arangodb::velocypack::Slice const& parameters, bool isUpgrade, int&) override;
  Database* createDatabase(TRI_voc_tick_t id, arangodb::velocypack::Slice const& args, int& status) override {
@@ -65,7 +65,7 @@ PersistentIndexFeature::PersistentIndexFeature(
    _keepLogFileNum(1000), _logFileTimeToRoll(0), _compactionReadaheadSize(0) {
  setOptional(true);
  requiresElevatedPrivileges(false);
  startsAfter("MMFilesLogfileManager");
  // startsAfter("MMFilesLogfileManager");
  startsAfter("DatabasePath");
}

@@ -496,7 +496,7 @@ bool MMFilesWalRecoverState::ReplayMarker(TRI_df_marker_t const* marker,

      OperationOptions options;
      options.silent = true;
      options.recoveryMarker = envelope;
      options.recoveryData = static_cast<void*>(envelope);
      options.isRestore = true;
      options.waitForSync = false;
      options.ignoreRevs = true;

@@ -573,7 +573,7 @@ bool MMFilesWalRecoverState::ReplayMarker(TRI_df_marker_t const* marker,

      OperationOptions options;
      options.silent = true;
      options.recoveryMarker = envelope;
      options.recoveryData = static_cast<void*>(envelope);
      options.waitForSync = false;
      options.ignoreRevs = true;

@@ -733,7 +733,7 @@ bool MMFilesWalRecoverState::ReplayMarker(TRI_df_marker_t const* marker,
      // dropped later
      bool const forceSync = state->willBeDropped(databaseId, collectionId);
      CollectionResult res = collection->updateProperties(payloadSlice, forceSync);
      if (res.successful()) {
      if (!res.successful()) {
        LOG_TOPIC(WARN, arangodb::Logger::FIXME) << "cannot change collection properties for collection "
            << collectionId << " in database " << databaseId << ": "
            << res.errorMessage;
@@ -106,17 +106,14 @@ bool RestQueryHandler::readQuery(bool slow) {
  result.add(VPackValue(VPackValueType::Array));

  for (auto const& q : queries) {
    auto const& timeString = TRI_StringTimeStamp(q.started, Logger::getUseLocalTime());

    auto const& queryString = q.queryString;
    auto const& queryState = q.queryState.substr(8, q.queryState.size() - 9);
    auto timeString = TRI_StringTimeStamp(q.started, Logger::getUseLocalTime());

    result.add(VPackValue(VPackValueType::Object));
    result.add("id", VPackValue(StringUtils::itoa(q.id)));
    result.add("query", VPackValue(queryString));
    result.add("query", VPackValue(q.queryString));
    result.add("started", VPackValue(timeString));
    result.add("runTime", VPackValue(q.runTime));
    result.add("state", VPackValue(queryState));
    result.add("state", VPackValue(QueryExecutionState::toString(q.state)));
    result.close();
  }
  result.close();
@@ -26,7 +26,6 @@
#include "ProgramOptions/ProgramOptions.h"
#include "ProgramOptions/Section.h"
#include "MMFiles/MMFilesEngine.h"
//#include "StorageEngine/RocksDBEngine.h"
#include "StorageEngine/StorageEngine.h"

using namespace arangodb;

@@ -61,6 +60,7 @@ void EngineSelectorFeature::prepare() {
      e->enable();

      // register storage engine
      TRI_ASSERT(ENGINE == nullptr);
      ENGINE = e;
    } else {
      // turn off all other storage engines

@@ -90,6 +90,5 @@ std::unordered_set<std::string> EngineSelectorFeature::availableEngineNames() {
std::unordered_map<std::string, std::string> EngineSelectorFeature::availableEngines() {
  return std::unordered_map<std::string, std::string>{
      {MMFilesEngine::EngineName, MMFilesEngine::FeatureName}
      //,{RocksDBEngine::EngineName, RocksDBEngine::FeatureName}
  };
}
@@ -125,6 +125,8 @@ class StorageEngine : public application_features::ApplicationFeature {

  using Database = TRI_vocbase_t;
  using CollectionView = LogicalCollection;

  virtual void waitForSync(TRI_voc_tick_t tick) = 0;

  //// operations on databases

@@ -39,7 +39,6 @@
#include "Cluster/ServerState.h"
#include "Indexes/Index.h"
#include "Logger/Logger.h"
#include "MMFiles/MMFilesLogfileManager.h"  // TODO -- remove -- waitForTick
#include "StorageEngine/EngineSelectorFeature.h"
#include "StorageEngine/StorageEngine.h"
#include "StorageEngine/TransactionCollection.h"

@@ -1414,7 +1413,7 @@ OperationResult transaction::Methods::insertLocal(std::string const& collectionN

  // wait for operation(s) to be synced to disk here
  if (res == TRI_ERROR_NO_ERROR && options.waitForSync && maxTick > 0 && isSingleOperationTransaction()) {
    MMFilesLogfileManager::instance()->slots()->waitForTick(maxTick);
    EngineSelectorFeature::ENGINE->waitForSync(maxTick);
  }

  // Now see whether or not we have to do synchronous replication:

@@ -1723,7 +1722,7 @@ OperationResult transaction::Methods::modifyLocal(

  // wait for operation(s) to be synced to disk here
  if (res == TRI_ERROR_NO_ERROR && options.waitForSync && maxTick > 0 && isSingleOperationTransaction()) {
    MMFilesLogfileManager::instance()->slots()->waitForTick(maxTick);
    EngineSelectorFeature::ENGINE->waitForSync(maxTick);
  }

  // Now see whether or not we have to do synchronous replication:

@@ -1967,7 +1966,7 @@ OperationResult transaction::Methods::removeLocal(std::string const& collectionN

  // wait for operation(s) to be synced to disk here
  if (res == TRI_ERROR_NO_ERROR && options.waitForSync && maxTick > 0 && isSingleOperationTransaction()) {
    MMFilesLogfileManager::instance()->slots()->waitForTick(maxTick);
    EngineSelectorFeature::ENGINE->waitForSync(maxTick);
  }

  // Now see whether or not we have to do synchronous replication:
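The hunks above replace direct calls to MMFilesLogfileManager with `EngineSelectorFeature::ENGINE->waitForSync(maxTick)`, so the transaction layer no longer names a specific storage engine. A toy sketch of that virtual-dispatch pattern (names are illustrative, not the real classes):

```cpp
#include <cstdint>
#include <iostream>

using TickT = uint64_t;  // stands in for TRI_voc_tick_t

// The abstraction the diff introduces: callers see only the base class.
struct Engine {
  virtual ~Engine() = default;
  virtual void waitForSync(TickT tick) = 0;
};

struct MMFilesLikeEngine : Engine {
  void waitForSync(TickT tick) override {
    // the real engine forwards to its logfile manager's slots
    std::cout << "waiting for tick " << tick << " to be synced\n";
  }
};

// Stand-in for EngineSelectorFeature::ENGINE.
static Engine* ENGINE = nullptr;

void commitSingleOperation(TickT maxTick, bool waitForSync) {
  if (waitForSync && maxTick > 0) {
    ENGINE->waitForSync(maxTick);  // engine-agnostic, unlike before
  }
}

int main() {
  MMFilesLikeEngine engine;
  ENGINE = &engine;
  commitSingleOperation(42, true);
}
```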
@@ -27,17 +27,15 @@
#include "Basics/Common.h"

namespace arangodb {
class MMFilesWalMarker;

// a struct for keeping document modification operations in transactions
struct OperationOptions {
  OperationOptions()
      : recoveryMarker(nullptr), waitForSync(false), keepNull(true),
      : recoveryData(nullptr), waitForSync(false), keepNull(true),
        mergeObjects(true), silent(false), ignoreRevs(true),
        returnOld(false), returnNew(false), isRestore(false) {}

  // original marker, set by the recovery procedure only!
  MMFilesWalMarker* recoveryMarker;
  // original marker, set by the MMFiles recovery procedure only!
  void* recoveryData;

  // wait until the operation has been synced
  bool waitForSync;
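Replacing the typed `MMFilesWalMarker* recoveryMarker` with an untyped `void* recoveryData` removes the MMFiles dependency from OperationOptions; the engine that set the pointer casts it back, as the MMFilesCollection hunks above do. A toy model of that round trip (types illustrative):

```cpp
#include <cassert>

// Engine-specific marker type; only the engine knows it.
struct ToyWalMarker { int payload = 7; };

// Engine-neutral options struct, as in the diff: just a void*.
struct ToyOptions { void* recoveryData = nullptr; };

void engineInsert(ToyOptions const& options) {
  ToyWalMarker const* marker;
  if (options.recoveryData == nullptr) {
    marker = nullptr;  // normal path: the engine builds a fresh marker
  } else {
    // recovery path: cast back, exactly as MMFilesCollection::insert does
    marker = static_cast<ToyWalMarker*>(options.recoveryData);
    assert(marker->payload == 7);
  }
  (void)marker;
}

int main() {
  ToyWalMarker envelope;
  ToyOptions options;
  options.recoveryData = static_cast<void*>(&envelope);
  engineInsert(options);
}
```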
@@ -30,8 +30,6 @@
#include "VocBase/voc-types.h"

namespace arangodb {
class MMFilesDocumentDitch;

namespace transaction {
class Context;
}
@@ -39,6 +39,7 @@
#include "ApplicationFeatures/HttpEndpointProvider.h"
#include "Aql/Query.h"
#include "Aql/QueryCache.h"
#include "Aql/QueryExecutionState.h"
#include "Aql/QueryList.h"
#include "Aql/QueryRegistry.h"
#include "Basics/HybridLogicalClock.h"

@@ -1475,8 +1476,7 @@ static void JS_QueriesCurrentAql(
  auto result = v8::Array::New(isolate, static_cast<int>(queries.size()));

  for (auto q : queries) {
    auto const&& timeString = TRI_StringTimeStamp(q.started, false);
    auto const& queryState = q.queryState.substr(8, q.queryState.size() - 9);
    auto timeString = TRI_StringTimeStamp(q.started, false);

    v8::Handle<v8::Object> obj = v8::Object::New(isolate);
    obj->Set(TRI_V8_ASCII_STRING("id"), V8TickId(isolate, q.id));

@@ -1484,7 +1484,7 @@ static void JS_QueriesCurrentAql(
    obj->Set(TRI_V8_ASCII_STRING("started"), TRI_V8_STD_STRING(timeString));
    obj->Set(TRI_V8_ASCII_STRING("runTime"),
             v8::Number::New(isolate, q.runTime));
    obj->Set(TRI_V8_ASCII_STRING("state"), TRI_V8_STD_STRING(queryState));
    obj->Set(TRI_V8_ASCII_STRING("state"), TRI_V8_STD_STRING(aql::QueryExecutionState::toString(q.state)));
    result->Set(i++, obj);
  }

@@ -1528,8 +1528,7 @@ static void JS_QueriesSlowAql(v8::FunctionCallbackInfo<v8::Value> const& args) {
  auto result = v8::Array::New(isolate, static_cast<int>(queries.size()));

  for (auto q : queries) {
    auto const&& timeString = TRI_StringTimeStamp(q.started, false);
    auto const& queryState = q.queryState.substr(8, q.queryState.size() - 9);
    auto timeString = TRI_StringTimeStamp(q.started, false);

    v8::Handle<v8::Object> obj = v8::Object::New(isolate);
    obj->Set(TRI_V8_ASCII_STRING("id"), V8TickId(isolate, q.id));

@@ -1537,7 +1536,7 @@ static void JS_QueriesSlowAql(v8::FunctionCallbackInfo<v8::Value> const& args) {
    obj->Set(TRI_V8_ASCII_STRING("started"), TRI_V8_STD_STRING(timeString));
    obj->Set(TRI_V8_ASCII_STRING("runTime"),
             v8::Number::New(isolate, q.runTime));
    obj->Set(TRI_V8_ASCII_STRING("state"), TRI_V8_STD_STRING(queryState));
    obj->Set(TRI_V8_ASCII_STRING("state"), TRI_V8_STD_STRING(aql::QueryExecutionState::toString(q.state)));
    result->Set(i++, obj);
  }

@@ -168,6 +168,7 @@ LogicalCollection::LogicalCollection(LogicalCollection const& other)
      _cleanupIndexes(0),
      _persistentIndexes(0),
      _physical(other.getPhysical()->clone(this, other.getPhysical())) {
  TRI_ASSERT(_physical != nullptr);
  if (ServerState::instance()->isDBServer() ||
      !ServerState::instance()->isRunningInCluster()) {
    _followers.reset(new FollowerInfo(this));

@@ -185,7 +186,7 @@ LogicalCollection::LogicalCollection(LogicalCollection const& other)
// The Slice contains the part of the plan that
// is relevant for this collection.
LogicalCollection::LogicalCollection(TRI_vocbase_t* vocbase,
                                     VPackSlice const& info, bool isPhysical)
                                     VPackSlice const& info)
    : _internalVersion(0),
      _cid(ReadCid(info)),
      _planId(ReadPlanId(info, _cid)),

@@ -215,7 +216,7 @@ LogicalCollection::LogicalCollection(TRI_vocbase_t* vocbase,
      _persistentIndexes(0),
      _physical(
          EngineSelectorFeature::ENGINE->createPhysicalCollection(this, info)) {
  getPhysical()->setPath(ReadStringValue(info, "path", ""));
  TRI_ASSERT(_physical != nullptr);
  if (!IsAllowedName(info)) {
    THROW_ARANGO_EXCEPTION(TRI_ERROR_ARANGO_ILLEGAL_NAME);
  }

@@ -398,21 +399,6 @@ LogicalCollection::LogicalCollection(TRI_vocbase_t* vocbase,
  }
#endif

  if (!ServerState::instance()->isCoordinator() && isPhysical) {
    // If we are not in the coordinator we need a path
    // to the physical data.
    StorageEngine* engine = EngineSelectorFeature::ENGINE;
    if (getPhysical()->path().empty()) {
      std::string path = engine->createCollection(_vocbase, _cid, this);
      getPhysical()->setPath(path);
    }
  }

  int64_t count = Helper::readNumericValue<int64_t>(info, "count", -1);
  if (count != -1) {
    _physical->updateCount(count);
  }

  if (ServerState::instance()->isDBServer() ||
      !ServerState::instance()->isRunningInCluster()) {
    _followers.reset(new FollowerInfo(this));

@@ -420,7 +406,6 @@ LogicalCollection::LogicalCollection(TRI_vocbase_t* vocbase,

  // update server's tick value
  TRI_UpdateTickServer(static_cast<TRI_voc_tick_t>(_cid));

}

LogicalCollection::~LogicalCollection() {}

@@ -1128,6 +1113,21 @@ bool LogicalCollection::dropIndex(TRI_idx_iid_t iid) {
  return _physical->dropIndex(iid);
}


/// @brief Persist the connected physical collection.
/// This should be called AFTER the collection is successfully
/// created, and only on Single servers or DBServers.
void LogicalCollection::persistPhysicalCollection() {
  // Coordinators are not allowed to have local collections!
  TRI_ASSERT(!ServerState::instance()->isCoordinator());

  // We have not yet persisted this collection!
  TRI_ASSERT(getPhysical()->path().empty());
  StorageEngine* engine = EngineSelectorFeature::ENGINE;
  std::string path = engine->createCollection(_vocbase, _cid, this);
  getPhysical()->setPath(path);
}

/// @brief creates the initial indexes for the collection
void LogicalCollection::createInitialIndexes() {
  if (!_indexes.empty()) {
@@ -92,8 +92,7 @@ class LogicalCollection {
  friend struct ::TRI_vocbase_t;

 public:
  LogicalCollection(TRI_vocbase_t*, velocypack::Slice const&,
                    bool isPhysical);
  LogicalCollection(TRI_vocbase_t*, velocypack::Slice const&);

  virtual ~LogicalCollection();

@@ -318,6 +317,11 @@ class LogicalCollection {
                   TRI_voc_tick_t maxTick,
                   ManagedDocumentResult& result);

  /// @brief Persist the connected physical collection.
  /// This should be called AFTER the collection is successfully
  /// created, and only on Single servers or DBServers.
  void persistPhysicalCollection();

 private:
  // SECTION: Index creation

@@ -339,7 +339,7 @@ arangodb::LogicalCollection* TRI_vocbase_t::createCollectionWorker(
  // Try to create a new collection. This is not registered yet

  std::unique_ptr<arangodb::LogicalCollection> collection =
      std::make_unique<arangodb::LogicalCollection>(this, parameters, true);
      std::make_unique<arangodb::LogicalCollection>(this, parameters);
  TRI_ASSERT(collection != nullptr);

  WRITE_LOCKER(writeLocker, _collectionsLock);

@@ -364,6 +364,10 @@ arangodb::LogicalCollection* TRI_vocbase_t::createCollectionWorker(
    collection->setStatus(TRI_VOC_COL_STATUS_LOADED);
    // set collection version to 3.1, as the collection is just created
    collection->setVersion(LogicalCollection::VERSION_31);

    // Let's try to persist it.
    collection->persistPhysicalCollection();

    events::CreateCollection(name, TRI_ERROR_NO_ERROR);
    return collection.release();
  } catch (...) {
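With this change, the LogicalCollection constructor no longer touches the disk; createCollectionWorker builds and registers the collection first and only then calls persistPhysicalCollection(), so a throw before that point leaves nothing on disk. A toy sketch of the two-phase flow (names illustrative):

```cpp
#include <iostream>
#include <memory>
#include <stdexcept>
#include <string>

// Toy collection: the constructor only validates, it does no I/O.
struct ToyCollection {
  explicit ToyCollection(std::string n) : name(std::move(n)) {
    if (name.empty()) throw std::runtime_error("illegal name");
  }
  // Phase two: persist to disk, only after in-memory registration worked.
  void persistPhysical() { path = "/data/" + name; }
  std::string name;
  std::string path;  // empty until persisted
};

std::unique_ptr<ToyCollection> createCollection(std::string const& name) {
  auto collection = std::make_unique<ToyCollection>(name);  // may throw
  // ... register in the catalog, set status/version ...
  collection->persistPhysical();  // only now touch the disk
  return collection;
}

int main() {
  auto c = createCollection("UnitTests");
  std::cout << c->name << " persisted at " << c->path << "\n";
}
```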
@@ -32,6 +32,8 @@
#include "SimpleHttpClient/GeneralClientConnection.h"
#include "SimpleHttpClient/SimpleHttpClient.h"

#include <regex>

using namespace arangodb;
using namespace arangodb::basics;
using namespace arangodb::httpclient;

@@ -113,12 +115,10 @@ void ImportFeature::collectOptions(
      new DiscreteValuesParameter<StringParameter>(&_createCollectionType,
                                                   types));

  std::unordered_set<std::string> imports = {"csv", "tsv", "json"};
  std::vector<std::string> importsVector(imports.begin(), imports.end());
  std::string importsJoined = StringUtils::join(importsVector, ", ");
  std::unordered_set<std::string> imports = {"csv", "tsv", "json", "jsonl", "auto"};

  options->addOption(
      "--type", "type of file (" + importsJoined + ")",
      "--type", "type of import file",
      new DiscreteValuesParameter<StringParameter>(&_typeImport, imports));

  options->addOption(

@@ -220,7 +220,25 @@ void ImportFeature::start() {
    LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << httpClient->getErrorMessage() << "'";
    FATAL_ERROR_EXIT();
  }

  if (_typeImport == "auto") {
    std::regex re = std::regex(".*?\\.([a-zA-Z]+)", std::regex::ECMAScript);
    std::smatch match;
    if (!std::regex_match(_filename, match, re)) {
      LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "Cannot auto-detect file type from filename '" << _filename << "'";
      FATAL_ERROR_EXIT();
    }

    std::string extension = match[1].str();
    if (extension == "json" || extension == "jsonl" || extension == "csv" || extension == "tsv") {
      _typeImport = extension;
    } else {
      LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "Unsupported file extension '" << extension << "'";
      FATAL_ERROR_EXIT();
    }
  }

  // successfully connected
  std::cout << "Connected to ArangoDB '"
            << httpClient->getEndpointSpecification() << "', version "

@@ -364,9 +382,9 @@ void ImportFeature::start() {
                       arangodb::import::ImportHelper::TSV);
  }

  else if (_typeImport == "json") {
  else if (_typeImport == "json" || _typeImport == "jsonl") {
    std::cout << "Starting JSON import..." << std::endl;
    ok = ih.importJson(_collectionName, _filename);
    ok = ih.importJson(_collectionName, _filename, (_typeImport == "jsonl"));
  }

  else {
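The new `--type auto` path derives the import format from the filename extension with the regex shown above. A standalone check of that detection logic, using the same pattern with an illustrative driver:

```cpp
#include <iostream>
#include <regex>
#include <string>

// Same extension-sniffing regex as in ImportFeature::start() above.
static std::string detectType(std::string const& filename) {
  std::regex re(".*?\\.([a-zA-Z]+)", std::regex::ECMAScript);
  std::smatch match;
  if (!std::regex_match(filename, match, re)) return "";
  std::string extension = match[1].str();
  if (extension == "json" || extension == "jsonl" ||
      extension == "csv" || extension == "tsv") {
    return extension;
  }
  return "";
}

int main() {
  for (std::string name : {"data.jsonl", "points.csv", "README.md"}) {
    std::string type = detectType(name);
    std::cout << name << " -> " << (type.empty() ? "unsupported" : type)
              << "\n";
  }
}
```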
@@ -280,7 +280,8 @@ bool ImportHelper::importDelimited(std::string const& collectionName,
}

bool ImportHelper::importJson(std::string const& collectionName,
                              std::string const& fileName) {
                              std::string const& fileName,
                              bool assumeLinewise) {
  _collectionName = collectionName;
  _firstLine = "";
  _outputBuffer.clear();

@@ -309,6 +310,11 @@ bool ImportHelper::importJson(std::string const& collectionName,
  bool isObject = false;
  bool checkedFront = false;

  if (assumeLinewise) {
    checkedFront = true;
    isObject = false;
  }

  // progress display control variables
  int64_t totalRead = 0;
  double nextProgress = ProgressStep;

@@ -345,8 +351,7 @@ bool ImportHelper::importJson(std::string const& collectionName,

    if (!checkedFront) {
      // detect the import file format (single lines with individual JSON
      // objects
      // or a JSON array with all documents)
      // objects or a JSON array with all documents)
      char const* p = _outputBuffer.begin();
      char const* e = _outputBuffer.end();

@@ -79,7 +79,8 @@ class ImportHelper {
  //////////////////////////////////////////////////////////////////////////////

  bool importJson(std::string const& collectionName,
                  std::string const& fileName);
                  std::string const& fileName,
                  bool assumeLinewise);

  //////////////////////////////////////////////////////////////////////////////
  /// @brief sets the action to carry out on duplicate _key
@@ -1024,7 +1024,7 @@ static void ClientConnection_importJson(
  std::string fileName = TRI_ObjectToString(isolate, args[0]);
  std::string collectionName = TRI_ObjectToString(isolate, args[1]);

  if (ih.importJson(collectionName, fileName)) {
  if (ih.importJson(collectionName, fileName, false)) {
    v8::Handle<v8::Object> result = v8::Object::New(isolate);

    result->Set(TRI_V8_ASCII_STRING("lines"),
@@ -3396,6 +3396,7 @@ const recoveryTests = [
  'disk-full-logfile-data',
  'disk-full-datafile',
  'collection-drop-recreate',
  'collection-duplicate-name',
  'create-with-temp',
  'create-with-temp-old',
  'create-collection-fail',

@@ -3421,6 +3422,7 @@ const recoveryTests = [
  'resume-recovery-other',
  'resume-recovery',
  'foxx-directories',
  'collection-duplicate',
  'collection-rename',
  'collection-properties',
  'empty-logfiles',
@@ -1,20 +0,0 @@
Source,Target
1,3
10,8
11,14
12,18
13,6
14,12
15,19
16,19
17,19
18,15
2,20
20,19
3,14
4,16
5,10
6,19
7,2
8,1
9,2

Binary file not shown.

@@ -1,21 +0,0 @@
Id,Degree,Eccentricity,Closeness Centrality,Betweenness Centrality
1,2,9.0,5.2631578947368425,48.0
2,3,11.0,4.842105263157895,35.0
3,2,8.0,4.631578947368421,60.0
4,1,11.0,5.2631578947368425,0.0
5,1,12.0,7.7894736842105265,0.0
6,2,10.0,4.315789473684211,18.0
7,1,12.0,5.7894736842105265,0.0
8,2,10.0,6.0,34.0
9,1,12.0,5.7894736842105265,0.0
10,2,11.0,6.842105263157895,18.0
11,1,8.0,5.052631578947368,0.0
12,2,6.0,3.789473684210526,84.0
13,1,11.0,5.2631578947368425,0.0
14,3,7.0,4.105263157894737,83.0
15,2,8.0,3.473684210526316,90.0
16,2,10.0,4.315789473684211,18.0
17,1,10.0,4.421052631578948,0.0
18,2,7.0,3.5789473684210527,88.0
19,5,9.0,3.473684210526316,118.0
20,2,10.0,4.105263157894737,48.0
@@ -634,4 +634,4 @@ function getFieldDef(schema, parentType, fieldName) {
    return _introspection.TypeNameMetaFieldDef;
  }
  return parentType.getFields()[fieldName];
}
}

@@ -186,4 +186,4 @@ function coerceValue(type, value) {
    if (!(0, _isNullish2.default)(parsed)) {
      return parsed;
    }
  }
}

@@ -66,4 +66,4 @@ function graphql(schema, requestString, rootValue, contextValue, variableValues,
 *
 * `data` is the result of a successful execution of the query.
 * `errors` is included when any errors occurred as a non-empty array.
 */
 */
@@ -38,7 +38,7 @@ module.exports = function graphql (cfg) {
      return variables;
    }
    try {
      return JSON.stringify(variables);
      return JSON.parse(variables);
    } catch (e) {
      res.throw(400, 'Variables are invalid JSON', e);
    }
@@ -0,0 +1,82 @@
/* jshint globalstrict:false, strict:false, unused : false */
/* global assertEqual, assertNull, assertTrue, assertFalse */

// //////////////////////////////////////////////////////////////////////////////
// / @brief tests for dump/reload
// /
// / @file
// /
// / DISCLAIMER
// /
// / Copyright 2010-2012 triagens GmbH, Cologne, Germany
// /
// / Licensed under the Apache License, Version 2.0 (the "License")
// / you may not use this file except in compliance with the License.
// / You may obtain a copy of the License at
// /
// /     http://www.apache.org/licenses/LICENSE-2.0
// /
// / Unless required by applicable law or agreed to in writing, software
// / distributed under the License is distributed on an "AS IS" BASIS,
// / WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// / See the License for the specific language governing permissions and
// / limitations under the License.
// /
// / Copyright holder is ArangoDB GmbH, Cologne, Germany
// /
// / @author Michael Hackstein
// / @author Copyright 2017, ArangoDB GmbH, Cologne, Germany
// //////////////////////////////////////////////////////////////////////////////

var db = require('@arangodb').db;
var internal = require('internal');
var jsunity = require('jsunity');

function runSetup () {
  db._drop('UnitTestsRecovery');
  db._create('UnitTestsRecovery');

  try {
    db._create('UnitTestsRecovery');
  } catch (e) {
    // This intentionally should fail!
    if (internal.errors.ERROR_ARANGO_DUPLICATE_NAME.code === e.errorNum) {
      // Only this is a valid return code from the server
      return 0;
    }
  }
  // Fail if we get here. We somehow managed to save the same collection twice without error
  return 1;
};

function recoverySuite () {
  'use strict';
  jsunity.jsUnity.attachAssertions();

  return {
    setUp: function () {},
    tearDown: function () {},

    testCollectionDuplicateName: function () {
      var c = db._collection('UnitTestsRecovery');
      assertTrue(c !== null && c !== undefined);
    }
  };
};

// //////////////////////////////////////////////////////////////////////////////
// / @brief executes the test suite
// //////////////////////////////////////////////////////////////////////////////

function main (argv) {
  'use strict';
  if (argv[1] === 'setup') {
    return runSetup();
  } else {
    jsunity.run(recoverySuite);
    return jsunity.done();
  }
}
@@ -0,0 +1,90 @@
/* jshint globalstrict:false, strict:false, unused : false */
/* global assertEqual */

// //////////////////////////////////////////////////////////////////////////////
// / @brief tests for dump/reload
// /
// / @file
// /
// / DISCLAIMER
// /
// / Copyright 2010-2012 triagens GmbH, Cologne, Germany
// /
// / Licensed under the Apache License, Version 2.0 (the "License")
// / you may not use this file except in compliance with the License.
// / You may obtain a copy of the License at
// /
// / http://www.apache.org/licenses/LICENSE-2.0
// /
// / Unless required by applicable law or agreed to in writing, software
// / distributed under the License is distributed on an "AS IS" BASIS,
// / WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// / See the License for the specific language governing permissions and
// / limitations under the License.
// /
// / Copyright holder is triAGENS GmbH, Cologne, Germany
// /
// / @author Jan Steemann
// / @author Copyright 2012, triAGENS GmbH, Cologne, Germany
// //////////////////////////////////////////////////////////////////////////////

var db = require('@arangodb').db;
var internal = require('internal');
var jsunity = require('jsunity');

function runSetup () {
  'use strict';
  internal.debugClearFailAt();

  db._drop('UnitTestsRecovery');
  var c = db._create('UnitTestsRecovery');

  // try to re-create collection with the same name
  try {
    db._create('UnitTestsRecovery');
  } catch (err) {
  }

  c.save({ _key: 'foo' }, true);

  internal.debugSegfault('crashing server');
}

// //////////////////////////////////////////////////////////////////////////////
// / @brief test suite
// //////////////////////////////////////////////////////////////////////////////

function recoverySuite () {
  'use strict';
  jsunity.jsUnity.attachAssertions();

  return {
    setUp: function () {},
    tearDown: function () {},

    // //////////////////////////////////////////////////////////////////////////////
    // / @brief test whether we can restore the trx data
    // //////////////////////////////////////////////////////////////////////////////

    testCollectionDuplicate: function () {
      var c = db._collection('UnitTestsRecovery');
      assertEqual(1, c.count());
    }

  };
}

// //////////////////////////////////////////////////////////////////////////////
// / @brief executes the test suite
// //////////////////////////////////////////////////////////////////////////////

function main (argv) {
  'use strict';
  if (argv[1] === 'setup') {
    runSetup();
    return 0;
  } else {
    jsunity.run(recoverySuite);
    return jsunity.done().status ? 0 : 1;
  }
}
@@ -22,8 +22,8 @@

#include "DaemonFeature.h"

#include <iostream>
#include <fstream>
#include <iostream>

#include "Basics/FileUtils.h"
#include "Logger/Logger.h"

@@ -65,13 +65,15 @@ void DaemonFeature::validateOptions(std::shared_ptr<ProgramOptions> options) {
  if (!_daemon) {
    return;
  }

  if (_pidFile.empty()) {
    LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "need --pid-file in --daemon mode";
    LOG_TOPIC(FATAL, arangodb::Logger::FIXME)
        << "need --pid-file in --daemon mode";
    FATAL_ERROR_EXIT();
  }

  LoggerFeature* logger = ApplicationServer::getFeature<LoggerFeature>("Logger");
  LoggerFeature* logger =
      ApplicationServer::getFeature<LoggerFeature>("Logger");
  logger->setBackgrounded(true);

  // make the pid filename absolute

@@ -79,14 +81,16 @@ void DaemonFeature::validateOptions(std::shared_ptr<ProgramOptions> options) {
  std::string currentDir = FileUtils::currentDirectory(&err);

  char* absoluteFile =
      TRI_GetAbsolutePath(_pidFile.c_str(), currentDir.c_str());
      TRI_GetAbsolutePath(_pidFile.c_str(), currentDir.c_str());

  if (absoluteFile != nullptr) {
    _pidFile = std::string(absoluteFile);
    TRI_Free(TRI_UNKNOWN_MEM_ZONE, absoluteFile);
    LOG_TOPIC(DEBUG, arangodb::Logger::FIXME) << "using absolute pid file '" << _pidFile << "'";
    LOG_TOPIC(DEBUG, arangodb::Logger::FIXME) << "using absolute pid file '"
                                              << _pidFile << "'";
  } else {
    LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "cannot determine absolute path";
    LOG_TOPIC(FATAL, arangodb::Logger::FIXME)
        << "cannot determine absolute path";
    FATAL_ERROR_EXIT();
  }
}

@@ -127,7 +131,8 @@ void DaemonFeature::unprepare() {

  // remove pid file
  if (!FileUtils::remove(_pidFile)) {
    LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "cannot remove pid file '" << _pidFile << "'";
    LOG_TOPIC(ERR, arangodb::Logger::FIXME) << "cannot remove pid file '"
                                            << _pidFile << "'";
  }
}

@@ -135,7 +140,8 @@ void DaemonFeature::checkPidFile() {
  // check if the pid-file exists
  if (!_pidFile.empty()) {
    if (FileUtils::isDirectory(_pidFile)) {
      LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "pid-file '" << _pidFile << "' is a directory";
      LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "pid-file '" << _pidFile
                                                << "' is a directory";
      FATAL_ERROR_EXIT();
    } else if (FileUtils::exists(_pidFile) && FileUtils::size(_pidFile) > 0) {
      LOG_TOPIC(INFO, Logger::STARTUP) << "pid-file '" << _pidFile

@@ -150,7 +156,8 @@ void DaemonFeature::checkPidFile() {
      f >> oldPid;

      if (oldPid == 0) {
        LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "pid-file '" << _pidFile << "' is unreadable";
        LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "pid-file '" << _pidFile
                                                  << "' is unreadable";
        FATAL_ERROR_EXIT();
      }

@@ -159,9 +166,9 @@ void DaemonFeature::checkPidFile() {
      int r = kill(oldPid, 0);

      if (r == 0 || errno == EPERM) {
        LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "pid-file '" << _pidFile
            << "' exists and process with pid " << oldPid
            << " is still running, refusing to start twice";
        LOG_TOPIC(FATAL, arangodb::Logger::FIXME)
            << "pid-file '" << _pidFile << "' exists and process with pid "
            << oldPid << " is still running, refusing to start twice";
        FATAL_ERROR_EXIT();
      } else if (errno == ESRCH) {
        LOG_TOPIC(ERR, Logger::STARTUP) << "pid-file '" << _pidFile

@@ -169,25 +176,26 @@ void DaemonFeature::checkPidFile() {
            << oldPid << " exists";

        if (!FileUtils::remove(_pidFile)) {
          LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "pid-file '" << _pidFile
              << "' exists, no process with pid " << oldPid
              << " exists, but pid-file cannot be removed";
          LOG_TOPIC(FATAL, arangodb::Logger::FIXME)
              << "pid-file '" << _pidFile << "' exists, no process with pid "
              << oldPid << " exists, but pid-file cannot be removed";
          FATAL_ERROR_EXIT();
        }

        LOG_TOPIC(INFO, Logger::STARTUP) << "removed stale pid-file '"
                                         << _pidFile << "'";
      } else {
        LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "pid-file '" << _pidFile << "' exists and kill "
            << oldPid << " failed";
        LOG_TOPIC(FATAL, arangodb::Logger::FIXME)
            << "pid-file '" << _pidFile << "' exists and kill " << oldPid
            << " failed";
        FATAL_ERROR_EXIT();
      }
    }

    // failed to open file
    else {
      LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "pid-file '" << _pidFile
          << "' exists, but cannot be opened";
      LOG_TOPIC(FATAL, arangodb::Logger::FIXME)
          << "pid-file '" << _pidFile << "' exists, but cannot be opened";
      FATAL_ERROR_EXIT();
    }
  }

@@ -237,13 +245,17 @@ int DaemonFeature::forkProcess() {

  // change the current working directory
  if (!_workingDirectory.empty()) {
    if (!FileUtils::changeDirectory(_workingDirectory)) {
      LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "cannot change into working directory '"
          << _workingDirectory << "'";
    FileResult res = FileUtils::changeDirectory(_workingDirectory);

    if (!res.ok()) {
      LOG_TOPIC(FATAL, arangodb::Logger::STARTUP)
          << "cannot change into working directory '" << _workingDirectory
          << "': " << res.errorMessage();
      FATAL_ERROR_EXIT();
    } else {
      LOG_TOPIC(INFO, arangodb::Logger::FIXME) << "changed working directory for child process to '"
          << _workingDirectory << "'";
      LOG_TOPIC(INFO, arangodb::Logger::STARTUP)
          << "changed working directory for child process to '"
          << _workingDirectory << "'";
    }
  }

@@ -257,17 +269,20 @@ int DaemonFeature::forkProcess() {
  }

  if (dup2(fd, STDIN_FILENO) < 0) {
    LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "cannot re-map stdin to /dev/null";
    LOG_TOPIC(FATAL, arangodb::Logger::FIXME)
        << "cannot re-map stdin to /dev/null";
    FATAL_ERROR_EXIT();
  }

  if (dup2(fd, STDOUT_FILENO) < 0) {
    LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "cannot re-map stdout to /dev/null";
    LOG_TOPIC(FATAL, arangodb::Logger::FIXME)
        << "cannot re-map stdout to /dev/null";
    FATAL_ERROR_EXIT();
  }

  if (dup2(fd, STDERR_FILENO) < 0) {
    LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "cannot re-map stderr to /dev/null";
    LOG_TOPIC(FATAL, arangodb::Logger::FIXME)
        << "cannot re-map stderr to /dev/null";
    FATAL_ERROR_EXIT();
  }

@@ -280,7 +295,8 @@ void DaemonFeature::writePidFile(int pid) {
  std::ofstream out(_pidFile.c_str(), std::ios::trunc);

  if (!out) {
    LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "cannot write pid-file '" << _pidFile << "'";
    LOG_TOPIC(FATAL, arangodb::Logger::FIXME) << "cannot write pid-file '"
                                              << _pidFile << "'";
    FATAL_ERROR_EXIT();
  }
@@ -210,19 +210,6 @@ bool arangodb::basics::TRI_AttributeNamesHaveExpansion(
/// @brief append the attribute name to an output stream
////////////////////////////////////////////////////////////////////////////////

std::ostream& operator<<(std::ostream& stream,
                         arangodb::basics::AttributeName const* name) {
  stream << name->name;
  if (name->shouldExpand) {
    stream << "[*]";
  }
  return stream;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief append the attribute name to an output stream
////////////////////////////////////////////////////////////////////////////////

std::ostream& operator<<(std::ostream& stream,
                         arangodb::basics::AttributeName const& name) {
  stream << name.name;

@@ -236,23 +223,6 @@ std::ostream& operator<<(std::ostream& stream,
/// @brief append the attribute names to an output stream
////////////////////////////////////////////////////////////////////////////////

std::ostream& operator<<(
    std::ostream& stream,
    std::vector<arangodb::basics::AttributeName> const* attributes) {
  size_t const n = attributes->size();
  for (size_t i = 0; i < n; ++i) {
    if (i > 0) {
      stream << ".";
    }
    stream << attributes[i];
  }
  return stream;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief append the attribute names to an output stream
////////////////////////////////////////////////////////////////////////////////

std::ostream& operator<<(
    std::ostream& stream,
    std::vector<arangodb::basics::AttributeName> const& attributes) {

@@ -129,10 +129,7 @@ bool TRI_AttributeNamesHaveExpansion(std::vector<AttributeName> const& input);
}
}

std::ostream& operator<<(std::ostream&, arangodb::basics::AttributeName const*);
std::ostream& operator<<(std::ostream&, arangodb::basics::AttributeName const&);
std::ostream& operator<<(std::ostream&,
                         std::vector<arangodb::basics::AttributeName> const*);
std::ostream& operator<<(std::ostream&,
                         std::vector<arangodb::basics::AttributeName> const&);
@@ -0,0 +1,32 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2017 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Dr. Frank Celler
////////////////////////////////////////////////////////////////////////////////

#include "FileResult.h"

namespace arangodb {
FileResult::FileResult(bool state)
    : Result(), _state(state), _sysErrorNumber(0) {}

FileResult::FileResult(bool state, int sysErrorNumber)
    : Result(TRI_ERROR_SYS_ERROR, strerror(sysErrorNumber)),
      _state(state), _sysErrorNumber(sysErrorNumber) {}
}
@@ -0,0 +1,44 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2017 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Dr. Frank Celler
////////////////////////////////////////////////////////////////////////////////

#ifndef ARANGODB_BASICS_FILE_RESULT_H
#define ARANGODB_BASICS_FILE_RESULT_H 1

#include "Basics/Result.h"

namespace arangodb {
class FileResult : public Result {
 public:
  explicit FileResult(bool state);
  FileResult(bool state, int sysErrorNumber);

 public:
  bool state() const { return _state; }
  int sysErrorNumber() const { return _sysErrorNumber; }

 private:
  bool const _state;
  int const _sysErrorNumber;
};
}

#endif
@@ -360,7 +360,7 @@ bool copyDirectoryRecursive(std::string const& source,
  bool rc = true;

  auto isSubDirectory = [](std::string const& name) -> bool {
    return isDirectory(name);
    return isDirectory(name);
  };
#ifdef TRI_HAVE_WIN32_LIST_FILES
  struct _finddata_t oneItem;

@@ -557,8 +557,14 @@ std::string stripExtension(std::string const& path,
  return path;
}

bool changeDirectory(std::string const& path) {
  return TRI_CHDIR(path.c_str()) == 0;
FileResult changeDirectory(std::string const& path) {
  int res = TRI_CHDIR(path.c_str());

  if (res == 0) {
    return FileResult(true);
  } else {
    return FileResult(false, errno);
  }
}

std::string currentDirectory(int* errorNumber) {
@@ -25,7 +25,9 @@
#define ARANGODB_BASICS_FILE_UTILS_H 1

#include "Basics/Common.h"

#include "Basics/files.h"
#include "Basics/FileResult.h"

namespace arangodb {
namespace basics {

@@ -107,7 +109,7 @@ std::string stripExtension(std::string const& path,
                           std::string const& extension);

// changes into directory
bool changeDirectory(std::string const& path);
FileResult changeDirectory(std::string const& path);

// returns the current directory
std::string currentDirectory(int* errorNumber = 0);
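The hunks above replace the old boolean changeDirectory with one returning a FileResult, so a caller can report why a chdir failed rather than only that it did. A minimal sketch of the new calling convention, assuming only the changeDirectory, ok() and errorMessage() members visible in this diff; the namespace qualification and target path are illustrative, not taken from the commit:

#include <iostream>

#include "Basics/FileUtils.h"

// Hypothetical caller mirroring how DaemonFeature::forkProcess() uses the
// new API in the hunks above; "/var/lib/example" is a made-up path.
void enterWorkingDirectory() {
  arangodb::FileResult res =
      arangodb::basics::FileUtils::changeDirectory("/var/lib/example");

  if (!res.ok()) {
    // errorMessage() carries strerror(errno), set by the FileResult
    // constructor shown in FileResult.cpp above
    std::cerr << "cannot change into directory: " << res.errorMessage()
              << std::endl;
  }
}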
@@ -0,0 +1,54 @@
////////////////////////////////////////////////////////////////////////////////
/// DISCLAIMER
///
/// Copyright 2017 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Dr. Frank Celler
////////////////////////////////////////////////////////////////////////////////

#ifndef ARANGODB_BASICS_RESULT_H
#define ARANGODB_BASICS_RESULT_H 1

#include "Basics/Common.h"

namespace arangodb {
class Result {
 public:
  Result() : _errorNumber(TRI_ERROR_NO_ERROR) {}
  Result(int errorNumber, std::string const& errorMessage)
      : _errorNumber(errorNumber), _errorMessage(errorMessage) {}
  Result(int errorNumber, std::string&& errorMessage)
      : _errorNumber(errorNumber), _errorMessage(std::move(errorMessage)) {}

  virtual ~Result() {}

 public:
  // the default implementations are const, but subclasses might
  // really do more work to compute - for example - the error
  // string.

  virtual bool ok() { return _errorNumber == TRI_ERROR_NO_ERROR; }
  virtual int errorNumber() { return _errorNumber; }
  virtual std::string errorMessage() { return _errorMessage; }

 protected:
  int _errorNumber;
  std::string _errorMessage;
};
}

#endif
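The new Result base class above is the generic error carrier that FileResult builds on: a subclass forwards an error number and message to the base and adds its own payload. A short sketch of that pattern, assuming only the Result members shown in this hunk; ParseResult is a hypothetical subclass used purely for illustration:

#include <string>

#include "Basics/Result.h"

// Hypothetical subclass, analogous to FileResult above: it keeps an extra
// payload (a parse position) next to the inherited error state.
class ParseResult : public arangodb::Result {
 public:
  ParseResult() : Result(), _position(0) {}
  ParseResult(int errorNumber, std::string const& errorMessage, size_t position)
      : Result(errorNumber, errorMessage), _position(position) {}

  size_t position() const { return _position; }

 private:
  size_t _position;
};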
@@ -26,7 +26,6 @@

#include "Basics/Common.h"
#include "Basics/Exceptions.h"
#include "Logger/Logger.h"
#include "Zip/zip.h"

#include <sstream>

@@ -26,7 +26,7 @@
#include <iostream>

std::ostream& operator<<(std::ostream& stream, arangodb::StringRef const& ref) {
  stream << std::string(ref.data(), ref.length());
  stream.write(ref.data(), ref.length());
  return stream;
}
@@ -114,6 +114,16 @@ void TRI_ShutdownDebugging();
void TRI_FlushDebugging();
void TRI_FlushDebugging(char const* file, int line, char const* message);

////////////////////////////////////////////////////////////////////////////////
/// @brief dump pair contents to an ostream
////////////////////////////////////////////////////////////////////////////////

template <typename T1, typename T2>
std::ostream& operator<<(std::ostream& stream, std::pair<T1, T2> const& obj) {
  stream << '(' << obj.first << ", " << obj.second << ')';
  return stream;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief dump vector contents to an ostream
////////////////////////////////////////////////////////////////////////////////

@@ -209,7 +219,7 @@ std::ostream& operator<<(std::ostream& stream,
}

////////////////////////////////////////////////////////////////////////////////
/// @brief dump unordered_map contents to an ostream
/// @brief dump map contents to an ostream
////////////////////////////////////////////////////////////////////////////////

template <typename K, typename V>
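The pair operator<< added above makes composite values streamable for debug output. A tiny usage sketch, assuming the template comes from this header; the include path "Basics/debugging.h" is inferred from the TRI_...Debugging declarations in the hunk, not stated by the commit:

#include <iostream>
#include <string>
#include <utility>

#include "Basics/debugging.h"  // assumed header name for the hunk above

int main() {
  std::pair<int, std::string> entry(1, "foo");
  std::cout << entry << std::endl;  // prints: (1, foo)
  return 0;
}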
@@ -130,6 +130,7 @@ add_library(${LIB_ARANGO} STATIC
  Basics/ConditionVariable.cpp
  Basics/DataProtector.cpp
  Basics/Exceptions.cpp
  Basics/FileResult.cpp
  Basics/FileUtils.cpp
  Basics/HybridLogicalClock.cpp
  Basics/LocalTaskQueue.cpp
@@ -82,12 +82,6 @@ class LoggerStream {
    return *this;
  }

  template <typename T1, typename T2>
  LoggerStream& operator<<(std::pair<T1, T2> const& obj) {
    _out << '(' << obj.first << ", " << obj.second << ')';
    return *this;
  }

 private:
  std::stringstream _out;
  size_t _topicId;
@@ -31,6 +31,7 @@
#include "Basics/StringUtils.h"
#include "GeneralRequest.h"
#include "Endpoint/Endpoint.h"
#include "Logger/Logger.h"
#include "Rest/CommonDefines.h"

namespace arangodb {
@@ -32,7 +32,7 @@ class Endpoint;
namespace communicator {
class Destination {
 public:
  Destination(std::string const& url) : _url(url) {}
  explicit Destination(std::string const& url) : _url(url) {}

 public:
  std::string const& url() const { return _url; }
@@ -44,7 +44,7 @@ std::unordered_map<std::string, std::string> const
    SimpleHttpClient::NO_HEADERS{};

/// @brief default value for max packet size
size_t SimpleHttpClient::MaxPacketSize = 128 * 1024 * 1024;
size_t SimpleHttpClient::MaxPacketSize = 256 * 1024 * 1024;

SimpleHttpClient::SimpleHttpClient(GeneralClientConnection* connection,
                                   double requestTimeout, bool warn)
@@ -35,6 +35,7 @@
#include <openssl/ssl.h>
#include <openssl/err.h>
#include "Basics/socket-utils.h"
#include "Logger/Logger.h"
#include "Ssl/ssl-helper.h"

#undef TRACE_SSL_CONNECTIONS
@@ -36,7 +36,10 @@ add_executable(
  Cache/Rebalancer.cpp
  Cache/State.cpp
  Cache/TransactionalBucket.cpp
  Cache/TransactionWindow.cpp
  Cache/TransactionalCache.cpp
  Cache/TransactionalStore.cpp
  Cache/TransactionManager.cpp
  Cache/TransactionsWithBackingStore.cpp
  Geo/georeg.cpp
  main.cpp
)
@@ -32,6 +32,9 @@
#include "Cache/FrequencyBuffer.h"

#include <stdint.h>
#include <memory>

#include <iostream>

using namespace arangodb::cache;

@@ -54,7 +57,8 @@ SECTION("tst_uint8_t") {
  CHECK(uint8_t() == zero);

  FrequencyBuffer<uint8_t> buffer(8);
  CHECK(buffer.memoryUsage() == sizeof(FrequencyBuffer<uint8_t>) + 8);
  CHECK(buffer.memoryUsage() ==
        sizeof(FrequencyBuffer<uint8_t>) + sizeof(std::vector<uint8_t>) + 8);

  for (size_t i = 0; i < 4; i++) {
    buffer.insertRecord(two);

@@ -64,52 +68,73 @@ SECTION("tst_uint8_t") {
  }

  auto frequencies = buffer.getFrequencies();
  CHECK(2ULL == frequencies->size());
  CHECK(static_cast<uint64_t>(2) == frequencies->size());
  CHECK(one == (*frequencies)[0].first);
  CHECK(2ULL == (*frequencies)[0].second);
  CHECK(static_cast<uint64_t>(2) == (*frequencies)[0].second);
  CHECK(two == (*frequencies)[1].first);
  CHECK(4ULL == (*frequencies)[1].second);
  CHECK(static_cast<uint64_t>(4) == (*frequencies)[1].second);

  for (size_t i = 0; i < 8; i++) {
    buffer.insertRecord(one);
  }

  frequencies = buffer.getFrequencies();
  CHECK(1ULL == frequencies->size());
  CHECK(static_cast<size_t>(1) == frequencies->size());
  CHECK(one == (*frequencies)[0].first);
  CHECK(8ULL == (*frequencies)[0].second);
  CHECK(static_cast<uint64_t>(8) == (*frequencies)[0].second);
}

////////////////////////////////////////////////////////////////////////////////
/// @brief test behavior with pointers
/// @brief test behavior with shared_ptr
////////////////////////////////////////////////////////////////////////////////

SECTION("tst_pointers") {
  uint8_t* zero = nullptr;
  uint8_t one = 1;
  uint8_t two = 2;
  struct cmp_weak_ptr {
    bool operator()(std::weak_ptr<int> const& left,
                    std::weak_ptr<int> const& right) const {
      return !left.owner_before(right) && !right.owner_before(left);
    }
  };

  struct hash_weak_ptr {
    size_t operator()(std::weak_ptr<int> const& wp) const {
      auto sp = wp.lock();
      return std::hash<decltype(sp)>()(sp);
    }
  };

  typedef FrequencyBuffer<std::weak_ptr<int>, cmp_weak_ptr, hash_weak_ptr>
      BufferType;

  std::shared_ptr<int> p0(nullptr);

  // check that default construction is as expected
  typedef uint8_t* smallptr;
  CHECK(smallptr() == zero);
  CHECK(std::shared_ptr<int>() == p0);

  FrequencyBuffer<uint8_t*> buffer(8);
  std::shared_ptr<int> p1(new int());
  *p1 = static_cast<int>(1);
  std::shared_ptr<int> p2(new int());
  *p2 = static_cast<int>(2);

  BufferType buffer(8);
  CHECK(buffer.memoryUsage() ==
        sizeof(FrequencyBuffer<uint8_t*>) + (8 * sizeof(uint8_t*)));
        sizeof(BufferType) +
            sizeof(std::vector<std::weak_ptr<int>>) +
            (8 * sizeof(std::weak_ptr<int>)));

  for (size_t i = 0; i < 4; i++) {
    buffer.insertRecord(&two);
    buffer.insertRecord(p1);
  }
  for (size_t i = 0; i < 2; i++) {
    buffer.insertRecord(&one);
    buffer.insertRecord(p2);
  }

  auto frequencies = buffer.getFrequencies();
  CHECK(2ULL == frequencies->size());
  CHECK(&one == (*frequencies)[0].first);
  CHECK(2ULL == (*frequencies)[0].second);
  CHECK(&two == (*frequencies)[1].first);
  CHECK(4ULL == (*frequencies)[1].second);
  CHECK(static_cast<uint64_t>(2) == frequencies->size());
  CHECK(p2 == (*frequencies)[0].first.lock());
  CHECK(static_cast<uint64_t>(2) == (*frequencies)[0].second);
  CHECK(p1 == (*frequencies)[1].first.lock());
  CHECK(static_cast<uint64_t>(4) == (*frequencies)[1].second);
}

////////////////////////////////////////////////////////////////////////////////
@@ -84,8 +84,13 @@ SECTION("tst_mixed_load") {
  size_t threadCount = 4;
  std::vector<std::shared_ptr<Cache>> caches;
  for (size_t i = 0; i < cacheCount; i++) {
    caches.emplace_back(
        manager.createCache(Manager::CacheType::Plain, initialSize, true));
    auto res =
        /*manager.createCache(((i % 2 == 0) ? Manager::CacheType::Plain
                                            : Manager::CacheType::Transactional),
                             initialSize, true);*/
        manager.createCache(Manager::CacheType::Plain, initialSize, true);
    TRI_ASSERT(res);
    caches.emplace_back(res);
  }

  uint64_t chunkSize = 4 * 1024 * 1024;

@@ -193,11 +198,16 @@ SECTION("tst_lifecycle_chaos") {
  std::queue<std::shared_ptr<Cache>> caches;

  for (uint64_t i = 0; i < operationCount; i++) {
    uint32_t r = RandomGenerator::interval(static_cast<uint32_t>(1UL));
    uint32_t r = RandomGenerator::interval(static_cast<uint32_t>(1));
    switch (r) {
      case 0: {
        caches.emplace(manager.createCache(Manager::CacheType::Plain,
                                           initialSize, true));
        auto res = manager.createCache(
            (i % 2 == 0) ? Manager::CacheType::Plain
                         : Manager::CacheType::Transactional,
            initialSize, true);
        if (res) {
          caches.emplace(res);
        }
      }
      case 1:
      default: {
@@ -51,14 +51,8 @@ TEST_CASE("CCacheMetadataTest", "[cache]") {
////////////////////////////////////////////////////////////////////////////////

SECTION("tst_constructor") {
  uint64_t dummy;
  std::shared_ptr<Cache> dummyCache(reinterpret_cast<Cache*>(&dummy),
                                    [](Cache* p) -> void {});
  uint8_t dummyTable;
  uint32_t logSize = 1;
  uint64_t limit = 1024;

  Metadata metadata(dummyCache, limit, &dummyTable, logSize);
  Metadata metadata(limit);
}

////////////////////////////////////////////////////////////////////////////////

@@ -69,26 +63,19 @@ SECTION("tst_getters") {
  uint64_t dummy;
  std::shared_ptr<Cache> dummyCache(reinterpret_cast<Cache*>(&dummy),
                                    [](Cache* p) -> void {});
  uint8_t dummyTable;
  uint32_t logSize = 1;
  uint64_t limit = 1024;

  Metadata metadata(dummyCache, limit, &dummyTable, logSize);
  Metadata metadata(limit);
  metadata.link(dummyCache);

  metadata.lock();

  CHECK(dummyCache == metadata.cache());

  CHECK(logSize == metadata.logSize());
  CHECK(0UL == metadata.auxiliaryLogSize());

  CHECK(limit == metadata.softLimit());
  CHECK(limit == metadata.hardLimit());
  CHECK(0UL == metadata.usage());

  CHECK(&dummyTable == metadata.table());
  CHECK(nullptr == metadata.auxiliaryTable());

  metadata.unlock();
}

@@ -97,14 +84,9 @@ SECTION("tst_getters") {
////////////////////////////////////////////////////////////////////////////////

SECTION("tst_usage_limits") {
  uint64_t dummy;
  std::shared_ptr<Cache> dummyCache(reinterpret_cast<Cache*>(&dummy),
                                    [](Cache* p) -> void {});
  uint8_t dummyTable;
  uint32_t logSize = 1;
  bool success;

  Metadata metadata(dummyCache, 1024ULL, &dummyTable, logSize);
  Metadata metadata(1024ULL);

  metadata.lock();

@@ -148,19 +130,19 @@ SECTION("tst_usage_limits") {
////////////////////////////////////////////////////////////////////////////////

SECTION("tst_migration") {
  uint64_t dummy;
  std::shared_ptr<Cache> dummyCache(reinterpret_cast<Cache*>(&dummy),
                                    [](Cache* p) -> void {});
  uint8_t dummyTable;
  uint8_t dummyAuxiliaryTable;
  uint32_t logSize = 1;
  uint32_t auxiliaryLogSize = 2;
  uint64_t limit = 1024;

  Metadata metadata(dummyCache, limit, &dummyTable, logSize);
  Metadata metadata(limit);

  metadata.lock();

  metadata.grantAuxiliaryTable(&dummyTable, logSize);
  metadata.swapTables();

  metadata.grantAuxiliaryTable(&dummyAuxiliaryTable, auxiliaryLogSize);
  CHECK(auxiliaryLogSize == metadata.auxiliaryLogSize());
  CHECK(&dummyAuxiliaryTable == metadata.auxiliaryTable());
@@ -1,5 +1,5 @@
////////////////////////////////////////////////////////////////////////////////
/// @brief test suite for arangodb::cache::PlainBucket
/// @brief test suite for arangodb::cache::PlainCache
///
/// @file
///
@@ -1,5 +1,5 @@
////////////////////////////////////////////////////////////////////////////////
/// @brief test suite for arangodb::cache::Manager
/// @brief test suite for arangodb::cache::Rebalancer
///
/// @file
///

@@ -33,6 +33,8 @@
#include "Cache/Manager.h"
#include "Cache/PlainCache.h"
#include "Cache/Rebalancer.h"
#include "Cache/Transaction.h"
#include "Cache/TransactionalCache.h"

#include "MockScheduler.h"

@@ -56,10 +58,10 @@ using namespace arangodb::cache;
TEST_CASE("CCacheRebalancerTest", "[cache][!hide][longRunning]") {

////////////////////////////////////////////////////////////////////////////////
/// @brief test rebalancing (multi-threaded)
/// @brief test rebalancing plain caches (multi-threaded)
////////////////////////////////////////////////////////////////////////////////

SECTION("tst_rebalancing") {
SECTION("tst_rebalancing_plain") {
  uint64_t initialSize = 16ULL * 1024ULL;
  RandomGenerator::initialize(RandomGenerator::RandomType::MERSENNE);
  MockScheduler scheduler(4);

@@ -180,6 +182,145 @@ SECTION("tst_rebalancing") {
  RandomGenerator::shutdown();
}

////////////////////////////////////////////////////////////////////////////////
/// @brief test rebalancing transactional caches (multi-threaded)
////////////////////////////////////////////////////////////////////////////////

SECTION("tst_rebalancing_transactional") {
  uint64_t initialSize = 16ULL * 1024ULL;
  RandomGenerator::initialize(RandomGenerator::RandomType::MERSENNE);
  MockScheduler scheduler(4);
  Manager manager(scheduler.ioService(), 128ULL * 1024ULL * 1024ULL);
  Rebalancer rebalancer(&manager);

  size_t cacheCount = 4;
  size_t threadCount = 4;
  std::vector<std::shared_ptr<Cache>> caches;
  for (size_t i = 0; i < cacheCount; i++) {
    caches.emplace_back(manager.createCache(Manager::CacheType::Transactional,
                                            initialSize, true));
  }

  bool doneRebalancing = false;
  auto rebalanceWorker = [&rebalancer, &doneRebalancing]() -> void {
    while (!doneRebalancing) {
      bool rebalanced = rebalancer.rebalance();
      if (rebalanced) {
        usleep(500 * 1000);
      } else {
        usleep(100);
      }
    }
  };
  auto rebalancerThread = new std::thread(rebalanceWorker);

  uint64_t chunkSize = 4 * 1024 * 1024;
  uint64_t initialInserts = 1 * 1024 * 1024;
  uint64_t operationCount = 4 * 1024 * 1024;
  std::atomic<uint64_t> hitCount(0);
  std::atomic<uint64_t> missCount(0);
  auto worker = [&manager, &caches, cacheCount, initialInserts, operationCount,
                 &hitCount,
                 &missCount](uint64_t lower, uint64_t upper) -> void {
    Transaction* tx = manager.beginTransaction(false);
    // fill with some initial data
    for (uint64_t i = 0; i < initialInserts; i++) {
      uint64_t item = lower + i;
      size_t cacheIndex = item % cacheCount;
      CachedValue* value = CachedValue::construct(&item, sizeof(uint64_t),
                                                  &item, sizeof(uint64_t));
      bool ok = caches[cacheIndex]->insert(value);
      if (!ok) {
        delete value;
      }
    }

    // initialize valid range for keys that *might* be in cache
    uint64_t validLower = lower;
    uint64_t validUpper = lower + initialInserts - 1;
    uint64_t blacklistUpper = validUpper;

    // commence mixed workload
    for (uint64_t i = 0; i < operationCount; i++) {
      uint32_t r = RandomGenerator::interval(static_cast<uint32_t>(99UL));

      if (r >= 99) {  // remove something
        if (validLower == validUpper) {
          continue;  // removed too much
        }

        uint64_t item = validLower++;
        size_t cacheIndex = item % cacheCount;

        caches[cacheIndex]->remove(&item, sizeof(uint64_t));
      } else if (r >= 90) {  // insert something
        if (validUpper == upper) {
          continue;  // already maxed out range
        }

        uint64_t item = ++validUpper;
        if (validUpper > blacklistUpper) {
          blacklistUpper = validUpper;
        }
        size_t cacheIndex = item % cacheCount;
        CachedValue* value = CachedValue::construct(&item, sizeof(uint64_t),
                                                    &item, sizeof(uint64_t));
        bool ok = caches[cacheIndex]->insert(value);
        if (!ok) {
          delete value;
        }
      } else if (r >= 80) {  // blacklist something
        if (blacklistUpper == upper) {
          continue;  // already maxed out range
        }

        uint64_t item = ++blacklistUpper;
        size_t cacheIndex = item % cacheCount;
        caches[cacheIndex]->blacklist(&item, sizeof(uint64_t));
      } else {  // lookup something
        uint64_t item = RandomGenerator::interval(
            static_cast<int64_t>(validLower), static_cast<int64_t>(validUpper));
        size_t cacheIndex = item % cacheCount;

        Cache::Finding f = caches[cacheIndex]->find(&item, sizeof(uint64_t));
        if (f.found()) {
          hitCount++;
          TRI_ASSERT(f.value() != nullptr);
          TRI_ASSERT(f.value()->sameKey(&item, sizeof(uint64_t)));
        } else {
          missCount++;
          TRI_ASSERT(f.value() == nullptr);
        }
      }
    }
    manager.endTransaction(tx);
  };

  std::vector<std::thread*> threads;
  // dispatch threads
  for (size_t i = 0; i < threadCount; i++) {
    uint64_t lower = i * chunkSize;
    uint64_t upper = ((i + 1) * chunkSize) - 1;
    threads.push_back(new std::thread(worker, lower, upper));
  }

  // join threads
  for (auto t : threads) {
    t->join();
    delete t;
  }

  doneRebalancing = true;
  rebalancerThread->join();
  delete rebalancerThread;

  for (auto cache : caches) {
    manager.destroyCache(cache);
  }

  RandomGenerator::shutdown();
}

////////////////////////////////////////////////////////////////////////////////
/// @brief generate tests
////////////////////////////////////////////////////////////////////////////////
@@ -1,5 +1,5 @@
////////////////////////////////////////////////////////////////////////////////
/// @brief test suite for arangodb::cache::TransactionWindow
/// @brief test suite for arangodb::cache::TransactionManager
///
/// @file
///

@@ -29,7 +29,8 @@

#include "catch.hpp"

#include "Cache/TransactionWindow.h"
#include "Cache/Transaction.h"
#include "Cache/TransactionManager.h"

#include <stdint.h>
#include <iostream>

@@ -44,30 +45,55 @@ using namespace arangodb::cache;
/// @brief setup
////////////////////////////////////////////////////////////////////////////////

TEST_CASE("CCacheTransactionWindowTest", "[cache]") {
TEST_CASE("CCacheTransactionManagerTest", "[cache]") {

////////////////////////////////////////////////////////////////////////////////
/// @brief test transaction term management
////////////////////////////////////////////////////////////////////////////////

SECTION("tst_transaction_term") {
  TransactionWindow transactions;
  TransactionManager transactions;
  Transaction* tx1;
  Transaction* tx2;
  Transaction* tx3;

  CHECK(0ULL == transactions.term());

  transactions.start();
  tx1 = transactions.begin(false);
  CHECK(1ULL == transactions.term());
  transactions.end();
  transactions.end(tx1);
  CHECK(2ULL == transactions.term());

  transactions.start();
  tx1 = transactions.begin(false);
  CHECK(3ULL == transactions.term());
  transactions.start();
  tx2 = transactions.begin(false);
  CHECK(3ULL == transactions.term());
  transactions.end();
  transactions.end(tx1);
  CHECK(3ULL == transactions.term());
  transactions.end();
  transactions.end(tx2);
  CHECK(4ULL == transactions.term());

  tx1 = transactions.begin(true);
  CHECK(4ULL == transactions.term());
  tx2 = transactions.begin(false);
  CHECK(5ULL == transactions.term());
  transactions.end(tx2);
  CHECK(5ULL == transactions.term());
  transactions.end(tx1);
  CHECK(6ULL == transactions.term());

  tx1 = transactions.begin(true);
  CHECK(6ULL == transactions.term());
  tx2 = transactions.begin(false);
  CHECK(7ULL == transactions.term());
  transactions.end(tx2);
  CHECK(7ULL == transactions.term());
  tx3 = transactions.begin(true);
  CHECK(7ULL == transactions.term());
  transactions.end(tx1);
  CHECK(8ULL == transactions.term());
  transactions.end(tx3);
  CHECK(8ULL == transactions.term());
}

////////////////////////////////////////////////////////////////////////////////
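The renamed TransactionManager hands out Transaction* tokens, and the term assertions above exercise that begin/end pairing. A compact sketch of the lifecycle as the cache tests use it, assuming only the Manager::beginTransaction and Manager::endTransaction calls that appear verbatim in this diff:

#include "Cache/Manager.h"
#include "Cache/Transaction.h"

using namespace arangodb::cache;

// Every beginTransaction(readOnly) must be paired with endTransaction(tx);
// per the test above, ending the transactions advances the manager's term.
void withCacheTransaction(Manager& manager) {
  Transaction* tx = manager.beginTransaction(false);  // false: writing transaction
  // ... cache insert/find/blacklist work would go here ...
  manager.endTransaction(tx);
}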
@@ -261,7 +261,7 @@ SECTION("tst_blacklist") {
                  sizeof(uint64_t));
  }

  success = bucket.lock(0, -1LL);
  success = bucket.lock(1ULL, -1LL);
  CHECK(success);

  // insert three to fill
@@ -0,0 +1,415 @@
////////////////////////////////////////////////////////////////////////////////
/// @brief test suite for arangodb::cache::TransactionalCache
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2017 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is triAGENS GmbH, Cologne, Germany
///
/// @author Daniel H. Larkin
/// @author Copyright 2017, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////

#include "Basics/Common.h"
#include "Random/RandomGenerator.h"

#include "catch.hpp"

#include "Cache/Manager.h"
#include "Cache/Transaction.h"
#include "Cache/TransactionalCache.h"

#include "MockScheduler.h"

#include <stdint.h>
#include <string>
#include <thread>
#include <vector>

#include <iostream>

using namespace arangodb;
using namespace arangodb::cache;

// -----------------------------------------------------------------------------
// --SECTION--                                                        test suite
// -----------------------------------------------------------------------------

////////////////////////////////////////////////////////////////////////////////
/// @brief setup
////////////////////////////////////////////////////////////////////////////////

TEST_CASE("CCacheTransactionalCacheTest",
          "[cache][!hide][longRunning]") {

////////////////////////////////////////////////////////////////////////////////
/// @brief test construction (single-threaded)
////////////////////////////////////////////////////////////////////////////////

SECTION("tst_st_construction") {
  Manager manager(nullptr, 1024ULL * 1024ULL);
  auto cache1 = manager.createCache(Manager::CacheType::Transactional,
                                    256ULL * 1024ULL, false);
  auto cache2 = manager.createCache(Manager::CacheType::Transactional,
                                    512ULL * 1024ULL, false);

  CHECK(0ULL == cache1->usage());
  CHECK(256ULL * 1024ULL == cache1->limit());
  CHECK(0ULL == cache2->usage());
  CHECK(512ULL * 1024ULL > cache2->limit());

  manager.destroyCache(cache1);
  manager.destroyCache(cache2);
}

////////////////////////////////////////////////////////////////////////////////
/// @brief test insertion (single-threaded)
////////////////////////////////////////////////////////////////////////////////

SECTION("tst_st_insertion") {
  uint64_t cacheLimit = 256ULL * 1024ULL;
  Manager manager(nullptr, 4ULL * cacheLimit);
  auto cache =
      manager.createCache(Manager::CacheType::Transactional, cacheLimit, false);

  for (uint64_t i = 0; i < 1024; i++) {
    CachedValue* value =
        CachedValue::construct(&i, sizeof(uint64_t), &i, sizeof(uint64_t));
    bool success = cache->insert(value);
    CHECK(success);
    auto f = cache->find(&i, sizeof(uint64_t));
    CHECK(f.found());
  }

  for (uint64_t i = 0; i < 1024; i++) {
    uint64_t j = 2 * i;
    CachedValue* value =
        CachedValue::construct(&i, sizeof(uint64_t), &j, sizeof(uint64_t));
    bool success = cache->insert(value);
    CHECK(success);
    auto f = cache->find(&i, sizeof(uint64_t));
    CHECK(f.found());
    CHECK(0 == memcmp(f.value()->value(), &j, sizeof(uint64_t)));
  }

  uint64_t notInserted = 0;
  for (uint64_t i = 1024; i < 128 * 1024; i++) {
    CachedValue* value =
        CachedValue::construct(&i, sizeof(uint64_t), &i, sizeof(uint64_t));
    bool success = cache->insert(value);
    if (success) {
      auto f = cache->find(&i, sizeof(uint64_t));
      CHECK(f.found());
    } else {
      delete value;
      notInserted++;
    }
  }
  CHECK(notInserted > 0);

  manager.destroyCache(cache);
}

////////////////////////////////////////////////////////////////////////////////
/// @brief test removal (single-threaded)
////////////////////////////////////////////////////////////////////////////////

SECTION("tst_st_removal") {
  uint64_t cacheLimit = 256ULL * 1024ULL;
  Manager manager(nullptr, 4ULL * cacheLimit);
  auto cache =
      manager.createCache(Manager::CacheType::Transactional, cacheLimit, false);

  for (uint64_t i = 0; i < 1024; i++) {
    CachedValue* value =
        CachedValue::construct(&i, sizeof(uint64_t), &i, sizeof(uint64_t));
    bool success = cache->insert(value);
    CHECK(success);
    auto f = cache->find(&i, sizeof(uint64_t));
    CHECK(f.found());
    CHECK(f.value() != nullptr);
    CHECK(f.value()->sameKey(&i, sizeof(uint64_t)));
  }

  // test removal of bogus keys
  for (uint64_t i = 1024; i < 2048; i++) {
    bool removed = cache->remove(&i, sizeof(uint64_t));
    REQUIRE(removed);
    // ensure existing keys not removed
    for (uint64_t j = 0; j < 1024; j++) {
      auto f = cache->find(&j, sizeof(uint64_t));
      CHECK(f.found());
      CHECK(f.value() != nullptr);
      CHECK(f.value()->sameKey(&j, sizeof(uint64_t)));
    }
  }

  // remove actual keys
  for (uint64_t i = 0; i < 1024; i++) {
    bool removed = cache->remove(&i, sizeof(uint64_t));
    CHECK(removed);
    auto f = cache->find(&i, sizeof(uint64_t));
    CHECK(!f.found());
  }

  manager.destroyCache(cache);
}

////////////////////////////////////////////////////////////////////////////////
/// @brief test blacklisting (single-threaded)
////////////////////////////////////////////////////////////////////////////////

SECTION("tst_st_blacklist") {
  uint64_t cacheLimit = 256ULL * 1024ULL;
  Manager manager(nullptr, 4ULL * cacheLimit);
  auto cache =
      manager.createCache(Manager::CacheType::Transactional, cacheLimit, false);

  Transaction* tx = manager.beginTransaction(false);

  for (uint64_t i = 0; i < 1024; i++) {
    CachedValue* value =
        CachedValue::construct(&i, sizeof(uint64_t), &i, sizeof(uint64_t));
    bool success = cache->insert(value);
    CHECK(success);
    auto f = cache->find(&i, sizeof(uint64_t));
    CHECK(f.found());
    CHECK(f.value() != nullptr);
    CHECK(f.value()->sameKey(&i, sizeof(uint64_t)));
  }

  for (uint64_t i = 512; i < 1024; i++) {
    bool success = cache->blacklist(&i, sizeof(uint64_t));
    CHECK(success);
    auto f = cache->find(&i, sizeof(uint64_t));
    CHECK(!f.found());
  }

  for (uint64_t i = 512; i < 1024; i++) {
    CachedValue* value =
        CachedValue::construct(&i, sizeof(uint64_t), &i, sizeof(uint64_t));
    bool success = cache->insert(value);
    CHECK(!success);
    delete value;
    auto f = cache->find(&i, sizeof(uint64_t));
    CHECK(!f.found());
  }

  manager.endTransaction(tx);
  tx = manager.beginTransaction(false);

  for (uint64_t i = 512; i < 1024; i++) {
    CachedValue* value =
        CachedValue::construct(&i, sizeof(uint64_t), &i, sizeof(uint64_t));
    bool success = cache->insert(value);
    CHECK(success);
    auto f = cache->find(&i, sizeof(uint64_t));
    CHECK(f.found());
  }

  manager.endTransaction(tx);
  manager.destroyCache(cache);
}

////////////////////////////////////////////////////////////////////////////////
/// @brief test growth behavior (single-threaded)
////////////////////////////////////////////////////////////////////////////////

SECTION("tst_st_growth") {
  uint64_t initialSize = 16ULL * 1024ULL;
  uint64_t minimumSize = 64ULL * initialSize;
  MockScheduler scheduler(4);
  Manager manager(scheduler.ioService(), 1024ULL * 1024ULL * 1024ULL);
  auto cache =
      manager.createCache(Manager::CacheType::Transactional, initialSize, true);

  for (uint64_t i = 0; i < 4ULL * 1024ULL * 1024ULL; i++) {
    CachedValue* value =
        CachedValue::construct(&i, sizeof(uint64_t), &i, sizeof(uint64_t));
    bool success = cache->insert(value);
    if (!success) {
      delete value;
    }
  }

  CHECK(cache->usage() > minimumSize);

  manager.destroyCache(cache);
}

////////////////////////////////////////////////////////////////////////////////
/// @brief test shrink behavior (single-threaded)
////////////////////////////////////////////////////////////////////////////////

SECTION("tst_st_shrink") {
  uint64_t initialSize = 16ULL * 1024ULL;
  RandomGenerator::initialize(RandomGenerator::RandomType::MERSENNE);
  MockScheduler scheduler(4);
  Manager manager(scheduler.ioService(), 1024ULL * 1024ULL * 1024ULL);
  auto cache =
      manager.createCache(Manager::CacheType::Transactional, initialSize, true);

  for (uint64_t i = 0; i < 16ULL * 1024ULL * 1024ULL; i++) {
    CachedValue* value =
        CachedValue::construct(&i, sizeof(uint64_t), &i, sizeof(uint64_t));
    bool success = cache->insert(value);
    if (!success) {
      delete value;
    }
  }

  cache->disableGrowth();
  uint64_t target = cache->usage() / 2;
  while (!cache->resize(target)) {
  };

  for (uint64_t i = 0; i < 16ULL * 1024ULL * 1024ULL; i++) {
    CachedValue* value =
        CachedValue::construct(&i, sizeof(uint64_t), &i, sizeof(uint64_t));
    bool success = cache->insert(value);
    if (!success) {
      delete value;
    }
  }

  while (cache->isResizing()) {
  }
  CHECK(cache->usage() <= target);

  manager.destroyCache(cache);
  RandomGenerator::shutdown();
}

////////////////////////////////////////////////////////////////////////////////
/// @brief test mixed load behavior (multi-threaded)
////////////////////////////////////////////////////////////////////////////////

SECTION("tst_mt_mixed_load") {
  uint64_t initialSize = 16ULL * 1024ULL;
  RandomGenerator::initialize(RandomGenerator::RandomType::MERSENNE);
  MockScheduler scheduler(4);
  Manager manager(scheduler.ioService(), 1024ULL * 1024ULL * 1024ULL);
  size_t threadCount = 4;
  std::shared_ptr<Cache> cache =
      manager.createCache(Manager::CacheType::Transactional, initialSize, true);

  uint64_t chunkSize = 16 * 1024 * 1024;
  uint64_t initialInserts = 4 * 1024 * 1024;
  uint64_t operationCount = 16 * 1024 * 1024;
  std::atomic<uint64_t> hitCount(0);
  std::atomic<uint64_t> missCount(0);
  auto worker = [&manager, &cache, initialInserts, operationCount, &hitCount,
                 &missCount](uint64_t lower, uint64_t upper) -> void {
    Transaction* tx = manager.beginTransaction(false);
    // fill with some initial data
    for (uint64_t i = 0; i < initialInserts; i++) {
      uint64_t item = lower + i;
      CachedValue* value = CachedValue::construct(&item, sizeof(uint64_t),
                                                  &item, sizeof(uint64_t));
      bool ok = cache->insert(value);
      if (!ok) {
        delete value;
      }
    }

    // initialize valid range for keys that *might* be in cache
    uint64_t validLower = lower;
    uint64_t validUpper = lower + initialInserts - 1;
    uint64_t blacklistUpper = validUpper;

    // commence mixed workload
    for (uint64_t i = 0; i < operationCount; i++) {
      uint32_t r = RandomGenerator::interval(static_cast<uint32_t>(99UL));

      if (r >= 99) {  // remove something
        if (validLower == validUpper) {
          continue;  // removed too much
        }

        uint64_t item = validLower++;

        cache->remove(&item, sizeof(uint64_t));
      } else if (r >= 90) {  // insert something
        if (validUpper == upper) {
          continue;  // already maxed out range
        }

        uint64_t item = ++validUpper;
        if (validUpper > blacklistUpper) {
          blacklistUpper = validUpper;
        }
        CachedValue* value = CachedValue::construct(&item, sizeof(uint64_t),
                                                    &item, sizeof(uint64_t));
        bool ok = cache->insert(value);
        if (!ok) {
          delete value;
        }
      } else if (r >= 80) {  // blacklist something
        if (blacklistUpper == upper) {
          continue;  // already maxed out range
        }

        uint64_t item = ++blacklistUpper;
        cache->blacklist(&item, sizeof(uint64_t));
      } else {  // lookup something
        uint64_t item = RandomGenerator::interval(
            static_cast<int64_t>(validLower), static_cast<int64_t>(validUpper));

        Cache::Finding f = cache->find(&item, sizeof(uint64_t));
        if (f.found()) {
          hitCount++;
          TRI_ASSERT(f.value() != nullptr);
          TRI_ASSERT(f.value()->sameKey(&item, sizeof(uint64_t)));
        } else {
          missCount++;
          TRI_ASSERT(f.value() == nullptr);
        }
      }
    }
    manager.endTransaction(tx);
  };

  std::vector<std::thread*> threads;
  // dispatch threads
  for (size_t i = 0; i < threadCount; i++) {
    uint64_t lower = i * chunkSize;
    uint64_t upper = ((i + 1) * chunkSize) - 1;
    threads.push_back(new std::thread(worker, lower, upper));
  }

  // join threads
  for (auto t : threads) {
    t->join();
    delete t;
  }

  manager.destroyCache(cache);
  RandomGenerator::shutdown();
}

////////////////////////////////////////////////////////////////////////////////
/// @brief generate tests
////////////////////////////////////////////////////////////////////////////////

}

// Local Variables:
// mode: outline-minor
// outline-regexp: "^\\(/// @brief\\|/// {@inheritDoc}\\|/// @addtogroup\\|//
// --SECTION--\\|/// @\\}\\)"
// End: