mirror of https://gitee.com/bigwinds/arangodb
Merge remote-tracking branch 'origin/devel' into feature/ldap-auth
commit c2458f845a
@@ -119,7 +119,10 @@ add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/curl/curl-7.50.3)
 if (${JEMALLOC_FOUND})
   # set(USE_JEMALLOC_DEFAULT 1)
 endif()
 
+set(SKIP_INSTALL_ALL ON)
+set(FAIL_ON_WARNINGS OFF CACHE BOOL "do not enable -Werror")
+
+add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/snappy/google-snappy-d53de18)
 add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/rocksdb)
@@ -1 +1 @@
-Subproject commit a0f4af39229ed6b567745149633d1a75c7f548d7
+Subproject commit d8194287a651af48b6bedad4cda64e65e9f8f71f
@@ -154,6 +154,10 @@ v3.2.alpha1 (2017-02-05)
 v3.1.14 (2017-03-13)
 --------------------
 
+* added missing locks to authentication cache methods
 
+* ui - added feature request (multiple start nodes within graph viewer) #2317
 
+* ui - fixed wrong merge of statistics information from different coordinators
 
+* ui - fixed issue #2316
@@ -53,7 +53,7 @@ Graph Viewer Options
 
 Graph Options Menu:
 
-- Startnode (string - valid node id): Heart of your graph. Rendering and traversing will start from here. Empty value means: a random starting point will be used.
+- Startnode (string - a valid node id or a space-separated list of ids, e.g. persons/alice persons/bob): Heart of your graph. Rendering and traversing will start from here. An empty value means a random starting point will be used.
 - Layout: Different graph layout algorithms. No overlap (optimal: big graphs), force layout (optimal: medium graphs), fruchtermann (optimal: small to medium graphs).
 - Renderer: Canvas mode allows editing. WebGL currently offers display mode only (much faster for many nodes/edges).
 - Search depth (number): Traversal depth, starting from your start node.
@@ -1,4 +1,4 @@
-.TH arangod 8 "3.1.devel" "ArangoDB" "ArangoDB"
+.TH arangod 8 "3.2.devel" "ArangoDB" "ArangoDB"
 .SH NAME
 arangod - the ArangoDB database server
 .SH SYNOPSIS
@@ -1,4 +1,4 @@
-.TH foxx-manager 8 "3.1.devel" "ArangoDB" "ArangoDB"
+.TH foxx-manager 8 "3.2.devel" "ArangoDB" "ArangoDB"
 .SH NAME
 foxx-manager - a Foxx application manager for ArangoDB
 .SH SYNOPSIS
@@ -1,4 +1,4 @@
-.TH rcarangod 8 "3.1.devel" "ArangoDB" "ArangoDB"
+.TH rcarangod 8 "3.2.devel" "ArangoDB" "ArangoDB"
 .SH NAME
 rcarangod - control script for the ArangoDB database server
 .SH SYNOPSIS
@@ -41,8 +41,8 @@ AgencyFeature::AgencyFeature(application_features::ApplicationServer* server)
       _activated(false),
       _size(1),
       _poolSize(1),
-      _minElectionTimeout(0.5),
-      _maxElectionTimeout(2.5),
+      _minElectionTimeout(1.0),
+      _maxElectionTimeout(5.0),
       _supervision(false),
       _waitForSync(true),
       _supervisionFrequency(5.0),
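
Note: these defaults widen the randomized election window from [0.5 s, 2.5 s] to [1.0 s, 5.0 s]. Raft-style agents draw each concrete timeout at random from this window so that candidates rarely collide (compare Constituent::sleepFor, removed further down in this diff). A minimal standalone sketch, using std::mt19937 in place of ArangoDB's RandomGenerator:

    // Hypothetical helper, not part of the patch: draw one election timeout
    // uniformly from the configured [min, max] window, in milliseconds.
    #include <chrono>
    #include <cstdint>
    #include <random>

    std::chrono::milliseconds randomElectionTimeout(double minSeconds,
                                                    double maxSeconds) {
      static thread_local std::mt19937 rng{std::random_device{}()};
      std::uniform_int_distribution<int32_t> dist(
          static_cast<int32_t>(1000.0 * minSeconds),
          static_cast<int32_t>(1000.0 * maxSeconds));
      return std::chrono::milliseconds{dist(rng)};
    }

A wider window lowers the chance of split votes at the cost of slower failover.
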
@@ -28,6 +28,7 @@
 #include <velocypack/velocypack-aliases.h>
 
+#include <chrono>
 #include <thread>
 
 #include "Agency/GossipCallback.h"
 #include "Basics/ConditionLocker.h"
@@ -628,6 +629,7 @@ query_t Agent::lastAckedAgo() const {
 }
 
 trans_ret_t Agent::transact(query_t const& queries) {
+
   arangodb::consensus::index_t maxind = 0; // maximum write index
 
   auto leader = _constituent.leaderID();
@@ -760,6 +762,7 @@ inquire_ret_t Agent::inquire(query_t const& query) {
 
 /// Write new entries to replicated state and store
 write_ret_t Agent::write(query_t const& query) {
+
   std::vector<bool> applied;
   std::vector<index_t> indices;
   auto multihost = size()>1;
@@ -1160,8 +1163,8 @@ void Agent::notify(query_t const& message) {
 
   _config.update(message);
 
-  _state.persistActiveAgents(_config.activeToBuilder(),
-                             _config.poolToBuilder());
+  _state.persistActiveAgents(_config.activeToBuilder(), _config.poolToBuilder());
+
 }
 
 // Rebuild key value stores
@@ -1174,21 +1177,16 @@ arangodb::consensus::index_t Agent::rebuildDBs() {
   LOG_TOPIC(DEBUG, Logger::AGENCY)
     << "Rebuilding key-value stores from index "
    << _lastAppliedIndex << " to " << _leaderCommitIndex;
 
-  _spearhead.apply(
-    _state.slices(_lastAppliedIndex+1, _leaderCommitIndex+1),
-    _leaderCommitIndex, _constituent.term());
-
-  _readDB.apply(
-    _state.slices(_lastAppliedIndex+1, _leaderCommitIndex+1),
-    _leaderCommitIndex, _constituent.term());
-
-  _compacted.apply(
-    _state.slices(_lastCompactionIndex+1, _leaderCommitIndex+1),
-    _leaderCommitIndex, _constituent.term());
+  auto logs = _state.slices(_lastAppliedIndex+1, _leaderCommitIndex+1);
+
+  _spearhead.apply(logs, _leaderCommitIndex, _constituent.term());
+  _readDB.apply(logs, _leaderCommitIndex, _constituent.term());
 
   LOG_TOPIC(TRACE, Logger::AGENCY)
     << "ReadDB: " << _readDB;
 
   _lastAppliedIndex = _leaderCommitIndex;
   _lastCompactionIndex = _leaderCommitIndex;
 
   return _lastAppliedIndex;
@@ -39,43 +39,25 @@ AgentCallback::AgentCallback(Agent* agent, std::string const& slaveID,
 void AgentCallback::shutdown() { _agent = 0; }
 
 bool AgentCallback::operator()(arangodb::ClusterCommResult* res) {
-
   if (res->status == CL_COMM_SENT) {
-
     if (_agent) {
-
-      try { // Check success
-        if (res->result->getBodyVelocyPack()->slice().get("success").getBool()) {
-          _agent->reportIn(_slaveID, _last, _toLog);
-        }
-        LOG_TOPIC(DEBUG, Logger::CLUSTER)
-          << "success: true " << res->result->getBodyVelocyPack()->toJson();
-      } catch (...) {
-        LOG_TOPIC(INFO, Logger::CLUSTER)
-          << "success: false" << res->result->getBodyVelocyPack()->toJson();
-      }
-
+      LOG_TOPIC(DEBUG, Logger::CLUSTER)
+        << res->result->getBodyVelocyPack()->toJson();
+      _agent->reportIn(_slaveID, _last, _toLog);
     }
-
     LOG_TOPIC(DEBUG, Logger::AGENCY)
       << "Got good callback from AppendEntriesRPC: "
       << "comm_status(" << res->status
       << "), last(" << _last << "), follower("
       << _slaveID << "), time("
       << TRI_microtime() - _startTime << ")";
-
   } else {
-
     LOG_TOPIC(DEBUG, Logger::AGENCY)
       << "Got bad callback from AppendEntriesRPC: "
       << "comm_status(" << res->status
       << "), last(" << _last << "), follower("
       << _slaveID << "), time("
       << TRI_microtime() - _startTime << ")";
-
   }
-
   return true;
-
 }
@@ -87,13 +87,6 @@ Constituent::~Constituent() { shutdown(); }
 /// Wait for sync
 bool Constituent::waitForSync() const { return _agent->config().waitForSync(); }
 
-/// Random sleep times in election process
-duration_t Constituent::sleepFor(double min_t, double max_t) {
-  int32_t left = static_cast<int32_t>(1000.0 * min_t),
-          right = static_cast<int32_t>(1000.0 * max_t);
-  return duration_t(static_cast<long>(RandomGenerator::interval(left, right)));
-}
-
 /// Get my term
 term_t Constituent::term() const {
   MUTEX_LOCKER(guard, _castLock);
@@ -116,33 +109,36 @@ void Constituent::termNoLock(term_t t) {
     << roleStr[_role] << " new term " << t;
 
   _cast = false;
-  Builder body;
-  body.add(VPackValue(VPackValueType::Object));
-  std::ostringstream i_str;
-  i_str << std::setw(20) << std::setfill('0') << t;
-  body.add("_key", Value(i_str.str()));
-  body.add("term", Value(t));
-  body.add("voted_for", Value(_votedFor));
-  body.close();
 
-  TRI_ASSERT(_vocbase != nullptr);
-  auto transactionContext =
-      std::make_shared<transaction::StandaloneContext>(_vocbase);
-  SingleCollectionTransaction trx(transactionContext, "election",
-                                  AccessMode::Type::WRITE);
-
-  int res = trx.begin();
-
-  if (res != TRI_ERROR_NO_ERROR) {
-    THROW_ARANGO_EXCEPTION(res);
-  }
-
-  OperationOptions options;
-  options.waitForSync = false;
-  options.silent = true;
-
-  OperationResult result = trx.insert("election", body.slice(), options);
-  trx.finish(result.code);
+  if (!_votedFor.empty()) {
+    Builder body;
+    body.add(VPackValue(VPackValueType::Object));
+    std::ostringstream i_str;
+    i_str << std::setw(20) << std::setfill('0') << t;
+    body.add("_key", Value(i_str.str()));
+    body.add("term", Value(t));
+    body.add("voted_for", Value(_votedFor));
+    body.close();
+
+    TRI_ASSERT(_vocbase != nullptr);
+    auto transactionContext =
+        std::make_shared<transaction::StandaloneContext>(_vocbase);
+    SingleCollectionTransaction trx(transactionContext, "election",
+                                    AccessMode::Type::WRITE);
+
+    int res = trx.begin();
+
+    if (res != TRI_ERROR_NO_ERROR) {
+      THROW_ARANGO_EXCEPTION(res);
+    }
+
+    OperationOptions options;
+    options.waitForSync = _agent->config().waitForSync();
+    options.silent = true;
+
+    OperationResult result = trx.insert("election", body.slice(), options);
+    trx.finish(result.code);
+  }
 }
@@ -701,25 +701,12 @@ void Node::toBuilder(Builder& builder, bool showHidden) const {
 
 // Print internals to ostream
 std::ostream& Node::print(std::ostream& o) const {
-  Node const* par = _parent;
-  while (par != nullptr) {
-    par = par->_parent;
-    o << " ";
-  }
-
-  o << _node_name << " : ";
-
-  if (type() == NODE) {
-    o << std::endl;
-    for (auto const& i : _children) o << *(i.second);
-  } else {
-    o << ((slice().isNone()) ? "NONE" : slice().toJson());
-    if (_ttl != std::chrono::system_clock::time_point()) {
-      o << " ttl! ";
-    }
-    o << std::endl;
-  }
+  Builder builder;
+  {
+    VPackArrayBuilder b(&builder);
+    toBuilder(builder);
+  }
 
+  o << builder.toJson();
   return o;
 }
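
Note: Node::print now serializes the node through a velocypack Builder and prints the JSON dump, instead of walking parents for indentation and printing children recursively. The pattern, reduced to a standalone sketch (the placeholder payload stands in for node.toBuilder(builder); Builder, VPackArrayBuilder and toJson() are the same API the diff uses):

    #include <string>
    #include <velocypack/Builder.h>
    #include <velocypack/velocypack-aliases.h>

    std::string nodeToJson() {
      VPackBuilder builder;
      {
        VPackArrayBuilder guard(&builder);  // opens "[", closed on scope exit
        builder.add(VPackValue(42));        // stand-in for node.toBuilder(builder)
      }
      return builder.toJson();              // "[42]"
    }
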
@@ -315,15 +315,15 @@ std::vector<log_t> State::get(arangodb::consensus::index_t start,
     return entries;
   }
 
-  if (end == (std::numeric_limits<uint64_t>::max)() || end > _log.back().index) {
-    end = _log.back().index;
+  if (end == (std::numeric_limits<uint64_t>::max)() || end > _log.size() - 1) {
+    end = _log.size() - 1;
   }
 
   if (start < _log[0].index) {
     start = _log[0].index;
   }
 
-  for (size_t i = start - _cur; i <= end - _cur; ++i) {
+  for (size_t i = start - _cur; i <= end; ++i) {
     entries.push_back(_log[i]);
   }
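
Note: start and end arrive as global log indices, while _log holds only the window of entries that survived compaction; _cur is the global index of _log[0], so global index g lives at vector offset g - _cur. The patch normalizes end to a window offset before the copy loop. For comparison, a self-contained sketch of the same range copy done entirely in global-index terms (illustrative names, not the member code):

    #include <cstdint>
    #include <vector>

    struct log_t { uint64_t index; };

    // Return entries with global indices in [start, end], clamped to the
    // stored window; the entry at offset 0 has global index `cur`.
    std::vector<log_t> sliceWindow(std::vector<log_t> const& log, uint64_t cur,
                                   uint64_t start, uint64_t end) {
      std::vector<log_t> out;
      if (log.empty()) return out;
      uint64_t last = cur + log.size() - 1;  // highest index still stored
      if (end > last) end = last;
      if (start < cur) start = cur;          // older entries were compacted away
      for (uint64_t g = start; g <= end; ++g) out.push_back(log[g - cur]);
      return out;
    }
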
@@ -71,7 +71,6 @@ bool PlainCache::insert(CachedValue* value) {
 
   if (ok) {
     bool allowed = true;
-    bool eviction = false;
     bool maybeMigrate = false;
     int64_t change = static_cast<int64_t>(value->size());
     CachedValue* candidate = bucket->find(hash, value->key(), value->keySize);

@@ -93,6 +92,7 @@ bool PlainCache::insert(CachedValue* value) {
     _metadata.unlock();
 
     if (allowed) {
+      bool eviction = false;
       if (candidate != nullptr) {
         bucket->evict(candidate, true);
         freeValue(candidate);

@@ -121,7 +121,6 @@ bool PlainCache::insert(CachedValue* value) {
 bool PlainCache::remove(void const* key, uint32_t keySize) {
   TRI_ASSERT(key != nullptr);
   bool removed = false;
-  bool maybeMigrate = false;
   uint32_t hash = hashKey(key, keySize);
 
   bool ok;

@@ -130,6 +129,7 @@ bool PlainCache::remove(void const* key, uint32_t keySize) {
   std::tie(ok, bucket, source) = getBucket(hash, Cache::triesSlow);
 
   if (ok) {
+    bool maybeMigrate = false;
     CachedValue* candidate = bucket->remove(hash, key, keySize);
 
     if (candidate != nullptr) {
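
Note: the eviction and maybeMigrate flags now live in the branches that actually use them. Narrowing scope like this keeps dead state out of the other paths and lets the compiler warn about genuinely unused locals. Illustrative shape only, not the cache code itself:

    bool insertLike(bool ok, bool allowed) {
      if (ok && allowed) {
        bool eviction = false;  // exists only where it can be set and read
        // ... the evict/insert work would update `eviction` here ...
        return !eviction;
      }
      return false;
    }
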
@@ -180,7 +180,7 @@ std::unique_ptr<Table::Subtable> Table::auxiliaryBuckets(uint32_t index) {
     uint32_t diff = _auxiliary->_logSize - _logSize;
     base = &(_auxiliary->_buckets[index << diff]);
     size = static_cast<uint64_t>(1) << diff;
-    mask = ((size - 1) << _auxiliary->_shift);
+    mask = (static_cast<uint32_t>(size - 1) << _auxiliary->_shift);
     shift = _auxiliary->_shift;
   }
   _state.unlock();
@@ -81,7 +81,7 @@ class Table : public std::enable_shared_from_this<Table> {
   //////////////////////////////////////////////////////////////////////////////
   /// @brief Construct a new table of size 2^(logSize) in disabled state.
   //////////////////////////////////////////////////////////////////////////////
-  Table(uint32_t logSize);
+  explicit Table(uint32_t logSize);
 
  public:
   //////////////////////////////////////////////////////////////////////////////
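
Note: marking the single-argument constructor explicit stops a bare integer from silently converting into a Table at call sites. A minimal illustration (not the real class):

    #include <cstdint>

    struct Table {
      explicit Table(uint32_t logSize) : size(uint64_t{1} << logSize) {}
      uint64_t size;
    };

    uint64_t bucketCount(Table const& t) { return t.size; }

    int main() {
      // bucketCount(16);  // used to compile via implicit conversion; now an error
      return bucketCount(Table(16)) != 0 ? 0 : 1;  // 2^16 buckets, spelled out
    }
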
@@ -75,7 +75,6 @@ bool TransactionalCache::insert(CachedValue* value) {
     bool maybeMigrate = false;
     bool allowed = !bucket->isBlacklisted(hash);
     if (allowed) {
-      bool eviction = false;
       int64_t change = value->size();
       CachedValue* candidate = bucket->find(hash, value->key(), value->keySize);
 

@@ -96,6 +95,7 @@ bool TransactionalCache::insert(CachedValue* value) {
       _metadata.unlock();
 
       if (allowed) {
+        bool eviction = false;
         if (candidate != nullptr) {
           bucket->evict(candidate, true);
           freeValue(candidate);
@@ -887,8 +887,8 @@ std::vector<std::string> DatabaseFeature::getDatabaseNamesForUser(
 }
 
 void DatabaseFeature::useSystemDatabase() {
-  bool result = useDatabase(TRI_VOC_SYSTEM_DATABASE);
-  TRI_ASSERT(result);
+  TRI_vocbase_t* result = useDatabase(TRI_VOC_SYSTEM_DATABASE);
+  TRI_ASSERT(result != nullptr);
 }
 
 /// @brief get a coordinator database by its id
@@ -362,15 +362,19 @@ authRouter.get('/graph/:name', function (req, res) {
     res.throw('bad request', e.message, {cause: e});
   }
 
+  var multipleIds;
   if (config.nodeStart) {
-    try {
-      startVertex = db._document(config.nodeStart);
-    } catch (e) {
-      res.throw('bad request', e.message, {cause: e});
-    }
-
-    if (!startVertex) {
-      startVertex = db[vertexName].any();
+    if (config.nodeStart.indexOf(' ') > -1) {
+      multipleIds = config.nodeStart.split(' ');
+    } else {
+      try {
+        startVertex = db._document(config.nodeStart);
+      } catch (e) {
+        res.throw('bad request', e.message, {cause: e});
+      }
+      if (!startVertex) {
+        startVertex = db[vertexName].any();
+      }
     }
   } else {
     startVertex = db[vertexName].any();
@@ -400,16 +404,34 @@ authRouter.get('/graph/:name', function (req, res) {
     }
   } else {
     var aqlQuery;
+    var aqlQueries = [];
 
     if (config.query) {
       aqlQuery = config.query;
     } else {
-      aqlQuery =
-        'FOR v, e, p IN 1..' + (config.depth || '2') + ' ANY "' + startVertex._id + '" GRAPH "' + name + '"';
-
-      if (limit !== 0) {
-        aqlQuery += ' LIMIT ' + limit;
+      if (multipleIds) {
+        /* TODO: uncomment after #75 fix
+        aqlQuery =
+          'FOR x IN ' + JSON.stringify(multipleIds) + ' ' +
+            'FOR v, e, p IN 1..' + (config.depth || '2') + ' ANY x GRAPH "' + name + '"';
+        */
+        _.each(multipleIds, function (nodeid) {
+          aqlQuery =
+            'FOR v, e, p IN 1..' + (config.depth || '2') + ' ANY "' + nodeid + '" GRAPH "' + name + '"';
+          if (limit !== 0) {
+            aqlQuery += ' LIMIT ' + limit;
+          }
+          aqlQuery += ' RETURN p';
+          aqlQueries.push(aqlQuery);
+        });
+      } else {
+        aqlQuery =
+          'FOR v, e, p IN 1..' + (config.depth || '2') + ' ANY "' + startVertex._id + '" GRAPH "' + name + '"';
+        if (limit !== 0) {
+          aqlQuery += ' LIMIT ' + limit;
+        }
+        aqlQuery += ' RETURN p';
       }
-      aqlQuery += ' RETURN p';
     }
 
     var getAttributeByKey = function (o, s) {
@@ -464,7 +486,18 @@ authRouter.get('/graph/:name', function (req, res) {
     });
   } else {
     // get all nodes and edges which are connected to the given start node
-    cursor = AQL_EXECUTE(aqlQuery);
+    if (aqlQueries.length === 0) {
+      cursor = AQL_EXECUTE(aqlQuery);
+    } else {
+      var x;
+      cursor = AQL_EXECUTE(aqlQueries[0]);
+      for (var k = 1; k < aqlQueries.length; k++) {
+        x = AQL_EXECUTE(aqlQueries[k]);
+        _.each(x.json, function (val) {
+          cursor.json.push(val);
+        });
+      }
+    }
   }
 
   var nodesObj = {};
@@ -21,7 +21,7 @@
       'nodeStart': {
         type: 'string',
         name: 'Startnode',
-        desc: 'A valid node id. If empty, a random node will be chosen.',
+        desc: 'A valid node id or space-separated list of ids. If empty, a random node will be chosen.',
         value: 2
       },
       'layout': {
@@ -31,7 +31,6 @@
 #include <stdlib.h>
-#include <crtdbg.h>
 
 // disable definition of macros MIN and MAX (TODO: test side-effects)
 #ifndef NOMINMAX
 #define NOMINMAX
 #endif

@@ -175,8 +174,10 @@ typedef long suseconds_t;
 #undef TRI_WITHIN_COMMON
 
 #ifdef _WIN32
-// some Windows header defines a macro named free, leading to follow-up compile errors
+// some Windows headers define macros named free and small,
+// leading to follow-up compile errors
 #undef free
+#undef small
 #endif
 
 /// @brief helper macro for calculating strlens for static strings at
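
Note: the extra #undef is needed because rpcndr.h in the Windows SDK does `#define small char` (pulled in via windows.h unless WIN32_LEAN_AND_MEAN is set), which breaks any later identifier named small, just as the free macro did. Hypothetical consumer code, not the project header:

    #ifdef _WIN32
    #include <windows.h>
    #undef free   // clashes with functions/members named free
    #undef small  // rpcndr.h: #define small char
    #endif

    struct Stats {
      int small = 0;  // compiles only with the macro undefined
    };

    int main() { return Stats{}.small; }
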
@@ -52,7 +52,8 @@ TEST_CASE("cache::CachedValue", "[cache]") {
   delete cv;
 
   // variable key, fixed value
-  cv = CachedValue::construct(v.data(), v.size(), &k, sizeof(uint64_t));
+  cv = CachedValue::construct(v.data(), static_cast<uint32_t>(v.size()), &k,
+                              sizeof(uint64_t));
   REQUIRE(nullptr != cv);
   REQUIRE(v.size() == cv->keySize);
   REQUIRE(sizeof(uint64_t) == cv->valueSize);

@@ -119,17 +120,17 @@ TEST_CASE("cache::CachedValue", "[cache]") {
   std::string k3("TEST");
   uint64_t v = 1;
 
-  auto cv =
-      CachedValue::construct(k1.data(), k1.size(), &v, sizeof(uint64_t));
+  auto cv = CachedValue::construct(
+      k1.data(), static_cast<uint32_t>(k1.size()), &v, sizeof(uint64_t));
 
   // same key
-  REQUIRE(cv->sameKey(k1.data(), k1.size()));
+  REQUIRE(cv->sameKey(k1.data(), static_cast<uint32_t>(k1.size())));
 
   // different length, matching prefix
-  REQUIRE(!cv->sameKey(k2.data(), k2.size()));
+  REQUIRE(!cv->sameKey(k2.data(), static_cast<uint32_t>(k2.size())));
 
   // same length, different key
-  REQUIRE(!cv->sameKey(k3.data(), k3.size()));
+  REQUIRE(!cv->sameKey(k3.data(), static_cast<uint32_t>(k3.size())));
 
   delete cv;
 }
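
Note: the casts are there because std::string::size() returns size_t (64-bit on most targets) while the key-size parameters here are uint32_t, so the implicit narrowing draws compiler warnings. A small checked-cast helper in the same spirit (hypothetical, not part of the test suite):

    #include <cassert>
    #include <cstdint>
    #include <limits>
    #include <string>

    uint32_t keySize(std::string const& key) {
      // keys are addressed with 32 bits; assert before truncating
      assert(key.size() <= std::numeric_limits<uint32_t>::max());
      return static_cast<uint32_t>(key.size());
    }

    int main() {
      std::string k("test");
      return keySize(k) == 4 ? 0 : 1;
    }
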
@@ -57,7 +57,6 @@ TEST_CASE("cache::Metadata", "[cache]") {
   }
 
   SECTION("verify usage limits are adjusted and enforced correctly") {
-    bool success;
     uint64_t overhead = 48;
     Metadata metadata(1024, 0, 0, 2048 + overhead);
 

@@ -95,7 +94,6 @@ TEST_CASE("cache::Metadata", "[cache]") {
   }
 
   SECTION("verify table methods work correctly") {
-    bool success;
     uint64_t overhead = 48;
     Metadata metadata(1024, 0, 512, 2048 + overhead);
 
@@ -303,10 +303,10 @@ TEST_CASE("cache::PlainCache", "[cache][!hide][longRunning]") {
   {
     auto cacheStats = cacheHit->hitRates();
     auto managerStats = manager.globalHitRates();
-    REQUIRE(cacheStats.first >= 85.0);
-    REQUIRE(cacheStats.second >= 85.0);
-    REQUIRE(managerStats.first >= 85.0);
-    REQUIRE(managerStats.second >= 85.0);
+    REQUIRE(cacheStats.first >= 80.0);
+    REQUIRE(cacheStats.second >= 80.0);
+    REQUIRE(managerStats.first >= 80.0);
+    REQUIRE(managerStats.second >= 80.0);
   }
 
   for (uint64_t i = 1024; i < 2048; i++) {

@@ -317,9 +317,9 @@ TEST_CASE("cache::PlainCache", "[cache][!hide][longRunning]") {
     auto managerStats = manager.globalHitRates();
     REQUIRE(cacheStats.first == 0.0);
     REQUIRE(cacheStats.second == 0.0);
-    REQUIRE(managerStats.first > 40.0);
+    REQUIRE(managerStats.first > 35.0);
     REQUIRE(managerStats.first < 50.0);
-    REQUIRE(managerStats.second > 40.0);
+    REQUIRE(managerStats.second > 35.0);
     REQUIRE(managerStats.second < 50.0);
   }
 

@@ -332,13 +332,13 @@ TEST_CASE("cache::PlainCache", "[cache][!hide][longRunning]") {
   {
     auto cacheStats = cacheMixed->hitRates();
     auto managerStats = manager.globalHitRates();
-    REQUIRE(cacheStats.first > 40.0);
+    REQUIRE(cacheStats.first > 35.0);
     REQUIRE(cacheStats.first < 50.0);
-    REQUIRE(cacheStats.second > 40.0);
+    REQUIRE(cacheStats.second > 35.0);
     REQUIRE(cacheStats.second < 50.0);
-    REQUIRE(managerStats.first > 40.0);
+    REQUIRE(managerStats.first > 35.0);
     REQUIRE(managerStats.first < 50.0);
-    REQUIRE(managerStats.second > 40.0);
+    REQUIRE(managerStats.second > 35.0);
     REQUIRE(managerStats.second < 50.0);
   }
 
@@ -102,10 +102,9 @@ TEST_CASE("cache::Table", "[cache]") {
     auto res = small->setAuxiliary(large);
     REQUIRE(res.get() == nullptr);
 
-    uint64_t indexSmall = 17;  // picked something at "random"
-    uint64_t indexLarge = indexSmall << 2;
-    uint32_t hash =
-        static_cast<uint32_t>(indexSmall << (32 - small->logSize()));
+    uint32_t indexSmall = 17;  // picked something at "random"
+    uint32_t indexLarge = indexSmall << 2;
+    uint32_t hash = indexSmall << (32 - small->logSize());
 
     auto pair = small->fetchAndLockBucket(hash, -1);
     auto bucket = reinterpret_cast<PlainBucket*>(pair.first);

@@ -132,10 +131,9 @@ TEST_CASE("cache::Table", "[cache]") {
     auto res = large->setAuxiliary(small);
     REQUIRE(res.get() == nullptr);
 
-    uint64_t indexLarge = 822;  // picked something at "random"
-    uint64_t indexSmall = indexLarge >> 2;
-    uint32_t hash =
-        static_cast<uint32_t>(indexLarge << (32 - large->logSize()));
+    uint32_t indexLarge = 822;  // picked something at "random"
+    uint32_t indexSmall = indexLarge >> 2;
+    uint32_t hash = indexLarge << (32 - large->logSize());
 
     auto subtable = large->auxiliaryBuckets(indexLarge);
     REQUIRE(subtable.get() != nullptr);

@@ -147,8 +145,8 @@ TEST_CASE("cache::Table", "[cache]") {
     auto res = small->setAuxiliary(large);
     REQUIRE(res.get() == nullptr);
 
-    uint64_t indexSmall = 217;  // picked something at "random"
-    uint64_t indexLargeBase = indexSmall << 2;
+    uint32_t indexSmall = 217;  // picked something at "random"
+    uint32_t indexLargeBase = indexSmall << 2;
 
     auto subtable = small->auxiliaryBuckets(indexSmall);
     REQUIRE(subtable.get() != nullptr);

@@ -164,8 +162,8 @@ TEST_CASE("cache::Table", "[cache]") {
     auto res = small->setAuxiliary(large);
     REQUIRE(res.get() == nullptr);
 
-    uint64_t indexSmall = 172;  // picked something at "random"
-    uint64_t indexLargeBase = indexSmall << 2;
+    uint32_t indexSmall = 172;  // picked something at "random"
+    uint32_t indexLargeBase = indexSmall << 2;
 
     auto subtable = small->auxiliaryBuckets(indexSmall);
     REQUIRE(subtable.get() != nullptr);
@@ -115,8 +115,8 @@ TEST_CASE("cache with backing store", "[cache][!hide][longRunning]") {
   }
 
   auto hitRates = manager.globalHitRates();
-  REQUIRE(hitRates.first >= 65.0);
-  REQUIRE(hitRates.second >= 85.0);
+  REQUIRE(hitRates.first >= 60.0);
+  REQUIRE(hitRates.second >= 80.0);
 
   RandomGenerator::shutdown();
 }

@@ -193,7 +193,7 @@ TEST_CASE("cache with backing store", "[cache][!hide][longRunning]") {
   }
 
   auto hitRates = manager.globalHitRates();
-  REQUIRE(hitRates.first >= 35.0);
+  REQUIRE(hitRates.first >= 30.0);
   REQUIRE(hitRates.second >= 50.0);
 
   RandomGenerator::shutdown();