mirror of https://gitee.com/bigwinds/arangodb
Merge branch 'engine-api' of https://github.com/arangodb/arangodb into engine-api
* 'engine-api' of https://github.com/arangodb/arangodb: Slightly improved any iterator. Moved/split more tests. Cache workaround to pass upgrade test.
This commit is contained in: commit e83aad21ac
@@ -163,6 +163,15 @@ bool Cache::isResizing() {
  return resizing;
}

bool Cache::isShutdown() {
  bool shutdown = false;
  _state.lock();
  shutdown = !isOperational();
  _state.unlock();

  return shutdown;
}

void Cache::destroy(std::shared_ptr<Cache> cache) {
  if (cache.get() != nullptr) {
    cache->shutdown();
@@ -112,6 +112,11 @@ class Cache : public std::enable_shared_from_this<Cache> {
  //////////////////////////////////////////////////////////////////////////////
  bool isResizing();

  //////////////////////////////////////////////////////////////////////////////
  /// @brief Check whether the cache has begun the process of shutting down.
  //////////////////////////////////////////////////////////////////////////////
  bool isShutdown();

 protected:
  static constexpr int64_t triesFast = 50;
  static constexpr int64_t triesSlow = 10000;
@@ -32,6 +32,7 @@
#include "Cache/State.h"
#include "Cache/Table.h"
#include "Cache/TransactionalBucket.h"
#include "Logger/Logger.h"

#include <stdint.h>
#include <atomic>
@@ -73,6 +73,14 @@ void RocksDBIndex::createCache() {
  }
}

void RocksDBIndex::disableCache() {
  TRI_ASSERT(_cacheManager != nullptr);
  TRI_ASSERT(_useCache);
  TRI_ASSERT(_cache.get() != nullptr);
  _useCache = false;
  _cacheManager->destroyCache(_cache);
  _cache.reset();
}

int RocksDBIndex::drop() {
  // Try to drop the cache as well.
  if (_useCache && _cache != nullptr) {
@@ -48,7 +48,6 @@ class RocksDBIndex : public Index {
      arangodb::velocypack::Slice const&);

 public:
  ~RocksDBIndex();

  uint64_t objectId() const { return _objectId; }
@@ -56,7 +55,7 @@ class RocksDBIndex : public Index {
  bool isPersistent() const override final { return true; }

  int drop() override;

  int unload() override {
    // nothing to do here yet
    // TODO: free the cache the index uses
@@ -64,13 +63,14 @@ class RocksDBIndex : public Index {
  }

  /// @brief provides a size hint for the index
  int sizeHint(transaction::Methods* /*trx*/, size_t /*size*/) override final{
  int sizeHint(transaction::Methods* /*trx*/, size_t /*size*/) override final {
    // nothing to do here
    return TRI_ERROR_NO_ERROR;
  }

 protected:
  void createCache();
  void disableCache();

 protected:
  uint64_t _objectId;
@@ -178,26 +178,42 @@ bool RocksDBAllIndexIterator::outOfRange() const {

uint64_t RocksDBAnyIndexIterator::OFFSET = 0;

RocksDBAnyIndexIterator::RocksDBAnyIndexIterator(LogicalCollection* collection, transaction::Methods* trx,
                                                 ManagedDocumentResult* mmdr, RocksDBPrimaryIndex const* index)
    : IndexIterator(collection, trx, mmdr, index),
      _cmp(index->_cmp),
      _bounds(RocksDBKeyBounds::PrimaryIndex(index->objectId())) {
uint64_t RocksDBAnyIndexIterator::newOffset(LogicalCollection* collection,
                                            transaction::Methods* trx) {
  auto count = collection->numberDocuments(trx);
  /*auto adjustment = RandomGenerator::interval(count);
  OFFSET = (OFFSET + adjustment) % count;
  return OFFSET;*/
  if (count == 0) {
    return 0;
  }

  return RandomGenerator::interval(count);
}

RocksDBAnyIndexIterator::RocksDBAnyIndexIterator(
    LogicalCollection* collection, transaction::Methods* trx,
    ManagedDocumentResult* mmdr, RocksDBPrimaryIndex const* index)
    : IndexIterator(collection, trx, mmdr, index),
      _cmp(index->_cmp),
      _bounds(RocksDBKeyBounds::PrimaryIndex(index->objectId())) {
  // acquire rocksdb transaction
  RocksDBTransactionState* state = rocksutils::toRocksTransactionState(trx);
  rocksdb::Transaction* rtrx = state->rocksTransaction();
  auto options = state->readOptions();

  _iterator.reset(rtrx->GetIterator(options));
  _iterator->Seek(_bounds.start());

  // not thread safe by design
  uint64_t off = OFFSET++;
  while (_iterator->Valid() && --off > 0) {
    _iterator->Next();
  uint64_t off = newOffset(collection, trx);
  if (off > 0) {
    while (_iterator->Valid() && --off > 0) {
      _iterator->Next();
    }
  }
  if (!_iterator->Valid()) {
    OFFSET = 0;
    // OFFSET = 0;
    _iterator->Seek(_bounds.start());
  }
}
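The rewritten constructor above replaces the shared OFFSET counter with a per-iterator random offset from newOffset(): seek to the start of the bounds, skip that many entries, and re-seek to the start if the iterator runs off the end. A minimal sketch of that skip-and-wrap idea, using a std::map in place of the RocksDB primary index and std::mt19937_64 in place of ArangoDB's RandomGenerator (pickRandomEntry is an illustrative name, not part of the codebase):

#include <cstdint>
#include <map>
#include <random>
#include <string>

// Sketch only: pick a pseudo-random entry by seeking to the start, skipping a
// random number of entries, and wrapping back to the start if the offset runs
// past the end, the same skip-and-wrap idea as RocksDBAnyIndexIterator, but
// over a std::map instead of a RocksDB iterator.
template <typename Map>
typename Map::const_iterator pickRandomEntry(Map const& map, std::mt19937_64& rng) {
  if (map.empty()) {
    return map.end();  // analogous to the count == 0 case in newOffset()
  }
  std::uniform_int_distribution<std::uint64_t> dist(0, map.size() - 1);
  std::uint64_t off = dist(rng);        // analogous to newOffset()
  auto it = map.begin();                // analogous to Seek(_bounds.start())
  while (it != map.end() && off > 0) {  // analogous to the Valid()/Next() loop
    ++it;
    --off;
  }
  if (it == map.end()) {                // wrap around, like the re-Seek above
    it = map.begin();
  }
  return it;
}

int main() {
  std::map<std::string, std::uint64_t> index{{"a", 1}, {"b", 2}, {"c", 3}};
  std::mt19937_64 rng(42);
  return pickRandomEntry(index, rng) == index.end() ? 1 : 0;
}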
@@ -209,24 +225,22 @@ bool RocksDBAnyIndexIterator::next(TokenCallback const& cb, size_t limit) {
    TRI_ASSERT(limit > 0);  // Someone called with limit == 0. Api broken
    return false;
  }

  while (limit > 0) {
    RocksDBToken token(RocksDBValue::revisionId(_iterator->value()));
    cb(token);

    --limit;
    _iterator->Next();
    if (!_iterator->Valid() || outOfRange()) {
      return false;
    }
  }

  return true;
}

void RocksDBAnyIndexIterator::reset() {
  _iterator->Seek(_bounds.start());
}
void RocksDBAnyIndexIterator::reset() { _iterator->Seek(_bounds.start()); }

bool RocksDBAnyIndexIterator::outOfRange() const {
  return _cmp->Compare(_iterator->key(), _bounds.end()) > 0;
@@ -350,9 +364,18 @@ int RocksDBPrimaryIndex::insert(transaction::Methods* trx,
  if (_useCache) {
    // blacklist from cache
    bool blacklisted = false;
    uint64_t attempts = 0;
    while (!blacklisted) {
      blacklisted = _cache->blacklist(
          key.string().data(), static_cast<uint32_t>(key.string().size()));
      attempts++;
      if (attempts > 10) {
        if (_cache->isShutdown()) {
          disableCache();
          break;
        }
        attempts = 0;
      }
    }
  }
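This retry loop is the cache workaround mentioned in the commit message: keep trying to blacklist the key, but every ten failed attempts check whether the cache has shut down and, if so, stop using it; the same loop is repeated in remove() below. A standalone sketch of the pattern, assuming a hypothetical CacheLike struct with tryBlacklist() and isShutdown() callbacks (illustrative names, not ArangoDB's cache API):

#include <cstdint>
#include <functional>

// Sketch only: stand-in for the two cache operations used above.
struct CacheLike {
  std::function<bool()> tryBlacklist;  // true once the key is blacklisted
  std::function<bool()> isShutdown;    // true once the cache has shut down
};

// Keep retrying the blacklist, but every 10 failed attempts check whether the
// cache has shut down; if so, give up and tell the caller to stop using it.
bool blacklistOrGiveUp(CacheLike& cache) {
  std::uint64_t attempts = 0;
  while (!cache.tryBlacklist()) {
    if (++attempts > 10) {
      if (cache.isShutdown()) {
        return false;  // caller should disable its cache, like disableCache()
      }
      attempts = 0;
    }
  }
  return true;
}

int main() {
  int calls = 0;
  CacheLike cache;
  cache.tryBlacklist = [&calls] { return ++calls >= 3; };  // succeeds on try 3
  cache.isShutdown = [] { return false; };
  return blacklistOrGiveUp(cache) ? 0 : 1;
}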
@@ -376,9 +399,18 @@ int RocksDBPrimaryIndex::remove(transaction::Methods* trx,
  if (_useCache) {
    // blacklist from cache
    bool blacklisted = false;
    uint64_t attempts = 0;
    while (!blacklisted) {
      blacklisted = _cache->blacklist(
          key.string().data(), static_cast<uint32_t>(key.string().size()));
      attempts++;
      if (attempts > 10) {
        if (_cache->isShutdown()) {
          disableCache();
          break;
        }
        attempts = 0;
      }
    }
  }
@@ -468,15 +500,16 @@ IndexIterator* RocksDBPrimaryIndex::allIterator(transaction::Methods* trx,

/// @brief request an iterator over all elements in the index in
/// a sequential order.
IndexIterator* RocksDBPrimaryIndex::anyIterator(transaction::Methods* trx,
                                                ManagedDocumentResult* mmdr) const {
IndexIterator* RocksDBPrimaryIndex::anyIterator(
    transaction::Methods* trx, ManagedDocumentResult* mmdr) const {
  return new RocksDBAnyIndexIterator(_collection, trx, mmdr, this);
}

void RocksDBPrimaryIndex::invokeOnAllElements(transaction::Methods* trx,
    std::function<bool(DocumentIdentifierToken const&)> callback) {
void RocksDBPrimaryIndex::invokeOnAllElements(
    transaction::Methods* trx,
    std::function<bool(DocumentIdentifierToken const&)> callback) {
  ManagedDocumentResult mmdr;
  std::unique_ptr<IndexIterator> cursor (allIterator(trx, &mmdr, false));
  std::unique_ptr<IndexIterator> cursor(allIterator(trx, &mmdr, false));
  bool cnt = true;
  auto cb = [&](DocumentIdentifierToken token) {
    if (cnt) {
@@ -484,11 +517,9 @@ void RocksDBPrimaryIndex::invokeOnAllElements(transaction::Methods* trx,
    }
  };
  while (cursor->next(cb, 1000) && cnt) {
  }
}

/// @brief create the iterator, for a single attribute, IN operator
IndexIterator* RocksDBPrimaryIndex::createInIterator(
    transaction::Methods* trx, ManagedDocumentResult* mmdr,
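invokeOnAllElements above drains the iterator in batches of 1000 and routes each token through a flag-guarded lambda so the caller's callback can stop the iteration early. A minimal sketch of that batched drain-with-early-exit pattern, using hypothetical Cursor, nextBatch and Token stand-ins rather than the ArangoDB types:

#include <cstddef>
#include <cstdint>
#include <functional>
#include <vector>

using Token = std::uint64_t;  // stand-in for DocumentIdentifierToken

// Sketch only: a cursor that hands out up to `limit` tokens per call and
// returns false once exhausted, mirroring IndexIterator::next().
struct Cursor {
  std::vector<Token> tokens;
  std::size_t pos = 0;
  bool nextBatch(std::function<void(Token)> const& cb, std::size_t limit) {
    while (limit > 0 && pos < tokens.size()) {
      cb(tokens[pos++]);
      --limit;
    }
    return pos < tokens.size();
  }
};

// Drain the cursor in batches of 1000; the callback returns false to stop.
void invokeOnAll(Cursor& cursor, std::function<bool(Token)> const& callback) {
  bool cnt = true;
  auto cb = [&](Token token) {
    if (cnt) {
      cnt = callback(token);  // remember a request to stop early
    }
  };
  while (cursor.nextBatch(cb, 1000) && cnt) {
    // keep fetching batches until exhausted or stopped
  }
}

int main() {
  Cursor cursor;
  cursor.tokens = {1, 2, 3, 4, 5};
  std::size_t seen = 0;
  invokeOnAll(cursor, [&seen](Token) { return ++seen < 3; });  // stop after 3
  return seen == 3 ? 0 : 1;
}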
@@ -27,6 +27,7 @@
#include "Basics/Common.h"
#include "Indexes/Index.h"
#include "Indexes/IndexIterator.h"
#include "Random/RandomGenerator.h"
#include "RocksDBEngine/RocksDBIndex.h"
#include "RocksDBEngine/RocksDBKeyBounds.h"
#include "RocksDBEngine/RocksDBToken.h"
@@ -95,23 +96,25 @@ class RocksDBAllIndexIterator final : public IndexIterator {
};

class RocksDBAnyIndexIterator final : public IndexIterator {
 public:
  RocksDBAnyIndexIterator(LogicalCollection* collection,
                          transaction::Methods* trx,
                          ManagedDocumentResult* mmdr,
                          RocksDBPrimaryIndex const* index);

  ~RocksDBAnyIndexIterator() {}

  char const* typeName() const override { return "any-index-iterator"; }

  bool next(TokenCallback const& cb, size_t limit) override;

  void reset() override;

 private:
 public:
  RocksDBAnyIndexIterator(LogicalCollection* collection,
                          transaction::Methods* trx,
                          ManagedDocumentResult* mmdr,
                          RocksDBPrimaryIndex const* index);

  ~RocksDBAnyIndexIterator() {}

  char const* typeName() const override { return "any-index-iterator"; }

  bool next(TokenCallback const& cb, size_t limit) override;

  void reset() override;

 private:
  bool outOfRange() const;

  static uint64_t newOffset(LogicalCollection* collection,
                            transaction::Methods* trx);

  RocksDBComparator const* _cmp;
  std::unique_ptr<rocksdb::Iterator> _iterator;
  RocksDBKeyBounds _bounds;
@@ -189,10 +192,11 @@ class RocksDBPrimaryIndex final : public RocksDBIndex {

  IndexIterator* anyIterator(transaction::Methods* trx,
                             ManagedDocumentResult* mmdr) const;

  void invokeOnAllElements(transaction::Methods* trx,
      std::function<bool(DocumentIdentifierToken const&)> callback);

  void invokeOnAllElements(
      transaction::Methods* trx,
      std::function<bool(DocumentIdentifierToken const&)> callback);

 private:
  /// @brief create the iterator, for a single attribute, IN operator
  IndexIterator* createInIterator(transaction::Methods*, ManagedDocumentResult*,
@@ -0,0 +1,248 @@
/*jshint globalstrict:false, strict:false, unused : false */
/*global fail, assertTrue, assertFalse, assertEqual, assertMatch */

////////////////////////////////////////////////////////////////////////////////
/// @brief test the server-side database interface
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2010-2012 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is triAGENS GmbH, Cologne, Germany
///
/// @author Jan Steemann
/// @author Copyright 2012, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////

var jsunity = require("jsunity");
var internal = require("internal");

////////////////////////////////////////////////////////////////////////////////
/// @brief test suite: dropping databases while holding references
////////////////////////////////////////////////////////////////////////////////

var logLevel;

function DatabaseSuite () {
  'use strict';
  return {

    setUp : function () {
      logLevel = require("internal").logLevel();
      internal.db._useDatabase("_system");
    },

    tearDown : function () {
      require("internal").logLevel(logLevel);
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief test whether the expected keys are present in db._version(true)
////////////////////////////////////////////////////////////////////////////////

    testVersionDetails : function () {
      let result = internal.db._version(true);

      let keys = [
        "architecture",
        "asan",
        "asm-crc32",
        "assertions",
        "boost-version",
        "build-date",
        "compiler",
        "cplusplus",
        "debug",
        "endianness",
        "failure-tests",
        "full-version-string",
        "icu-version",
        "jemalloc",
        "maintainer-mode",
        "openssl-version",
        "platform",
        "reactor-type",
        "rocksdb-version",
        "server-version",
        "sse42",
        "unaligned-access",
        "v8-version",
        "vpack-version",
        "zlib-version"
      ];

      keys.forEach(function(k) {
        assertTrue(result.hasOwnProperty(k));
      });
    },

    testVersionBooleans : function () {
      let result = internal.db._version(true);

      let keys = [
        "asan",
        "asm-crc32",
        "assertions",
        "debug",
        "failure-tests",
        "jemalloc",
        "maintainer-mode",
        "sse42",
        "unaligned-access"
      ];

      keys.forEach(function(k) {
        assertTrue(result[k] === "true" || result[k] === "false");
      });
    },

    testVersionNumbers : function () {
      let result = internal.db._version(true);

      let keys = [
        "boost-version",
        "icu-version",
        "rocksdb-version",
        "server-version",
        "v8-version",
        "vpack-version",
        "zlib-version"
      ];

      keys.forEach(function(k) {
        assertMatch(/^\d+(\.\d+)*([\.\-][a-z\-]+\d*)?$/, result[k]);
      });
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief test references held on dropped database collections
////////////////////////////////////////////////////////////////////////////////

    testDropDatabaseCollectionReferences : function () {
      assertEqual("_system", internal.db._name());

      try {
        internal.db._dropDatabase("UnitTestsDatabase0");
      }
      catch (err) {
      }

      assertTrue(internal.db._createDatabase("UnitTestsDatabase0"));

      internal.db._useDatabase("UnitTestsDatabase0");
      assertEqual("UnitTestsDatabase0", internal.db._name());

      // insert 1000 docs and hold a reference on the data
      var c = internal.db._create("test");
      for (var i = 0; i < 1000; ++i) {
        c.save({ "_key": "test" + i, "value" : i });
      }
      assertEqual(1000, c.count());

      internal.db._useDatabase("_system");
      assertEqual(1000, c.count());

      // drop the database
      internal.db._dropDatabase("UnitTestsDatabase0");
      // should be dropped
      internal.db._databases().forEach(function (d) {
        if (d === "UnitTestsDatabase0") {
          fail();
        }
      });

      // collection should still be there
      assertEqual(1000, c.count());
      assertEqual("test", c.name());

      internal.wait(5);
      // still...
      assertEqual(1000, c.count());

      c = null;
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief test references held on documents of dropped databases
////////////////////////////////////////////////////////////////////////////////

    testDropDatabaseDocumentReferences : function () {
      assertEqual("_system", internal.db._name());

      try {
        internal.db._dropDatabase("UnitTestsDatabase0");
      }
      catch (err) {
      }

      assertTrue(internal.db._createDatabase("UnitTestsDatabase0"));

      internal.db._useDatabase("UnitTestsDatabase0");
      assertEqual("UnitTestsDatabase0", internal.db._name());

      // insert docs and hold a reference on the data
      var c = internal.db._create("test");
      for (var i = 0; i < 10; ++i) {
        c.save({ "_key": "test" + i, "value" : i });
      }

      var d0 = c.document("test0");
      var d4 = c.document("test4");
      var d9 = c.document("test9");

      c = null;

      internal.db._useDatabase("_system");

      // drop the database
      internal.db._dropDatabase("UnitTestsDatabase0");
      // should be dropped
      internal.db._databases().forEach(function (d) {
        if (d === "UnitTestsDatabase0") {
          fail();
        }
      });

      assertEqual(0, d0.value);
      assertEqual(4, d4.value);
      assertEqual(9, d9.value);

      internal.wait(5);

      assertEqual(0, d0.value);
      assertEqual(4, d4.value);
      assertEqual(9, d9.value);

      d0 = null;
      d4 = null;

      internal.wait(3);
      assertEqual(9, d9.value);

      d9 = null;
    },

  };
}

////////////////////////////////////////////////////////////////////////////////
/// @brief executes the test suite
////////////////////////////////////////////////////////////////////////////////

jsunity.run(DatabaseSuite);

return jsunity.done();
File diff suppressed because it is too large