mirror of https://gitee.com/bigwinds/arangodb
Bug fix/fix catch test issues (#7044)
This commit is contained in:
parent 28e22d60be
commit 221d036d5d

@@ -117,10 +117,9 @@ SynchronizeShard::SynchronizeShard(
    _result.reset(TRI_ERROR_INTERNAL, error.str());
    setState(FAILED);
  }
}

-class SynchronizeShardCallback : public arangodb::ClusterCommCallback {
+class SynchronizeShardCallback : public arangodb::ClusterCommCallback {
 public:
  explicit SynchronizeShardCallback(SynchronizeShard* callie) {};
  virtual bool operator()(arangodb::ClusterCommResult*) override final {

@@ -130,8 +129,7 @@ public:

SynchronizeShard::~SynchronizeShard() {}

-arangodb::Result getReadLockId (
+arangodb::Result getReadLockId(
  std::string const& endpoint, std::string const& database,
  std::string const& clientId, double timeout, uint64_t& id) {

@@ -168,10 +166,8 @@ arangodb::Result getReadLockId (
  }

  return arangodb::Result();
}

arangodb::Result collectionCount(
  std::shared_ptr<arangodb::LogicalCollection> const& col, uint64_t& c) {

@@ -204,10 +200,10 @@ arangodb::Result collectionCount(
  return opResult.result;
}

-arangodb::Result addShardFollower (
+arangodb::Result addShardFollower(
  std::string const& endpoint, std::string const& database,
  std::string const& shard, uint64_t lockJobId,
-  std::string const& clientId, double timeout = 120.0 ) {
+  std::string const& clientId, double timeout = 120.0) {

  LOG_TOPIC(DEBUG, Logger::MAINTENANCE)
    << "addShardFollower: tell the leader to put us into the follower list...";

@@ -291,12 +287,10 @@ arangodb::Result addShardFollower (
  }
}

-arangodb::Result removeShardFollower (
+arangodb::Result removeShardFollower(
  std::string const& endpoint, std::string const& database,
  std::string const& shard, std::string const& clientId, double timeout = 120.0) {

  LOG_TOPIC(WARN, Logger::MAINTENANCE) <<
    "removeShardFollower: tell the leader to take us off the follower list...";

@@ -332,10 +326,9 @@ arangodb::Result removeShardFollower (

  LOG_TOPIC(WARN, Logger::MAINTENANCE) << "removeShardFollower: success" ;
  return arangodb::Result();
}

-arangodb::Result cancelReadLockOnLeader (
+arangodb::Result cancelReadLockOnLeader(
  std::string const& endpoint, std::string const& database,
  uint64_t lockJobId, std::string const& clientId,
  double timeout = 10.0) {

@@ -370,10 +363,8 @@ arangodb::Result cancelReadLockOnLeader (

  LOG_TOPIC(DEBUG, Logger::MAINTENANCE) << "cancelReadLockOnLeader: success";
  return arangodb::Result();
}

arangodb::Result cancelBarrier(
  std::string const& endpoint, std::string const& database,
  int64_t barrierId, std::string const& clientId,

@@ -412,10 +403,8 @@ arangodb::Result cancelBarrier(

  LOG_TOPIC(DEBUG, Logger::MAINTENANCE) << "cancelBarrier: success";
  return arangodb::Result();
}

arangodb::Result SynchronizeShard::getReadLock(
  std::string const& endpoint, std::string const& database,
  std::string const& collection, std::string const& clientId,

@@ -516,7 +505,6 @@ arangodb::Result SynchronizeShard::startReadLockOnLeader(
  result = getReadLock(endpoint, database, collection, clientId, rlid, timeout);

  return result;
}

enum ApplierType {

@@ -546,8 +534,6 @@ arangodb::Result replicationSynchronize(

  std::shared_ptr<InitialSyncer> syncer;

  config.toJson();

  if (applierType == APPLIER_DATABASE) {
    // database-specific synchronization
    syncer.reset(new DatabaseInitialSyncer(vocbase, configuration));

@@ -608,7 +594,6 @@ arangodb::Result replicationSynchronize(

arangodb::Result replicationSynchronizeFinalize(VPackSlice const& conf) {
  auto const database = conf.get(DATABASE).copyString();
  auto const collection = conf.get(COLLECTION).copyString();
  auto const leaderId = conf.get(LEADER_ID).copyString();

@@ -645,9 +630,7 @@ arangodb::Result replicationSynchronizeFinalize(VPackSlice const& conf) {
  return r;
}

bool SynchronizeShard::first() {
  std::string database = _description.get(DATABASE);
  std::string planId = _description.get(COLLECTION);
  std::string shard = _description.get(SHARD);

@@ -669,7 +652,7 @@ bool SynchronizeShard::first() {
  while(true) {

    if (isStopping()) {
-      _result.reset(TRI_ERROR_INTERNAL, "shutting down");
+      _result.reset(TRI_ERROR_SHUTTING_DOWN);
      return false;
    }

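
The hunk above swaps a generic TRI_ERROR_INTERNAL plus free-text message for the dedicated TRI_ERROR_SHUTTING_DOWN code, so callers can react to shutdown by checking the error number instead of parsing a string. A minimal, self-contained sketch of that idea with hypothetical names (SimpleResult and ErrorCode are illustrative stand-ins, not ArangoDB's types):

    #include <iostream>
    #include <string>

    // Hypothetical stand-ins for arangodb::Result and the TRI_ERROR_* codes.
    enum class ErrorCode { None, Internal, ShuttingDown };

    struct SimpleResult {
      ErrorCode code = ErrorCode::None;
      std::string message;
      void reset(ErrorCode c, std::string msg = "") { code = c; message = std::move(msg); }
    };

    void handle(SimpleResult const& r) {
      // A dedicated code lets callers branch without string comparisons.
      if (r.code == ErrorCode::ShuttingDown) {
        std::cout << "server is shutting down, abort quietly\n";
      } else if (r.code != ErrorCode::None) {
        std::cout << "unexpected error: " << r.message << "\n";
      }
    }

    int main() {
      SimpleResult r;
      r.reset(ErrorCode::ShuttingDown);  // instead of reset(Internal, "shutting down")
      handle(r);
    }
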
@@ -30,6 +30,7 @@
#include "Logger/Logger.h"
#include "Sharding/ShardingFeature.h"
#include "Sharding/ShardingStrategy.h"
+#include "Sharding/ShardingStrategyDefault.h"
#include "Utils/CollectionNameResolver.h"
#include "VocBase/KeyGenerator.h"
#include "VocBase/LogicalCollection.h"

@@ -175,9 +176,14 @@ ShardingInfo::ShardingInfo(arangodb::velocypack::Slice info, LogicalCollection*
      }
    }
  }

  // set the sharding strategy
-  _shardingStrategy = application_features::ApplicationServer::getFeature<ShardingFeature>("Sharding")->fromVelocyPack(info, this);
+  if (!ServerState::instance()->isRunningInCluster()) {
+    // shortcut, so we do not need to set up the whole application server for testing
+    _shardingStrategy = std::make_unique<ShardingStrategyNone>();
+  } else {
+    _shardingStrategy = application_features::ApplicationServer::getFeature<ShardingFeature>("Sharding")->fromVelocyPack(info, this);
+  }
  TRI_ASSERT(_shardingStrategy != nullptr);
}

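
The change above lets ShardingInfo fall back to a no-op strategy whenever the server is not running as part of a cluster, so unit tests no longer have to wire up the whole ApplicationServer and ShardingFeature. Below is a minimal, self-contained sketch of that fallback pattern; Strategy, NoopStrategy, ClusterStrategy and makeStrategy are illustrative names, not ArangoDB's real classes:

    #include <iostream>
    #include <memory>

    struct Strategy {
      virtual ~Strategy() = default;
      virtual const char* name() const = 0;
    };

    // Used when no cluster is available (e.g. in unit tests).
    struct NoopStrategy : Strategy {
      const char* name() const override { return "none"; }
    };

    // The "real" strategy that would need a fully configured server.
    struct ClusterStrategy : Strategy {
      const char* name() const override { return "cluster"; }
    };

    std::unique_ptr<Strategy> makeStrategy(bool runningInCluster) {
      if (!runningInCluster) {
        // shortcut: no application server / feature setup required
        return std::make_unique<NoopStrategy>();
      }
      return std::make_unique<ClusterStrategy>();
    }

    int main() {
      auto s = makeStrategy(false);  // as a unit test would do
      std::cout << "selected strategy: " << s->name() << "\n";
    }
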
@@ -74,7 +74,7 @@ class ChecksumResult : public Result {
  velocypack::Builder _builder;
};

-class LogicalCollection: public LogicalDataSource {
+class LogicalCollection : public LogicalDataSource {
  friend struct ::TRI_vocbase_t;

 public:

@@ -135,13 +135,13 @@ class LogicalDataSource {
  bool deleted() const noexcept { return _deleted; }
  virtual Result drop() = 0;
  std::string const& guid() const noexcept { return _guid; }
-  TRI_voc_cid_t const& id() const noexcept { return _id; } // reference required for ShardDistributionReporterTest
+  TRI_voc_cid_t id() const noexcept { return _id; }
  std::string const& name() const noexcept { return _name; }
  TRI_voc_cid_t planId() const noexcept { return _planId; }
  uint64_t planVersion() const noexcept { return _planVersion; }
  virtual Result rename(std::string&& newName, bool doSync) = 0;
  bool system() const noexcept { return _system; }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief append a jSON definition of the data-source to the 'builder'
  /// @param the buffer to append to, must be an open object

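
id() goes back to returning TRI_voc_cid_t by value: per its own comment, the const-reference return existed so that ShardDistributionReporterTest could const_cast the backing member and overwrite it, and that patching disappears later in this diff when the test builds a real LogicalCollection instead of faking one. A short sketch of why the by-value getter is the safer shape (Widget is a hypothetical type, not from ArangoDB):

    #include <cstdint>
    #include <iostream>

    struct Widget {
      // Constructing the object with the value it should have beats mutating it
      // from the outside through a const_cast on a returned reference.
      explicit Widget(std::uint64_t id) : _id(id) {}

      // By-value getter: callers get a copy and cannot reach the member through it.
      std::uint64_t id() const noexcept { return _id; }

     private:
      std::uint64_t _id;
    };

    int main() {
      Widget w(1337);               // set the id up front, as the reworked test does
      std::cout << w.id() << "\n";  // prints 1337
    }
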
@@ -30,6 +30,8 @@
#include "catch.hpp"

#include "RocksDBEngine/RocksDBCuckooIndexEstimator.h"
+#include "RocksDBEngine/RocksDBFormat.h"
+#include "RocksDBEngine/RocksDBTypes.h"

using namespace arangodb;

@@ -41,6 +43,7 @@ using namespace arangodb;

TEST_CASE("IndexEstimator", "[rocksdb][indexestimator]") {
  // @brief Test insert unique correctness
+  rocksutils::setRocksDBKeyFormatEndianess(RocksDBEndianness::Little);

  SECTION("test_unique_values") {
    std::vector<uint64_t> toInsert(100);

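
Placing the setRocksDBKeyFormatEndianess call at TEST_CASE scope works because Catch re-runs the TEST_CASE body once per SECTION, so anything before the first SECTION acts as per-section setup. A minimal, self-contained Catch example of that behaviour (the test names here are illustrative):

    // Compile with the single-header Catch (catch.hpp) on the include path.
    #define CATCH_CONFIG_MAIN
    #include "catch.hpp"

    #include <vector>

    TEST_CASE("setup runs before every section", "[example]") {
      // Runs again for each SECTION below, like the endianness call above.
      std::vector<int> values{1, 2, 3};

      SECTION("first section sees fresh setup") {
        values.push_back(4);
        REQUIRE(values.size() == 4);
      }

      SECTION("second section also sees fresh setup") {
        REQUIRE(values.size() == 3);  // unaffected by the other section
      }
    }
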
@@ -28,11 +28,16 @@
#include "catch.hpp"
#include "fakeit.hpp"

#include "ApplicationFeatures/ApplicationServer.h"
#include "Cluster/ClusterComm.h"
#include "RestServer/DatabaseFeature.h"
#include "RestServer/QueryRegistryFeature.h"
#include "Sharding/ShardDistributionReporter.h"
#include "SimpleHttpClient/SimpleHttpResult.h"
#include "StorageEngine/EngineSelectorFeature.h"
#include "VocBase/LogicalCollection.h"
#include "VocBase/ticks.h"
#include "tests/IResearch/StorageEngineMock.h"

#include <velocypack/Builder.h>
#include <velocypack/Slice.h>

@@ -94,6 +99,21 @@ static std::shared_ptr<VPackBuilder> buildCountBody(uint64_t count) {
}

SCENARIO("The shard distribution can be reported", "[cluster][shards]") {
+  arangodb::application_features::ApplicationServer server(nullptr, nullptr);
+  StorageEngineMock engine(server);
+  arangodb::EngineSelectorFeature::ENGINE = &engine;
+  std::vector<std::pair<arangodb::application_features::ApplicationFeature*, bool>> features;
+  features.emplace_back(new arangodb::DatabaseFeature(server), false); // required for TRI_vocbase_t::dropCollection(...)
+  features.emplace_back(new arangodb::QueryRegistryFeature(server), false); // required for TRI_vocbase_t instantiation
+
+  for (auto& f: features) {
+    arangodb::application_features::ApplicationServer::server->addFeature(f.first);
+  }
+
+  for (auto& f: features) {
+    f.first->prepare();
+  }
+
  fakeit::Mock<ClusterComm> commMock;
  ClusterComm& cc = commMock.get();

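
The scenario now registers DatabaseFeature and QueryRegistryFeature, prepares them up front, and (in the final hunk of this diff) unprepares them again when the scenario ends. A small sketch of keeping such setup/teardown pairs symmetric with a scope guard; Feature and FeatureGuard are hypothetical helpers, not ArangoDB APIs:

    #include <iostream>
    #include <vector>

    // Hypothetical stand-in for an application feature with a two-phase lifecycle.
    struct Feature {
      const char* name;
      void prepare() { std::cout << "prepare " << name << "\n"; }
      void unprepare() { std::cout << "unprepare " << name << "\n"; }
    };

    // Unprepares every prepared feature in reverse order, even if a test
    // assertion throws before the end of the scenario.
    class FeatureGuard {
     public:
      explicit FeatureGuard(std::vector<Feature*> features) : _features(std::move(features)) {
        for (auto* f : _features) f->prepare();
      }
      ~FeatureGuard() {
        for (auto it = _features.rbegin(); it != _features.rend(); ++it) (*it)->unprepare();
      }
     private:
      std::vector<Feature*> _features;
    };

    int main() {
      Feature database{"Database"}, queryRegistry{"QueryRegistry"};
      FeatureGuard guard({&database, &queryRegistry});
      std::cout << "... run test sections ...\n";
    }  // guard unprepares both features here
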
@@ -106,12 +126,8 @@ SCENARIO("The shard distribution can be reported", "[cluster][shards]") {
  std::shared_ptr<CollectionInfoCurrent> cic(&cicInst,
                                             [](CollectionInfoCurrent*) {});

-  fakeit::Mock<LogicalCollection> colMock;
-  LogicalCollection& col = colMock.get();

  std::string dbname = "UnitTestDB";
  std::string colName = "UnitTestCollection";
  TRI_voc_cid_t cid = 1337;
  std::string cidString = "1337";

  std::string s1 = "s1234";

|
|||
std::string dbserver1short = "DBServer1";
|
||||
std::string dbserver2short = "DBServer2";
|
||||
std::string dbserver3short = "DBServer3";
|
||||
|
||||
TRI_vocbase_t vocbase(TRI_vocbase_type_e::TRI_VOCBASE_TYPE_NORMAL, 1, "testVocbase");
|
||||
auto json = arangodb::velocypack::Parser::fromJson("{ \"cid\" : \"1337\", \"name\": \"UnitTestCollection\" }");
|
||||
arangodb::LogicalCollection col(vocbase, json->slice(), true);
|
||||
|
||||
// Fake the aliases
|
||||
auto aliases =
|
||||
|
@@ -135,6 +155,8 @@ SCENARIO("The shard distribution can be reported", "[cluster][shards]") {
  // Fake the shard map
  auto shards = std::make_shared<ShardMap>();
  ShardMap currentShards;

+  col.setShardMap(shards);
+
  // Fake the collections
  std::vector<std::shared_ptr<LogicalCollection>> allCollections;

@@ -167,12 +189,6 @@ SCENARIO("The shard distribution can be reported", "[cluster][shards]") {
    return cic;
  });

-  const_cast<std::string&>(col.name()).assign(colName);
-  fakeit::When(
-      ConstOverloadedMethod(colMock, shardIds, std::shared_ptr<ShardMap>()))
-      .AlwaysReturn(shards);
-  const_cast<TRI_voc_cid_t&>(col.id()) = cid;
-
  ShardDistributionReporter testee(
      std::shared_ptr<ClusterComm>(&cc, [](ClusterComm*) {}), &ci);

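
With a real LogicalCollection in place, the fakeit overrides and const_cast patches removed above are no longer needed. The testee is still handed the mocked ClusterComm through a non-owning shared_ptr, i.e. a shared_ptr whose deleter does nothing, so the stack-allocated mock is not freed when the reporter releases it. A minimal, self-contained sketch of that pattern (Service and Reporter are illustrative names):

    #include <iostream>
    #include <memory>

    struct Service {
      void ping() { std::cout << "ping\n"; }
    };

    // Takes shared ownership by contract, but the test wants to keep the
    // object on the stack and control its lifetime itself.
    struct Reporter {
      explicit Reporter(std::shared_ptr<Service> s) : _service(std::move(s)) {}
      void run() { _service->ping(); }
      std::shared_ptr<Service> _service;
    };

    int main() {
      Service stackService;  // lives on the stack, e.g. a fakeit mock's get() result
      // Non-owning shared_ptr: the no-op deleter prevents a delete on a stack object.
      Reporter reporter(std::shared_ptr<Service>(&stackService, [](Service*) {}));
      reporter.run();
    }  // stackService is destroyed normally here; the shared_ptr never deletes it
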
@@ -227,6 +243,8 @@ SCENARIO("The shard distribution can be reported", "[cluster][shards]") {
                    std::vector<ServerID>{dbserver2, dbserver1, dbserver3});
    shards->emplace(s3,
                    std::vector<ServerID>{dbserver3, dbserver1, dbserver2});

+    col.setShardMap(shards);
+
    currentShards.emplace(
        s1, std::vector<ServerID>{dbserver1, dbserver2, dbserver3});

@@ -843,6 +861,8 @@ SCENARIO("The shard distribution can be reported", "[cluster][shards]") {
  WHEN("testing distribution for database") {
    GIVEN("A single collection of three shards, and 3 replicas") {
      shards->emplace(s1, std::vector<ServerID>{dbserver1, dbserver2, dbserver3});

+      col.setShardMap(shards);
+
      currentShards.emplace(s1, std::vector<ServerID>{dbserver1});

@@ -946,6 +966,8 @@ SCENARIO("The shard distribution can be reported", "[cluster][shards]") {
  WHEN("testing collection distribution for database") {
    GIVEN("A single collection of three shards, and 3 replicas") {
      shards->emplace(s1, std::vector<ServerID>{dbserver1, dbserver2, dbserver3});

+      col.setShardMap(shards);
+
      currentShards.emplace(s1, std::vector<ServerID>{dbserver1});

@@ -1088,6 +1110,8 @@ SCENARIO("The shard distribution can be reported", "[cluster][shards]") {
  WHEN("testing distribution for database") {
    GIVEN("An unhealthy cluster") {
      shards->emplace(s1, std::vector<ServerID>{dbserver1, dbserver2, dbserver3});

+      col.setShardMap(shards);
+
      currentShards.emplace(s1, std::vector<ServerID>{dbserver1});

@@ -1411,6 +1435,8 @@ SCENARIO("The shard distribution can be reported", "[cluster][shards]") {
  WHEN("testing collection distribution for database") {
    GIVEN("An unhealthy cluster") {
      shards->emplace(s1, std::vector<ServerID>{dbserver1, dbserver2, dbserver3});

+      col.setShardMap(shards);
+
      currentShards.emplace(s1, std::vector<ServerID>{dbserver1});

@@ -1879,6 +1905,10 @@ SCENARIO("The shard distribution can be reported", "[cluster][shards]") {
    }*/
    }
  }
+
+  for (auto& f: features) {
+    f.first->unprepare();
+  }
}

// -----------------------------------------------------------------------------