
Feature/add cluster force one shard option (#10300)

Commit ae818e07d5 (parent 881dc59eca)
Author: Jan, committed by GitHub on 2019-10-28 13:23:33 +01:00
27 changed files with 476 additions and 272 deletions


@ -140,7 +140,7 @@ class Message {
}
/// get the content as a slice
velocypack::Slice slice() {
velocypack::Slice slice() const {
auto slices = this->slices();
if (!slices.empty()) {
return slices[0];


@ -14,7 +14,7 @@ devel
* Made the mechanism in the Web UI of replacing and upgrading a foxx app more clear.
* Fixed search not working in document view while in code mode
* Fixed search not working in document view while in code mode.
* Show shards of all collections (including system collections) in the web UI's shard
distribution view.
@ -23,10 +23,10 @@ devel
`distributeShardsLike` in case the prototype is a system collection, and the prototype
should be moved to another server.
* RClone URL normalization
* Rclone URL normalization.
* Fixed Unintended multiple unlock commands from coordinator to
transaction locked db servers
* Fixed unintended multiple unlock commands from coordinator to
transaction locked db servers.
* Disallow using `_id` or `_rev` as shard keys in clustered collections.
@ -46,7 +46,7 @@ devel
in the cluster. It now also may speed up fullCount with sorted indexes and a
limit.
* Fix config directory handling, so we don't trap into UNC path lookups on windows
* Fix config directory handling, so we don't trap into UNC path lookups on Windows.
* Prevent spurious log message "Scheduler queue is filled more than 50% in last x s"
from occurring when this is not the case. Due to a data race, the message could
@ -68,7 +68,7 @@ devel
The queue sizes can still be adjusted at server start using the above-
mentioned startup options.
* Fix compilation issue with clang 10
* Fix compilation issue with clang 10.
* Fixed issue #10062: AQL: Could not extract custom attribute.
@ -114,7 +114,7 @@ devel
This affected at least /_admin/cluster/health.
* Fixed the removal (including a collection drop) of an orphanCollection from a
graph definition when using the ArangShell. The boolean
graph definition when using the ArangoShell. The boolean
flag whether to drop the collection or not was not transferred properly.
* Retry hot backup list in cluster for 2 minutes before reporting error.
@ -136,7 +136,7 @@ devel
* Fixed adding an orphan collections as the first collection in a SmartGraph.
* Fixed issue #9862: ServerException: RestHandler/RestCursorHandler.cpp:279
* Fixed issue #9862: ServerException: RestHandler/RestCursorHandler.cpp:279.
This fixes an issue with the RocksDB primary index IN iterator not resetting its
internal iterator after being rearmed with new lookup values (which only happens
@ -205,9 +205,9 @@ devel
* Fixed cut'n'pasting code from the documentation into arangosh.
* Added initial support for wgs84 reference ellipsoid in GEO_DISTANCE through third
optional parameter to AQL function
optional parameter to AQL function.
* Added support for area calculations with GEO_AREA AQL function
* Added support for area calculations with GEO_AREA AQL function.
* Added resign leadership job to supervision.
@ -221,15 +221,17 @@ devel
* Updated TOKENS function to deal with primitive types and arrays.
* Fixed agency nodes to not create bogus keys on delete / observe / unobserve
* Fixed agency nodes to not create bogus keys on delete / observe / unobserve.
* Fixed an agency bug found in Windows tests.
v3.5.0-rc.7 (2019-08-01)
------------------------
* Upgraded arangodb starter version to 0.14.12.
v3.5.0-rc.6 (2019-07-29)
------------------------


@ -317,7 +317,6 @@ void OptimizerRulesFeature::addRules() {
// must be the first cluster optimizer rule
registerRule("cluster-one-shard", clusterOneShardRule, OptimizerRule::clusterOneShardRule,
OptimizerRule::makeFlags(OptimizerRule::Flags::CanBeDisabled,
OptimizerRule::Flags::DisabledByDefault,
OptimizerRule::Flags::ClusterOnly));
#endif
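
With the DisabledByDefault flag removed, the cluster-one-shard rule is now active by default on coordinators. Because it keeps the CanBeDisabled flag, a query can still opt out explicitly. A minimal arangosh sketch (the collection name is illustrative):

// run one query with the now-default rule switched off
const db = require('internal').db;
const result = db._query(
  "FOR doc IN exampleCollection RETURN doc",        // any AQL query
  {},                                               // no bind parameters
  { optimizer: { rules: ["-cluster-one-shard"] } }  // per-query opt-out
).toArray();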


@ -420,7 +420,7 @@ bool RestAqlHandler::killQuery(std::string const& idString) {
// "number": must be a positive integer, the cursor skips as many items,
// possibly exhausting the cursor.
// The result is a JSON with the attributes "error" (boolean),
// "errorMessage" (if applicable) and "exhausted" (boolean)
// "errorMessage" (if applicable) and "done" (boolean)
// to indicate whether or not the cursor is exhausted.
// If "number" is not given it defaults to 1.
// For the "initializeCursor" operation, one has to bind the following
@ -515,30 +515,6 @@ RestStatus RestAqlHandler::execute() {
}
break;
}
case rest::RequestType::GET: {
// in 3.3, the only GET API was /_api/aql/hasMore. Now, there is none
// in 3.4. we need to keep the old route for compatibility with 3.3
// however.
if (suffixes.size() != 2 || suffixes[0] != "hasMore") {
std::string msg("Unknown GET API: ");
msg += arangodb::basics::StringUtils::join(suffixes, '/');
LOG_TOPIC("68e57", ERR, arangodb::Logger::AQL) << msg;
generateError(rest::ResponseCode::NOT_FOUND, TRI_ERROR_HTTP_NOT_FOUND,
std::move(msg));
} else {
// for /_api/aql/hasMore, now always return with a hard-coded response
// that contains "hasMore" : true. This seems good enough to ensure
// compatibility with 3.3.
VPackBuilder answerBody;
{
VPackObjectBuilder guard(&answerBody);
answerBody.add("hasMore", VPackValue(true));
answerBody.add("error", VPackValue(false));
}
sendResponse(rest::ResponseCode::OK, answerBody.slice());
}
break;
}
case rest::RequestType::DELETE_REQ: {
if (suffixes.size() != 2) {
std::string msg("Unknown DELETE API: ");
@ -560,6 +536,7 @@ RestStatus RestAqlHandler::execute() {
}
break;
}
case rest::RequestType::GET:
case rest::RequestType::HEAD:
case rest::RequestType::PATCH:
case rest::RequestType::OPTIONS:
@ -698,7 +675,6 @@ RestStatus RestAqlHandler::handleUseQuery(std::string const& operation, Query* q
answerBuilder.add("done", VPackValue(state == ExecutionState::DONE));
if (items.get() == nullptr) {
// Backwards Compatibility
answerBuilder.add("exhausted", VPackValue(true));
answerBuilder.add(StaticStrings::Error, VPackValue(false));
} else {
items->toVelocyPack(query->trx(), answerBuilder);
@ -738,7 +714,7 @@ RestStatus RestAqlHandler::handleUseQuery(std::string const& operation, Query* q
} else if (operation == "initializeCursor") {
auto pos = VelocyPackHelper::getNumericValue<size_t>(querySlice, "pos", 0);
Result res;
if (VelocyPackHelper::getBooleanValue(querySlice, "exhausted", true)) {
if (VelocyPackHelper::getBooleanValue(querySlice, "done", true)) {
auto tmpRes = query->engine()->initializeCursor(nullptr, 0);
if (tmpRes.first == ExecutionState::WAITING) {
return RestStatus::WAITING;


@ -47,14 +47,7 @@ using namespace arangodb::basics;
using namespace arangodb::options;
ClusterFeature::ClusterFeature(application_features::ApplicationServer& server)
: ApplicationFeature(server, "Cluster"),
_unregisterOnShutdown(false),
_enableCluster(false),
_requirePersistedId(false),
_heartbeatThread(nullptr),
_heartbeatInterval(0),
_agencyCallbackRegistry(nullptr),
_requestedRole(ServerState::RoleEnum::ROLE_UNDEFINED) {
: ApplicationFeature(server, "Cluster") {
setOptional(true);
startsAfter<CommunicationFeaturePhase>();
startsAfter<DatabaseFeaturePhase>();
@ -148,16 +141,20 @@ void ClusterFeature::collectOptions(std::shared_ptr<ProgramOptions> options) {
"maximum replication factor for new collections (0 = unrestricted)",
new UInt32Parameter(&_maxReplicationFactor)).setIntroducedIn(30600);
options->addOption("--cluster.max-number-of-shards",
"maximum number of shards when creating new collections (0 = unrestricted)",
new UInt32Parameter(&_maxNumberOfShards)).setIntroducedIn(30501);
options->addOption("--cluster.force-one-shard",
"force one-shard mode for all new collections",
new BooleanParameter(&_forceOneShard)).setIntroducedIn(30600);
options->addOption(
"--cluster.create-waits-for-sync-replication",
"active coordinator will wait for all replicas to create collection",
new BooleanParameter(&_createWaitsForSyncReplication),
arangodb::options::makeFlags(arangodb::options::Flags::Hidden));
options->addOption("--cluster.max-number-of-shards",
"maximum number of shards when creating new collections (0 = unrestricted)",
new UInt32Parameter(&_maxNumberOfShards)).setIntroducedIn(30501);
options->addOption(
"--cluster.index-create-timeout",
"amount of time (in seconds) the coordinator will wait for an index to "
@ -178,6 +175,14 @@ void ClusterFeature::validateOptions(std::shared_ptr<ProgramOptions> options) {
<< "details.";
FATAL_ERROR_EXIT();
}
if (_forceOneShard) {
_maxNumberOfShards = 1;
} else if (_maxNumberOfShards == 0) {
LOG_TOPIC("e83c2", FATAL, arangodb::Logger::CLUSTER)
<< "Invalid value for `--max-number-of-shards`. The value must be at least 1";
FATAL_ERROR_EXIT();
}
if (_minReplicationFactor == 0) {
// min replication factor must not be 0
@ -497,10 +502,14 @@ void ClusterFeature::start() {
std::string myId = ServerState::instance()->getId();
LOG_TOPIC("b6826", INFO, arangodb::Logger::CLUSTER)
<< "Cluster feature is turned on. Agency version: " << version
<< ", Agency endpoints: " << endpoints << ", server id: '" << myId
<< "Cluster feature is turned on"
<< (_forceOneShard ? " with one-shard mode" : "")
<< ". Agency version: " << version
<< ", Agency endpoints: " << endpoints
<< ", server id: '" << myId
<< "', internal endpoint / address: " << _myEndpoint
<< "', advertised endpoint: " << _myAdvertisedEndpoint << ", role: " << role;
<< "', advertised endpoint: " << _myAdvertisedEndpoint
<< ", role: " << role;
AgencyCommResult result = comm.getValues("Sync/HeartbeatIntervalMs");
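
When --cluster.force-one-shard is set, validateOptions() above pins the maximum number of shards to 1, so requesting more shards at collection creation is rejected rather than silently reduced. A sketch of the observable behavior in arangosh (the collection name is illustrative):

const db = require('internal').db;
const errors = require('@arangodb').errors;
let failed = false;
try {
  // asks for more shards than the enforced maximum of one
  db._create("exampleCollection", { numberOfShards: 2 });
} catch (err) {
  failed = (err.errorNum === errors.ERROR_CLUSTER_TOO_MANY_SHARDS.code);
}
print(failed); // true: rejected by validateShardsAndReplicationFactor()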


@ -44,6 +44,7 @@ class ClusterFeature : public application_features::ApplicationFeature {
void validateOptions(std::shared_ptr<options::ProgramOptions>) override final;
void prepare() override final;
void start() override final;
void stop() override final;
void beginShutdown() override final;
void unprepare() override final;
@ -55,6 +56,33 @@ class ClusterFeature : public application_features::ApplicationFeature {
std::string const& myRole() const noexcept { return _myRole; }
void syncDBServerStatusQuo();
AgencyCallbackRegistry* agencyCallbackRegistry() const {
return _agencyCallbackRegistry.get();
}
std::string const agencyCallbacksPath() const {
return "/_api/agency/agency-callbacks";
}
std::string const clusterRestPath() const { return "/_api/cluster"; }
void setUnregisterOnShutdown(bool);
bool createWaitsForSyncReplication() const {
return _createWaitsForSyncReplication;
}
std::uint32_t writeConcern() const { return _writeConcern; }
std::uint32_t systemReplicationFactor() { return _systemReplicationFactor; }
std::uint32_t defaultReplicationFactor() { return _defaultReplicationFactor; }
std::uint32_t maxNumberOfShards() const { return _maxNumberOfShards; }
std::uint32_t minReplicationFactor() const { return _minReplicationFactor; }
std::uint32_t maxReplicationFactor() const { return _maxReplicationFactor; }
double indexCreationTimeout() const { return _indexCreationTimeout; }
bool forceOneShard() const { return _forceOneShard; }
std::shared_ptr<HeartbeatThread> heartbeatThread();
ClusterInfo& clusterInfo();
protected:
void startHeartbeatThread(AgencyCallbackRegistry* agencyCallbackRegistry,
@ -62,6 +90,8 @@ class ClusterFeature : public application_features::ApplicationFeature {
const std::string& endpoints);
private:
void reportRole(ServerState::RoleEnum);
std::vector<std::string> _agencyEndpoints;
std::string _agencyPrefix;
std::string _myRole;
@ -74,48 +104,16 @@ class ClusterFeature : public application_features::ApplicationFeature {
std::uint32_t _maxReplicationFactor = 10; // maximum replication factor (0 = unrestricted)
std::uint32_t _maxNumberOfShards = 1000; // maximum number of shards (0 = unrestricted)
bool _createWaitsForSyncReplication = true;
bool _forceOneShard = false;
bool _unregisterOnShutdown = false;
bool _enableCluster = false;
bool _requirePersistedId = false;
double _indexCreationTimeout = 3600.0;
void reportRole(ServerState::RoleEnum);
public:
AgencyCallbackRegistry* agencyCallbackRegistry() const {
return _agencyCallbackRegistry.get();
}
std::string const agencyCallbacksPath() const {
return "/_api/agency/agency-callbacks";
};
std::string const clusterRestPath() const { return "/_api/cluster"; };
void setUnregisterOnShutdown(bool);
bool createWaitsForSyncReplication() const {
return _createWaitsForSyncReplication;
};
double indexCreationTimeout() const { return _indexCreationTimeout; }
std::uint32_t writeConcern() const { return _writeConcern; }
std::uint32_t systemReplicationFactor() { return _systemReplicationFactor; }
std::uint32_t defaultReplicationFactor() { return _defaultReplicationFactor; }
std::uint32_t maxNumberOfShards() const { return _maxNumberOfShards; }
std::uint32_t minReplicationFactor() const { return _minReplicationFactor; }
std::uint32_t maxReplicationFactor() const { return _maxReplicationFactor; }
void stop() override final;
std::shared_ptr<HeartbeatThread> heartbeatThread();
ClusterInfo& clusterInfo();
private:
std::unique_ptr<ClusterInfo> _clusterInfo;
bool _unregisterOnShutdown;
bool _enableCluster;
bool _requirePersistedId;
std::shared_ptr<HeartbeatThread> _heartbeatThread;
uint64_t _heartbeatInterval;
uint64_t _heartbeatInterval = 0;
std::unique_ptr<AgencyCallbackRegistry> _agencyCallbackRegistry;
ServerState::RoleEnum _requestedRole;
ServerState::RoleEnum _requestedRole = ServerState::RoleEnum::ROLE_UNDEFINED;
};
} // namespace arangodb


@ -42,6 +42,7 @@
#include "Random/RandomGenerator.h"
#include "Rest/CommonDefines.h"
#include "RestServer/DatabaseFeature.h"
#include "RestServer/SystemDatabaseFeature.h"
#include "StorageEngine/PhysicalCollection.h"
#include "Utils/Events.h"
#include "VocBase/LogicalCollection.h"
@ -1013,6 +1014,29 @@ void ClusterInfo::loadPlan() {
<< ", doneVersion=" << _planProt.doneVersion;
}
if (ServerState::instance()->isCoordinator()) {
auto systemDB = _server.getFeature<arangodb::SystemDatabaseFeature>().use();
if (systemDB && systemDB->shardingPrototype() == ShardingPrototype::Undefined) {
// sharding prototype of _system database defaults to _users nowadays
systemDB->setShardingPrototype(ShardingPrototype::Users);
// but for "old" databases it may still be "_graphs". we need to find out!
// find _system database in Plan
auto it = newCollections.find(StaticStrings::SystemDatabase);
if (it != newCollections.end()) {
// find _graphs collection in Plan
auto it2 = (*it).second.find(StaticStrings::GraphCollection);
if (it2 != (*it).second.end()) {
// found!
if ((*it2).second->distributeShardsLike().empty()) {
// _graphs collection has no distributeShardsLike, so it is
// the prototype!
systemDB->setShardingPrototype(ShardingPrototype::Graphs);
}
}
}
}
}
WRITE_LOCKER(writeLocker, _planProt.lock);
_plan = std::move(planBuilder);


@ -718,18 +718,23 @@ static std::shared_ptr<std::unordered_map<std::string, std::vector<std::string>>
static std::shared_ptr<std::unordered_map<std::string, std::vector<std::string>>> CloneShardDistribution(
ClusterInfo& ci, std::shared_ptr<LogicalCollection> col,
std::shared_ptr<LogicalCollection> const& other) {
auto result =
std::make_shared<std::unordered_map<std::string, std::vector<std::string>>>();
TRI_ASSERT(col);
TRI_ASSERT(other);
if (!other->distributeShardsLike().empty()) {
CollectionNameResolver resolver(col->vocbase());
std::string name = other->distributeShardsLike();
TRI_voc_cid_t cid = arangodb::basics::StringUtils::uint64(name);
if (cid > 0) {
name = resolver.getCollectionNameCluster(cid);
}
std::string const errorMessage = "Cannot distribute shards like '" + other->name() +
"' it is already distributed like '" +
other->distributeShardsLike() + "'.";
"' it is already distributed like '" + name + "'.";
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_CLUSTER_CHAIN_OF_DISTRIBUTESHARDSLIKE, errorMessage);
}
auto result =
std::make_shared<std::unordered_map<std::string, std::vector<std::string>>>();
// We need to replace the distribute with the cid.
auto cidString = arangodb::basics::StringUtils::itoa(other.get()->id());


@ -81,7 +81,7 @@ std::shared_ptr<transaction::Context> GraphManager::ctx() const {
}
return transaction::StandaloneContext::Create(_vocbase);
};
}
OperationResult GraphManager::createEdgeCollection(std::string const& name,
bool waitForSync, VPackSlice options) {
@ -97,18 +97,40 @@ OperationResult GraphManager::createCollection(std::string const& name, TRI_col_
bool waitForSync, VPackSlice options) {
TRI_ASSERT(colType == TRI_COL_TYPE_DOCUMENT || colType == TRI_COL_TYPE_EDGE);
auto& vocbase = ctx()->vocbase();
VPackBuilder helper;
helper.openObject();
if (ServerState::instance()->isCoordinator()) {
Result res = ShardingInfo::validateShardsAndReplicationFactor(options, ctx()->vocbase().server());
Result res = ShardingInfo::validateShardsAndReplicationFactor(options, vocbase.server());
if (res.fail()) {
return OperationResult(res);
}
bool forceOneShard =
vocbase.server().getFeature<ClusterFeature>().forceOneShard() ||
(vocbase.sharding() == "single" &&
options.get(StaticStrings::DistributeShardsLike).isNone() &&
arangodb::basics::VelocyPackHelper::readNumericValue<uint64_t>(options, StaticStrings::NumberOfShards, 0) <= 1);
if (forceOneShard) {
// force a single shard with shards distributed like "_graph"
helper.add(StaticStrings::NumberOfShards, VPackValue(1));
helper.add(StaticStrings::DistributeShardsLike, VPackValue(vocbase.shardingPrototypeName()));
}
}
helper.close();
VPackBuilder mergedBuilder =
VPackCollection::merge(options, helper.slice(), false, true);
auto res = arangodb::methods::Collections::create( // create collection
ctx()->vocbase(), // collection vocbase
vocbase, // collection vocbase
name, // collection name
colType, // collection type
options, // collection properties
mergedBuilder.slice(), // collection properties
waitForSync, true, false, [](std::shared_ptr<LogicalCollection> const&) -> void {});
return OperationResult(res);
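
For graphs this means that, with --cluster.force-one-shard enabled or in a database created with "sharding": "single", the merged properties force every graph collection onto a single shard distributed like the database's prototype collection. A sketch in arangosh (graph and collection names are illustrative):

const db = require('internal').db;
const graphModule = require('@arangodb/general-graph');
const g = graphModule._create("exampleGraph",
  [graphModule._relation("exampleEdges", ["exampleVertices"], ["exampleVertices"])]);
const props = db._collection("exampleVertices").properties();
print(props.numberOfShards);        // 1
print(props.distributeShardsLike);  // the prototype collection, e.g. "_graphs"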


@ -1090,23 +1090,43 @@ Result RestReplicationHandler::processRestoreCollectionCoordinator(
std::string newId = StringUtils::itoa(newIdTick);
toMerge.add("id", VPackValue(newId));
// Number of shards. Will be overwritten if not existent
VPackSlice const numberOfShardsSlice = parameters.get(StaticStrings::NumberOfShards);
if (!numberOfShardsSlice.isInteger()) {
// The information does not contain numberOfShards. Overwrite it.
VPackSlice const shards = parameters.get("shards");
if (shards.isObject()) {
numberOfShards = static_cast<uint64_t>(shards.length());
} else {
// "shards" not specified
// now check if numberOfShards property was given
if (numberOfShards == 0) {
// We take one shard if no value was given
numberOfShards = 1;
}
if (_vocbase.server().getFeature<ClusterFeature>().forceOneShard()) {
// force one shard, and force distributeShardsLike to be "_graphs"
toMerge.add(StaticStrings::NumberOfShards, VPackValue(1));
if (!_vocbase.IsSystemName(name)) {
// system-collections will be sharded normally. only user collections will get
// the forced sharding
toMerge.add(StaticStrings::DistributeShardsLike, VPackValue(_vocbase.shardingPrototypeName()));
}
} else {
// Number of shards. Will be overwritten if not existent
VPackSlice const numberOfShardsSlice = parameters.get(StaticStrings::NumberOfShards);
if (!numberOfShardsSlice.isInteger()) {
// The information does not contain numberOfShards. Overwrite it.
VPackSlice const shards = parameters.get("shards");
if (shards.isObject()) {
numberOfShards = static_cast<uint64_t>(shards.length());
} else {
// "shards" not specified
// now check if numberOfShards property was given
if (numberOfShards == 0) {
// We take one shard if no value was given
numberOfShards = 1;
}
}
TRI_ASSERT(numberOfShards > 0);
toMerge.add(StaticStrings::NumberOfShards, VPackValue(numberOfShards));
} else {
numberOfShards = numberOfShardsSlice.getUInt();
}
if (_vocbase.sharding() == "single" &&
parameters.get(StaticStrings::DistributeShardsLike).isNone() &&
!_vocbase.IsSystemName(name) &&
numberOfShards <= 1) {
// shard like _graphs
toMerge.add(StaticStrings::DistributeShardsLike, VPackValue(_vocbase.shardingPrototypeName()));
}
TRI_ASSERT(numberOfShards > 0);
toMerge.add(StaticStrings::NumberOfShards, VPackValue(numberOfShards));
}
// Replication Factor. Will be overwritten if not existent


@ -45,7 +45,8 @@
namespace {
static std::string const FEATURE_NAME("Bootstrap");
static std::string const boostrapKey = "Bootstrap";
static std::string const bootstrapKey = "Bootstrap";
static std::string const healthKey = "Supervision/Health";
}
namespace arangodb {
@ -95,7 +96,7 @@ void raceForClusterBootstrap(BootstrapFeature& feature) {
AgencyComm agency;
auto& ci = feature.server().getFeature<ClusterFeature>().clusterInfo();
while (true) {
AgencyCommResult result = agency.getValues(::boostrapKey);
AgencyCommResult result = agency.getValues(::bootstrapKey);
if (!result.successful()) {
// Error in communication, note that value not found is not an error
LOG_TOPIC("2488f", TRACE, Logger::STARTUP)
@ -105,7 +106,7 @@ void raceForClusterBootstrap(BootstrapFeature& feature) {
}
VPackSlice value = result.slice()[0].get(
std::vector<std::string>({AgencyCommManager::path(), ::boostrapKey}));
std::vector<std::string>({AgencyCommManager::path(), ::bootstrapKey}));
if (value.isString()) {
// key was found and is a string
std::string boostrapVal = value.copyString();
@ -115,7 +116,7 @@ void raceForClusterBootstrap(BootstrapFeature& feature) {
<< "raceForClusterBootstrap: bootstrap already done";
return;
} else if (boostrapVal == ServerState::instance()->getId()) {
agency.removeValues(::boostrapKey, false);
agency.removeValues(::bootstrapKey, false);
}
LOG_TOPIC("49437", DEBUG, Logger::STARTUP)
<< "raceForClusterBootstrap: somebody else does the bootstrap";
@ -126,7 +127,7 @@ void raceForClusterBootstrap(BootstrapFeature& feature) {
// No value set, we try to do the bootstrap ourselves:
VPackBuilder b;
b.add(VPackValue(arangodb::ServerState::instance()->getId()));
result = agency.casValue(::boostrapKey, b.slice(), false, 300, 15);
result = agency.casValue(::bootstrapKey, b.slice(), false, 300, 15);
if (!result.successful()) {
LOG_TOPIC("a1ecb", DEBUG, Logger::STARTUP)
<< "raceForClusterBootstrap: lost race, somebody else will bootstrap";
@ -146,7 +147,7 @@ void raceForClusterBootstrap(BootstrapFeature& feature) {
if (dbservers.size() == 0) {
LOG_TOPIC("0ad1c", TRACE, Logger::STARTUP)
<< "raceForClusterBootstrap: no DBservers, waiting";
agency.removeValues(::boostrapKey, false);
agency.removeValues(::bootstrapKey, false);
std::this_thread::sleep_for(std::chrono::seconds(1));
continue;
}
@ -161,7 +162,7 @@ void raceForClusterBootstrap(BootstrapFeature& feature) {
if (upgradeRes.fail()) {
LOG_TOPIC("8903f", ERR, Logger::STARTUP) << "Problems with cluster bootstrap, "
<< "marking as not successful.";
agency.removeValues(::boostrapKey, false);
agency.removeValues(::bootstrapKey, false);
std::this_thread::sleep_for(std::chrono::seconds(1));
continue;
}
@ -182,7 +183,7 @@ void raceForClusterBootstrap(BootstrapFeature& feature) {
b.clear();
b.add(VPackValue(arangodb::ServerState::instance()->getId() + ": done"));
result = agency.setValue(::boostrapKey, b.slice(), 0);
result = agency.setValue(::bootstrapKey, b.slice(), 0);
if (result.successful()) {
return;
}
@ -341,6 +342,31 @@ void BootstrapFeature::start() {
// Start service properly:
ServerState::setServerMode(ServerState::Mode::DEFAULT);
}
if (ServerState::isCoordinator(role)) {
LOG_TOPIC("4000c", DEBUG, arangodb::Logger::CLUSTER) << "waiting for our health entry to appear in Supervision/Health";
bool found = false;
AgencyComm agency;
int tries = 0;
while (++tries < 30) {
AgencyCommResult result = agency.getValues(::healthKey);
if (result.successful()) {
VPackSlice value = result.slice()[0].get(
std::vector<std::string>({AgencyCommManager::path(), "Supervision", "Health", ServerState::instance()->getId(), "Status"}));
if (value.isString() && !value.copyString().empty()) {
found = true;
break;
}
}
std::this_thread::sleep_for(std::chrono::milliseconds(500));
}
if (found) {
LOG_TOPIC("b0de6", DEBUG, arangodb::Logger::CLUSTER) << "found our health entry in Supervision/Health";
} else {
LOG_TOPIC("2c993", INFO, arangodb::Logger::CLUSTER) << "did not find our health entry after 15 s in Supervision/Health";
}
}
LOG_TOPIC("cf3f4", INFO, arangodb::Logger::FIXME)
<< "ArangoDB (version " << ARANGODB_VERSION_FULL


@ -54,7 +54,7 @@ ShardingInfo::ShardingInfo(arangodb::velocypack::Slice info, LogicalCollection*
_shardIds(new ShardMap()) {
bool const isSmart =
basics::VelocyPackHelper::readBooleanValue(info, StaticStrings::IsSmart, false);
if (isSmart && _collection->type() == TRI_COL_TYPE_EDGE) {
// smart edge collection
_numberOfShards = 0;
@ -89,19 +89,17 @@ ShardingInfo::ShardingInfo(arangodb::velocypack::Slice info, LogicalCollection*
"invalid number of shards");
}
if (info.hasKey("avoidServers")) {
auto avoidServersSlice = info.get("avoidServers");
if (avoidServersSlice.isArray()) {
for (const auto& i : VPackArrayIterator(avoidServersSlice)) {
if (i.isString()) {
_avoidServers.push_back(i.copyString());
} else {
LOG_TOPIC("e5bc6", ERR, arangodb::Logger::FIXME)
<< "avoidServers must be a vector of strings, we got "
<< avoidServersSlice.toJson() << ". discarding!";
_avoidServers.clear();
break;
}
auto avoidServersSlice = info.get("avoidServers");
if (avoidServersSlice.isArray()) {
for (const auto& i : VPackArrayIterator(avoidServersSlice)) {
if (i.isString()) {
_avoidServers.push_back(i.copyString());
} else {
LOG_TOPIC("e5bc6", ERR, arangodb::Logger::FIXME)
<< "avoidServers must be a vector of strings, we got "
<< avoidServersSlice.toJson() << ". discarding!";
_avoidServers.clear();
break;
}
}
}
@ -164,7 +162,7 @@ ShardingInfo::ShardingInfo(arangodb::velocypack::Slice info, LogicalCollection*
}
}
}
// replicationFactor == 0 -> satellite collection
if (shardKeysSlice.isNone() || _replicationFactor == 0) {
// Use default.
@ -246,6 +244,7 @@ ShardingInfo::ShardingInfo(ShardingInfo const& other, LogicalCollection* collect
: _collection(collection),
_numberOfShards(other.numberOfShards()),
_replicationFactor(other.replicationFactor()),
_writeConcern(other.writeConcern()),
_distributeShardsLike(other.distributeShardsLike()),
_avoidServers(other.avoidServers()),
_shardKeys(other.shardKeys()),
@ -363,6 +362,7 @@ void ShardingInfo::distributeShardsLike(std::string const& cid, ShardingInfo con
}
_replicationFactor = other->replicationFactor();
_writeConcern = other->writeConcern();
_numberOfShards = other->numberOfShards();
}
@ -485,8 +485,6 @@ int ShardingInfo::getResponsibleShard(arangodb::velocypack::Slice slice, bool do
Result ShardingInfo::validateShardsAndReplicationFactor(arangodb::velocypack::Slice slice,
application_features::ApplicationServer& server) {
Result res;
if (slice.isObject()) {
auto const& cl = server.getFeature<ClusterFeature>();
@ -496,33 +494,35 @@ Result ShardingInfo::validateShardsAndReplicationFactor(arangodb::velocypack::Sl
uint32_t numberOfShards = numberOfShardsSlice.getNumber<uint32_t>();
if (maxNumberOfShards > 0 &&
numberOfShards > maxNumberOfShards) {
res.reset(TRI_ERROR_CLUSTER_TOO_MANY_SHARDS,
std::string("too many shards. maximum number of shards is ") + std::to_string(maxNumberOfShards));
return Result(TRI_ERROR_CLUSTER_TOO_MANY_SHARDS,
std::string("too many shards. maximum number of shards is ") + std::to_string(maxNumberOfShards));
}
TRI_ASSERT((cl.forceOneShard() && numberOfShards <= 1) || !cl.forceOneShard());
}
auto replicationFactorSlice = slice.get(StaticStrings::ReplicationFactor);
if (replicationFactorSlice.isNumber()) {
int64_t replicationFactorProbe = replicationFactorSlice.getNumber<int64_t>();
if (replicationFactorProbe <= 0) {
res.reset(TRI_ERROR_BAD_PARAMETER, "invalid value for replicationFactor");
} else {
uint32_t const minReplicationFactor = cl.minReplicationFactor();
uint32_t const maxReplicationFactor = cl.maxReplicationFactor();
uint32_t replicationFactor = replicationFactorSlice.getNumber<uint32_t>();
return Result(TRI_ERROR_BAD_PARAMETER, "invalid value for replicationFactor");
}
if (replicationFactor > maxReplicationFactor &&
maxReplicationFactor > 0) {
res.reset(TRI_ERROR_BAD_PARAMETER,
std::string("replicationFactor must not be higher than maximum allowed replicationFactor (") + std::to_string(maxReplicationFactor) + ")");
} else if (replicationFactor < minReplicationFactor &&
minReplicationFactor > 0) {
res.reset(TRI_ERROR_BAD_PARAMETER,
std::string("replicationFactor must not be lower than minimum allowed replicationFactor (") + std::to_string(minReplicationFactor) + ")");
}
uint32_t const minReplicationFactor = cl.minReplicationFactor();
uint32_t const maxReplicationFactor = cl.maxReplicationFactor();
uint32_t replicationFactor = replicationFactorSlice.getNumber<uint32_t>();
if (replicationFactor > maxReplicationFactor &&
maxReplicationFactor > 0) {
return Result(TRI_ERROR_BAD_PARAMETER,
std::string("replicationFactor must not be higher than maximum allowed replicationFactor (") + std::to_string(maxReplicationFactor) + ")");
} else if (replicationFactor < minReplicationFactor &&
minReplicationFactor > 0) {
return Result(TRI_ERROR_BAD_PARAMETER,
std::string("replicationFactor must not be lower than minimum allowed replicationFactor (") + std::to_string(minReplicationFactor) + ")");
}
}
}
return res;
return Result();
}
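
The replication factor bounds are enforced in the same place. A sketch assuming the server runs with --cluster.min-replication-factor 2 and --cluster.max-replication-factor 3, as in the test suite further below (the collection name is illustrative):

const db = require('internal').db;
const errors = require('@arangodb').errors;
let rejected = false;
try {
  // above the configured maximum replication factor of 3
  db._create("exampleCollection", { replicationFactor: 4 });
} catch (err) {
  rejected = (err.errorNum === errors.ERROR_BAD_PARAMETER.code);
}
print(rejected); // true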


@ -298,29 +298,29 @@ Result Collections::create(TRI_vocbase_t& vocbase,
helper.add(StaticStrings::ReplicationFactor, VPackValue(factor));
}
bool hasDistribute = false;
auto distribute = info.properties.get(StaticStrings::DistributeShardsLike);
if (!distribute.isNone()) {
hasDistribute = true;
}
// system collections will be sharded normally - we avoid a self reference when creating _graphs
if (vocbase.sharding() == "single" && !vocbase.IsSystemName(info.name)) {
if(!hasDistribute) {
helper.add(StaticStrings::DistributeShardsLike, VPackValue(StaticStrings::GraphCollection));
hasDistribute = true;
} else if (distribute.isString() && distribute.compareString("") == 0) {
helper.add(StaticStrings::DistributeShardsLike, VPackSlice::nullSlice()); //delete empty string from info slice
if (!vocbase.IsSystemName(info.name)) {
uint64_t numberOfShards = arangodb::basics::VelocyPackHelper::readNumericValue<uint64_t>(info.properties, StaticStrings::NumberOfShards, 0);
// system-collections will be sharded normally. only user collections will get
// the forced sharding
if (vocbase.server().getFeature<ClusterFeature>().forceOneShard()) {
// force one shard, and force distributeShardsLike to be "_graphs"
helper.add(StaticStrings::NumberOfShards, VPackValue(1));
helper.add(StaticStrings::DistributeShardsLike, VPackValue(vocbase.shardingPrototypeName()));
} else if (vocbase.sharding() == "single" && numberOfShards <= 1) {
auto distributeSlice = info.properties.get(StaticStrings::DistributeShardsLike);
if (distributeSlice.isNone()) {
helper.add(StaticStrings::DistributeShardsLike, VPackValue(vocbase.shardingPrototypeName()));
} else if (distributeSlice.isString() && distributeSlice.compareString("") == 0) {
helper.add(StaticStrings::DistributeShardsLike, VPackSlice::nullSlice()); //delete empty string from info slice
}
}
}
if (!hasDistribute) {
// not an error: for historical reasons the write concern is read from the
// variable "minReplicationFactor"
auto writeConcernSlice = info.properties.get(StaticStrings::MinReplicationFactor);
if (writeConcernSlice.isNone()) {
helper.add(StaticStrings::MinReplicationFactor, VPackValue(vocbase.writeConcern()));
}
// not an error: for historical reasons the write concern is read from the
// variable "minReplicationFactor"
auto writeConcernSlice = info.properties.get(StaticStrings::MinReplicationFactor);
if (writeConcernSlice.isNone()) {
helper.add(StaticStrings::MinReplicationFactor, VPackValue(vocbase.writeConcern()));
}
} else { // single server
helper.add(StaticStrings::DistributeShardsLike, VPackSlice::nullSlice()); //delete empty string from info slice
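
On a coordinator the net effect is that a user collection created without explicit sharding options in a "sharding": "single" database (or with --cluster.force-one-shard) gets one shard distributed like the database's prototype collection, while system collections keep their normal sharding. A sketch, assuming a running cluster (database and collection names are illustrative):

const db = require('internal').db;
db._createDatabase("exampleDB", { sharding: "single" });
db._useDatabase("exampleDB");
const props = db._create("exampleCollection").properties();
print(props.numberOfShards);        // 1
print(props.distributeShardsLike);  // the prototype collection, e.g. "_graphs"
db._useDatabase("_system");
db._dropDatabase("exampleDB");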


@ -22,6 +22,7 @@
#include "VocbaseInfo.h"
#include "Basics/StaticStrings.h"
#include "Basics/StringUtils.h"
#include "Cluster/ClusterFeature.h"
#include "Cluster/ClusterInfo.h"
@ -32,6 +33,17 @@
namespace arangodb {
CreateDatabaseInfo::CreateDatabaseInfo(application_features::ApplicationServer& server) : _server(server) {}
ShardingPrototype CreateDatabaseInfo::shardingPrototype() const {
if (_name != StaticStrings::SystemDatabase) {
return ShardingPrototype::Graphs;
}
return _shardingPrototype;
}
void CreateDatabaseInfo::shardingPrototype(ShardingPrototype type) {
_shardingPrototype = type;
}
Result CreateDatabaseInfo::load(std::string const& name, uint64_t id) {
Result res;


@ -120,6 +120,9 @@ class CreateDatabaseInfo {
return _sharding;
}
ShardingPrototype shardingPrototype() const;
void shardingPrototype(ShardingPrototype type);
void allowSystemDB(bool s) { _isSystemDB = s; }
private:
@ -133,11 +136,12 @@ class CreateDatabaseInfo {
std::uint64_t _id = 0;
std::string _name = "";
std::string _sharding = "flexible";
std::vector<DBUser> _users;
std::uint32_t _replicationFactor = 1;
std::uint32_t _writeConcern = 1;
std::string _sharding = "flexible";
ShardingPrototype _shardingPrototype = ShardingPrototype::Undefined;
bool _validId = false;
bool _valid = false; // required because TRI_ASSERT needs variable in Release mode.


@ -90,6 +90,12 @@ enum TRI_edge_direction_e {
TRI_EDGE_OUT = 2
};
enum class ShardingPrototype : uint32_t {
Undefined = 0,
Users = 1,
Graphs = 2
};
/// @brief Hash and Equal comparison for a vector of VPackSlice
namespace std {


@ -1823,6 +1823,21 @@ void TRI_vocbase_t::toVelocyPack(VPackBuilder& result) const {
}
}
/// @brief sets prototype collection for sharding (_users or _graphs)
void TRI_vocbase_t::setShardingPrototype(ShardingPrototype type) {
_info.shardingPrototype(type);
}
/// @brief gets prototype collection for sharding (_users or _graphs)
ShardingPrototype TRI_vocbase_t::shardingPrototype() const {
return _info.shardingPrototype();
}
/// @brief gets name of prototype collection for sharding (_users or _graphs)
std::string const& TRI_vocbase_t::shardingPrototypeName() const {
return _info.shardingPrototype() == ShardingPrototype::Users ? StaticStrings::UsersCollection : StaticStrings::GraphCollection;
}
std::vector<std::shared_ptr<arangodb::LogicalView>> TRI_vocbase_t::views() {
TRI_ASSERT(!ServerState::instance()->isCoordinator());
std::vector<std::shared_ptr<arangodb::LogicalView>> views;


@ -273,6 +273,15 @@ struct TRI_vocbase_t {
/// @brief closes a database and all collections
void shutdown();
/// @brief sets prototype collection for sharding (_users or _graphs)
void setShardingPrototype(ShardingPrototype type);
/// @brief gets prototype collection for sharding (_users or _graphs)
ShardingPrototype shardingPrototype() const;
/// @brief gets name of prototype collection for sharding (_users or _graphs)
std::string const& shardingPrototypeName() const;
/// @brief returns all known views
std::vector<std::shared_ptr<arangodb::LogicalView>> views();


@ -68,11 +68,11 @@ uint64_t getReplicationFactor(arangodb::RestoreFeature::Options const& options,
uint64_t result = options.defaultReplicationFactor;
isSatellite = false;
arangodb::velocypack::Slice s = slice.get("replicationFactor");
arangodb::velocypack::Slice s = slice.get(arangodb::StaticStrings::ReplicationFactor);
if (s.isInteger()) {
result = s.getNumericValue<uint64_t>();
} else if (s.isString()) {
if (s.copyString() == "satellite") {
if (s.copyString() == arangodb::StaticStrings::Satellite) {
isSatellite = true;
}
}
@ -90,7 +90,7 @@ uint64_t getReplicationFactor(arangodb::RestoreFeature::Options const& options,
auto parts = arangodb::basics::StringUtils::split(it, '=');
if (parts.size() == 1) {
// this is the default value, e.g. `--replicationFactor 2`
if (parts[0] == "satellite") {
if (parts[0] == arangodb::StaticStrings::Satellite) {
isSatellite = true;
} else {
result = arangodb::basics::StringUtils::uint64(parts[0]);
@ -102,7 +102,7 @@ uint64_t getReplicationFactor(arangodb::RestoreFeature::Options const& options,
// somehow invalid or different collection
continue;
}
if (parts[1] == "satellite") {
if (parts[1] == arangodb::StaticStrings::Satellite) {
isSatellite = true;
} else {
result = arangodb::basics::StringUtils::uint64(parts[1]);
@ -416,11 +416,11 @@ arangodb::Result sendRestoreCollection(arangodb::httpclient::SimpleHttpClient& h
bool isSatellite = false;
uint64_t replicationFactor = getReplicationFactor(options, parameters, isSatellite);
if (isSatellite) {
newOptions.add("replicationFactor", VPackValue("satellite"));
newOptions.add(arangodb::StaticStrings::ReplicationFactor, VPackValue(arangodb::StaticStrings::Satellite));
} else {
newOptions.add("replicationFactor", VPackValue(replicationFactor));
newOptions.add(arangodb::StaticStrings::ReplicationFactor, VPackValue(replicationFactor));
}
newOptions.add("numberOfShards", VPackValue(getNumberOfShards(options, parameters)));
newOptions.add(arangodb::StaticStrings::NumberOfShards, VPackValue(getNumberOfShards(options, parameters)));
newOptions.close();
VPackBuilder b;


@ -0,0 +1,105 @@
/*jshint globalstrict:false, strict:false */
/* global getOptions, assertEqual, assertUndefined, fail, arango */
////////////////////////////////////////////////////////////////////////////////
/// @brief test for security-related server options
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2010-2012 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB Inc, Cologne, Germany
///
/// @author Jan Steemann
/// @author Copyright 2019, ArangoDB Inc, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////
if (getOptions === true) {
return {
'cluster.max-number-of-shards': 3,
'cluster.min-replication-factor': 2,
'cluster.max-replication-factor': 3
};
}
var jsunity = require('jsunity');
const errors = require('@arangodb').errors;
const cn = "UnitTestsCollection";
let db = require('internal').db;
function testSuite() {
return {
setUp: function() {
db._drop(cn);
},
tearDown: function() {
db._drop(cn);
},
testCreateCollectionNoShards : function() {
let c = db._create(cn);
let props = c.properties();
assertEqual(1, props.numberOfShards);
},
testCreateCollectionOneShard : function() {
let c = db._create(cn, { numberOfShards: 1 });
let props = c.properties();
assertEqual(1, props.numberOfShards);
},
testCreateCollectionMaximumShards : function() {
let c = db._create(cn, { numberOfShards: 3 });
let props = c.properties();
assertEqual(3, props.numberOfShards);
},
testCreateCollectionTooManyShards : function() {
try {
db._create(cn, { numberOfShards: 4 });
fail();
} catch (err) {
assertEqual(errors.ERROR_CLUSTER_TOO_MANY_SHARDS.code, err.errorNum);
}
},
testCreateCollectionMinReplicationFactor : function() {
let c = db._create(cn, { replicationFactor: 2 });
let props = c.properties();
assertEqual(2, props.replicationFactor);
},
testCreateCollectionMaxReplicationFactor : function() {
let c = db._create(cn, { replicationFactor: 3 }, 2, { enforceReplicationFactor: false });
let props = c.properties();
assertEqual(3, props.replicationFactor);
},
testCreateCollectionReplicationFactorTooHigh : function() {
try {
db._create(cn, { replicationFactor: 4 }, 2, { enforceReplicationFactor: false });
fail();
} catch (err) {
assertEqual(errors.ERROR_BAD_PARAMETER.code, err.errorNum);
}
},
};
}
jsunity.run(testSuite);
return jsunity.done();


@ -472,11 +472,11 @@ function IResearchFeatureDDLTestSuite () {
var meta = { links: { "TestCollection0": { includeAllFields: true } } };
view.properties(meta, true); // partial update
var result = db._query("FOR doc IN TestView OPTIONS { waitForSync: true } SORT doc.name RETURN doc").toArray();
var result = db._query("FOR doc IN TestView OPTIONS { waitForSync: true } SORT doc.name RETURN doc").toArray();
assertEqual(0, result.length);
col0.save({ name: "quarter", text: "quick over" });
result = db._query("FOR doc IN TestView OPTIONS { waitForSync: true } SORT doc.name RETURN doc").toArray();
result = db._query("FOR doc IN TestView OPTIONS { waitForSync: true } SORT doc.name RETURN doc").toArray();
assertEqual(1, result.length);
assertEqual("quarter", result[0].name);
@ -494,7 +494,7 @@ function IResearchFeatureDDLTestSuite () {
meta = { links: { "TestCollection0": { includeAllFields: true } } };
view.properties(meta, true); // partial update
result = db._query("FOR doc IN TestView OPTIONS { waitForSync: true } SORT doc.name RETURN doc").toArray();
result = db._query("FOR doc IN TestView OPTIONS { waitForSync: true } SORT doc.name RETURN doc").toArray();
assertEqual(4, result.length);
assertEqual("full", result[0].name);
assertEqual("half", result[1].name);
@ -520,7 +520,7 @@ function IResearchFeatureDDLTestSuite () {
} };
view.properties(meta, true); // partial update
result = db._query("FOR doc IN TestView OPTIONS { waitForSync: true } SORT doc.name RETURN doc").toArray();
result = db._query("FOR doc IN TestView OPTIONS { waitForSync: true } SORT doc.name RETURN doc").toArray();
assertEqual(4, result.length);
assertEqual("full", result[0].name);
assertEqual("half", result[1].name);
@ -549,7 +549,7 @@ function IResearchFeatureDDLTestSuite () {
} };
view.properties(meta, true); // partial update
result = db._query("FOR doc IN TestView OPTIONS { waitForSync: true } SORT doc.name RETURN doc").toArray();
result = db._query("FOR doc IN TestView OPTIONS { waitForSync: true } SORT doc.name RETURN doc").toArray();
assertEqual(4, result.length);
assertEqual("full", result[0].name);
assertEqual("half", result[1].name);
@ -586,7 +586,7 @@ function IResearchFeatureDDLTestSuite () {
};
view.properties(meta, true); // partial update
var result = db._query("FOR doc IN TestView OPTIONS { waitForSync: true } SORT doc.name RETURN doc").toArray();
var result = db._query("FOR doc IN TestView OPTIONS { waitForSync: true } SORT doc.name RETURN doc").toArray();
assertEqual(0, result.length);
var properties = view.properties();
assertTrue(Object === properties.constructor);
@ -598,7 +598,8 @@ function IResearchFeatureDDLTestSuite () {
assertEqual((0.5).toFixed(6), properties.consolidationPolicy.threshold.toFixed(6));
col0.save({ name: "quarter", text: "quick over" });
result = db._query("FOR doc IN TestView OPTIONS { waitForSync: true } SORT doc.name RETURN doc").toArray();
result = db._query("FOR doc IN TestView OPTIONS { waitForSync: true } SORT doc.name RETURN doc").toArray();
assertEqual(1, result.length);
assertEqual("quarter", result[0].name);
@ -623,7 +624,7 @@ function IResearchFeatureDDLTestSuite () {
};
view.properties(meta, true); // partial update
result = db._query("FOR doc IN TestView OPTIONS { waitForSync: true } SORT doc.name RETURN doc").toArray();
result = db._query("FOR doc IN TestView OPTIONS { waitForSync: true } SORT doc.name RETURN doc").toArray();
assertEqual(4, result.length);
assertEqual("full", result[0].name);
assertEqual("half", result[1].name);
@ -664,7 +665,7 @@ function IResearchFeatureDDLTestSuite () {
};
view.properties(meta, true); // partial update
result = db._query("FOR doc IN TestView OPTIONS { waitForSync: true } SORT doc.name RETURN doc").toArray();
result = db._query("FOR doc IN TestView OPTIONS { waitForSync: true } SORT doc.name RETURN doc").toArray();
assertEqual(4, result.length);
assertEqual("full", result[0].name);
assertEqual("half", result[1].name);
@ -708,7 +709,7 @@ function IResearchFeatureDDLTestSuite () {
};
view.properties(meta, true); // partial update
result = db._query("FOR doc IN TestView OPTIONS { waitForSync: true } SORT doc.name RETURN doc").toArray();
result = db._query("FOR doc IN TestView OPTIONS { waitForSync: true } SORT doc.name RETURN doc").toArray();
assertEqual(4, result.length);
assertEqual("full", result[0].name);
assertEqual("half", result[1].name);
@ -724,7 +725,8 @@ function IResearchFeatureDDLTestSuite () {
assertEqual((0.5).toFixed(6), properties.consolidationPolicy.threshold.toFixed(6));
view.properties({}, false); // full update (reset to defaults)
result = db._query("FOR doc IN TestView OPTIONS { waitForSync: true } SORT doc.name RETURN doc").toArray();
result = db._query("FOR doc IN TestView OPTIONS { waitForSync: true } SORT doc.name RETURN doc").toArray();
assertEqual(0, result.length);
properties = view.properties();
assertTrue(Object === properties.constructor);
@ -836,7 +838,7 @@ function IResearchFeatureDDLTestSuite () {
var meta = { links: { "TestCollection0": { fields: { a: {} } } } };
view.properties(meta, true); // partial update
var result = db._query("FOR doc IN TestView OPTIONS { waitForSync: true } SORT doc.z RETURN doc").toArray();
var result = db._query("FOR doc IN TestView OPTIONS { waitForSync: true } SORT doc.z RETURN doc").toArray();
assertEqual(2, result.length);
assertEqual(0, result[0].z);
assertEqual(1, result[1].z);
@ -848,7 +850,7 @@ function IResearchFeatureDDLTestSuite () {
assertNotEqual(undefined, updatedMeta.links.TestCollection0.fields.b);
assertEqual(undefined, updatedMeta.links.TestCollection0.fields.a);
result = db._query("FOR doc IN TestView OPTIONS { waitForSync: true } SORT doc.z RETURN doc").toArray();
result = db._query("FOR doc IN TestView OPTIONS { waitForSync: true } SORT doc.z RETURN doc").toArray();
assertEqual(2, result.length);
assertEqual(2, result[0].z);
assertEqual(3, result[1].z);
@ -856,7 +858,7 @@ function IResearchFeatureDDLTestSuite () {
meta = { links: { "TestCollection0": { fields: { c: {} } } } };
view.properties(meta, false); // full update
result = db._query("FOR doc IN TestView OPTIONS { waitForSync: true } SORT doc.z RETURN doc").toArray();
result = db._query("FOR doc IN TestView OPTIONS { waitForSync: true } SORT doc.z RETURN doc").toArray();
assertEqual(2, result.length);
assertEqual(0, result[0].z);
assertEqual(2, result[1].z);


@ -1,5 +1,5 @@
/*jshint globalstrict:false, strict:false */
/*global assertEqual */
/*global assertEqual, fail */
////////////////////////////////////////////////////////////////////////////////
/// @brief test the statement class
@ -35,10 +35,7 @@ var ArangoStatement = require("@arangodb/arango-statement").ArangoStatement;
var db = arangodb.db;
var ERRORS = arangodb.errors;
////////////////////////////////////////////////////////////////////////////////
/// @brief test suite
////////////////////////////////////////////////////////////////////////////////
const options = { optimizer: { rules: ["-cluster-one-shard"] } };
function ExplainSuite () {
'use strict';
@ -46,20 +43,12 @@ function ExplainSuite () {
return {
////////////////////////////////////////////////////////////////////////////////
/// @brief set up
////////////////////////////////////////////////////////////////////////////////
setUp : function () {
setUpAll : function () {
db._drop(cn);
db._create(cn);
},
////////////////////////////////////////////////////////////////////////////////
/// @brief tear down
////////////////////////////////////////////////////////////////////////////////
tearDown : function () {
tearDownAll : function () {
db._drop(cn);
},
@ -71,8 +60,8 @@ function ExplainSuite () {
var st = new ArangoStatement(db, { query : "for u in" });
try {
st.explain();
}
catch (e) {
fail();
} catch (e) {
assertEqual(ERRORS.ERROR_QUERY_PARSE.code, e.errorNum);
}
},
@ -82,11 +71,11 @@ function ExplainSuite () {
////////////////////////////////////////////////////////////////////////////////
testExplainNoBindError : function () {
var st = new ArangoStatement(db, { query : "for i in [ 1 ] return @f" });
var st = new ArangoStatement(db, { query : "for i in [ 1 ] return @f", options });
try {
st.explain();
}
catch (e) {
fail();
} catch (e) {
assertEqual(ERRORS.ERROR_QUERY_BIND_PARAMETER_MISSING.code, e.errorNum);
}
},
@ -96,7 +85,7 @@ function ExplainSuite () {
////////////////////////////////////////////////////////////////////////////////
testExplainWithBind : function () {
var st = new ArangoStatement(db, { query : "for i in [ 1 ] return @f", bindVars: { f : 99 } });
var st = new ArangoStatement(db, { query : "for i in [ 1 ] return @f", bindVars: { f : 99 }, options });
var nodes = st.explain().plan.nodes, node;
node = nodes[0];
@ -146,7 +135,7 @@ function ExplainSuite () {
////////////////////////////////////////////////////////////////////////////////
testExplainOk1 : function () {
var st = new ArangoStatement(db, { query : "for u in [ 1, 2, 3 ] return u" });
var st = new ArangoStatement(db, { query : "for u in [ 1, 2, 3 ] return u", options });
var nodes = st.explain().plan.nodes, node;
node = nodes[0];
@ -168,7 +157,7 @@ function ExplainSuite () {
////////////////////////////////////////////////////////////////////////////////
testExplainOk2 : function () {
var st = new ArangoStatement(db, { query : "for u in [ 1, 2, 3 ] filter u != 1 for f in u return f" });
var st = new ArangoStatement(db, { query : "for u in [ 1, 2, 3 ] filter u != 1 for f in u return f", options });
var nodes = st.explain().plan.nodes, node;
node = nodes[0];
@ -198,7 +187,7 @@ function ExplainSuite () {
////////////////////////////////////////////////////////////////////////////////
testExplainRemove1 : function () {
var st = new ArangoStatement(db, { query : "for u in " + cn + " remove u in " + cn });
var st = new ArangoStatement(db, { query : "for u in " + cn + " remove u in " + cn, options });
var nodes = st.explain().plan.nodes, node;
node = nodes[0];
@ -223,7 +212,7 @@ function ExplainSuite () {
////////////////////////////////////////////////////////////////////////////////
testExplainRemove2 : function () {
var st = new ArangoStatement(db, { query : "for u in @@cn remove u in @@cn", bindVars: { "@cn" : cn } });
var st = new ArangoStatement(db, { query : "for u in @@cn remove u in @@cn", bindVars: { "@cn" : cn }, options });
var nodes = st.explain().plan.nodes, node;
node = nodes[0];
@ -248,7 +237,7 @@ function ExplainSuite () {
////////////////////////////////////////////////////////////////////////////////
testExplainInsert1 : function () {
var st = new ArangoStatement(db, { query : "for u in @@cn insert u in @@cn", bindVars: { "@cn": cn } });
var st = new ArangoStatement(db, { query : "for u in @@cn insert u in @@cn", bindVars: { "@cn": cn }, options });
var nodes = st.explain().plan.nodes, node;
node = nodes[0];
@ -287,7 +276,7 @@ function ExplainSuite () {
////////////////////////////////////////////////////////////////////////////////
testExplainInsert2 : function () {
var st = new ArangoStatement(db, { query : "for u in " + cn + " insert u in " + cn });
var st = new ArangoStatement(db, { query : "for u in " + cn + " insert u in " + cn, options });
var nodes = st.explain().plan.nodes, node;
node = nodes[0];
@ -328,7 +317,7 @@ function ExplainSuite () {
testExplainUpdate1 : function () {
var st = new ArangoStatement(db, {
query : "for u in @@cn update u._key with u in @@cn",
bindVars: { "@cn": cn }
bindVars: { "@cn": cn }, options
});
var nodes = st.explain().plan.nodes, node;
@ -371,7 +360,7 @@ function ExplainSuite () {
////////////////////////////////////////////////////////////////////////////////
testExplainUpdate2 : function () {
var st = new ArangoStatement(db, { query : "for u in " + cn + " update u._key with u in " + cn });
var st = new ArangoStatement(db, { query : "for u in " + cn + " update u._key with u in " + cn, options });
var nodes = st.explain().plan.nodes, node;
node = nodes[0];
@ -413,7 +402,7 @@ function ExplainSuite () {
////////////////////////////////////////////////////////////////////////////////
testExplainUpdate3 : function () {
var st = new ArangoStatement(db, { query : "for u in @@cn update u in @@cn", bindVars: { "@cn": cn } });
var st = new ArangoStatement(db, { query : "for u in @@cn update u in @@cn", bindVars: { "@cn": cn }, options });
var nodes = st.explain().plan.nodes, node;
node = nodes[0];
@ -452,7 +441,7 @@ function ExplainSuite () {
////////////////////////////////////////////////////////////////////////////////
testExplainUpdate4 : function () {
var st = new ArangoStatement(db, { query : "for u in " + cn + " update u in " + cn });
var st = new ArangoStatement(db, { query : "for u in " + cn + " update u in " + cn, options });
var nodes = st.explain().plan.nodes, node;
node = nodes[0];
@ -494,7 +483,7 @@ function ExplainSuite () {
testExplainReplace1 : function () {
var st = new ArangoStatement(db, {
query : "for u in @@cn replace u._key with u in @@cn",
bindVars: { "@cn": cn }
bindVars: { "@cn": cn }, options
});
var nodes = st.explain().plan.nodes, node;
@ -537,7 +526,7 @@ function ExplainSuite () {
////////////////////////////////////////////////////////////////////////////////
testExplainReplace2 : function () {
var st = new ArangoStatement(db, { query : "for u in " + cn + " replace u._key with u in " + cn });
var st = new ArangoStatement(db, { query : "for u in " + cn + " replace u._key with u in " + cn, options });
var nodes = st.explain().plan.nodes, node;
node = nodes[0];
@ -580,7 +569,7 @@ function ExplainSuite () {
////////////////////////////////////////////////////////////////////////////////
testExplainReplace3 : function () {
var st = new ArangoStatement(db, { query : "for u in @@cn replace u in @@cn", bindVars: { "@cn": cn } });
var st = new ArangoStatement(db, { query : "for u in @@cn replace u in @@cn", bindVars: { "@cn": cn }, options });
var nodes = st.explain().plan.nodes, node;
node = nodes[0];
@ -619,7 +608,7 @@ function ExplainSuite () {
////////////////////////////////////////////////////////////////////////////////
testExplainReplace4 : function () {
var st = new ArangoStatement(db, { query : "for u in " + cn + " replace u in " + cn });
var st = new ArangoStatement(db, { query : "for u in " + cn + " replace u in " + cn, options });
var nodes = st.explain().plan.nodes, node;
node = nodes[0];
@ -656,11 +645,5 @@ function ExplainSuite () {
};
}
////////////////////////////////////////////////////////////////////////////////
/// @brief executes the test suite
////////////////////////////////////////////////////////////////////////////////
jsunity.run(ExplainSuite);
return jsunity.done();


@ -33,7 +33,6 @@ var jsunity = require("jsunity");
var arangodb = require("@arangodb");
var db = arangodb.db;
////////////////////////////////////////////////////////////////////////////////
/// @brief test suite: statements
////////////////////////////////////////////////////////////////////////////////
@ -42,23 +41,14 @@ function StatementSuiteNonCluster () {
'use strict';
return {
////////////////////////////////////////////////////////////////////////////////
/// @brief set up
////////////////////////////////////////////////////////////////////////////////
setUp : function () {
db._useDatabase("_system");
},
////////////////////////////////////////////////////////////////////////////////
/// @brief tear down
////////////////////////////////////////////////////////////////////////////////
tearDown : function () {
try {
db._dropDatabase("UnitTestsDatabase0");
}
catch (err) {
} catch (err) {
// ignore this error
}
},
@ -68,7 +58,8 @@ function StatementSuiteNonCluster () {
////////////////////////////////////////////////////////////////////////////////
testExplainBindCollection : function () {
var st = db._createStatement({ query : "FOR i IN @@collection RETURN i" });
const options = { optimizer: { rules: ["-cluster-one-shard"] } };
var st = db._createStatement({ query : "FOR i IN @@collection RETURN i", options });
st.bind("@collection", "_users");
var result = st.explain();
@ -88,11 +79,5 @@ function StatementSuiteNonCluster () {
};
}
////////////////////////////////////////////////////////////////////////////////
/// @brief executes the test suite
////////////////////////////////////////////////////////////////////////////////
jsunity.run(StatementSuiteNonCluster);
return jsunity.done();


@ -130,6 +130,8 @@ function optimizerCollectInClusterSuite () {
function optimizerCollectInClusterSingleShardSuite () {
let c;
let opt = { optimizer: { rules: ["-cluster-one-shard"] } };
let opt2 = { optimizer: { rules: ["-cluster-one-shard", "-interchange-adjacent-enumerations"] } };
return {
setUpAll : function () {
@ -148,11 +150,11 @@ function optimizerCollectInClusterSingleShardSuite () {
testSingleCount : function () {
let query = "FOR doc IN " + c.name() + " COLLECT WITH COUNT INTO length RETURN length";
let results = AQL_EXECUTE(query);
let results = AQL_EXECUTE(query, null, opt);
assertEqual(1, results.json.length);
assertEqual(1000, results.json[0]);
let plan = AQL_EXPLAIN(query).plan;
let plan = AQL_EXPLAIN(query, null, opt).plan;
let nodeTypes = plan.nodes.map(function(node) {
return node.type === 'IndexNode' ? 'EnumerateCollectionNode' : node.type;
});
@ -164,11 +166,11 @@ function optimizerCollectInClusterSingleShardSuite () {
testSingleCountMulti : function () {
let query = "FOR doc1 IN " + c.name() + " FILTER doc1.value < 10 FOR doc2 IN " + c.name() + " COLLECT WITH COUNT INTO length RETURN length";
let results = AQL_EXECUTE(query);
let results = AQL_EXECUTE(query, null, opt);
assertEqual(1, results.json.length);
assertEqual(10000, results.json[0]);
let plan = AQL_EXPLAIN(query).plan;
let plan = AQL_EXPLAIN(query, null, opt).plan;
let nodeTypes = plan.nodes.map(function(node) {
return node.type === 'IndexNode' ? 'EnumerateCollectionNode' : node.type;
});
@ -181,13 +183,13 @@ function optimizerCollectInClusterSingleShardSuite () {
testSingleDistinct : function () {
let query = "FOR doc IN " + c.name() + " SORT doc.value RETURN DISTINCT doc.value";
let results = AQL_EXECUTE(query);
let results = AQL_EXECUTE(query, null, opt);
assertEqual(1000, results.json.length);
for (let i = 0; i < 1000; ++i) {
assertEqual(i, results.json[i]);
}
let plan = AQL_EXPLAIN(query).plan;
let plan = AQL_EXPLAIN(query, null, opt).plan;
let nodeTypes = plan.nodes.map(function(node) {
return node.type;
});
@ -199,13 +201,13 @@ function optimizerCollectInClusterSingleShardSuite () {
testSingleDistinctMulti : function () {
let query = "FOR doc1 IN " + c.name() + " FILTER doc1.value < 10 FOR doc2 IN " + c.name() + " SORT doc2.value RETURN DISTINCT doc2.value";
let results = AQL_EXECUTE(query, null, { optimizer: { rules: ["-interchange-adjacent-enumerations"] } });
let results = AQL_EXECUTE(query, null, opt2);
assertEqual(1000, results.json.length);
for (let i = 0; i < 1000; ++i) {
assertEqual(i, results.json[i]);
}
let plan = AQL_EXPLAIN(query).plan;
let plan = AQL_EXPLAIN(query, null, opt2).plan;
let nodeTypes = plan.nodes.map(function(node) {
return node.type;
});


@ -84,7 +84,7 @@ function optimizerRuleTestSuite() {
var loopto = 10;
internal.db._drop(colName);
skiplist = internal.db._create(colName);
skiplist = internal.db._create(colName, { numberOfShards: 2 });
var i, j;
for (j = 1; j <= loopto; ++j) {
for (i = 1; i <= loopto; ++i) {


@ -40,7 +40,7 @@ function optimizerRuleTestSuite () {
var ruleName = "undistribute-remove-after-enum-coll";
// various choices to control the optimizer:
var rulesNone = { optimizer: { rules: [ "-all" ] } };
var rulesAll = { optimizer: { rules: [ "+all" ] } };
var rulesAll = { optimizer: { rules: [ "+all", "-cluster-one-shard" ] } };
var thisRuleEnabled = { optimizer: { rules: [ "-all", "+distribute-filtercalc-to-cluster", "+" + ruleName ] } };
var cn1 = "UnitTestsAqlOptimizerRuleUndist1";


@ -55,7 +55,7 @@ function ahuacatlQueryOptimizerLimitTestSuite () {
setUpAll : function () {
internal.db._drop(cn);
collection = internal.db._create(cn);
collection = internal.db._create(cn, { numberOfShards: 2 });
let docs = [];
for (var i = 0; i < 100; ++i) {