mirror of https://gitee.com/bigwinds/arangodb
Merge branch 'devel' of https://github.com/arangodb/arangodb into devel
commit 8aaac6d5a2
@@ -1,6 +1,14 @@
 devel
 -----

+* fixed issue #2450
+
+* fixed issue #2448
+
+* fixed issue #2442
+
+* added 'x-content-type-options: nosniff' header to avoid an MSIE bug
+
+* changed the default value of `--ssl.protocol` from TLSv1 to TLSv1.2
+
 * AQL breaking change in cluster:
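For reference, the new response header can be checked from arangosh. A minimal sketch, assuming a locally running server (address and port are illustrative):

    // check the new 'x-content-type-options' response header
    var internal = require('internal');
    var result = internal.download('http://127.0.0.1:8529/_api/version');
    print(result.headers['x-content-type-options']); // expected: "nosniff"

In `--ssl.protocol` terms, the new TLSv1.2 default corresponds to protocol value `5` instead of the previous `4` (TLSv1), per the option's documented numbering.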
@@ -245,6 +253,7 @@ v3.1.19 (XXXX-XX-XX)
+* fixed issue #2440


 v3.1.18 (2017-04-18)
 --------------------
@@ -62,6 +62,7 @@ The request function takes the following options:
 * *json*: if `true`, *body* will be serialized to a JSON string and the *Content-Type* header will be set to `"application/json"`. Additionally the response body will also be parsed as JSON (unless *encoding* is set to `null`). Default: `false`.
 * *form* (optional): when set to a string or object and no *body* has been set, *body* will be set to a querystring representation of that value and the *Content-Type* header will be set to `"application/x-www-form-urlencoded"`. Also see *useQuerystring*.
 * *auth* (optional): an object with the properties *username* and *password* for HTTP Basic authentication or the property *bearer* for HTTP Bearer token authentication.
+* *sslProtocol* (optional): which TLS version should be used to connect to the URL. The default is `4`, which is TLS 1.0. See [ssl protocol](../../Administration/Configuration/SSL.md#ssl-protocol) for more options.
 * *followRedirect*: whether HTTP 3xx redirects should be followed. Default: `true`.
 * *maxRedirects*: the maximum number of redirects to follow. Default: `10`.
 * *encoding*: encoding to be used for the response body. If set to `null`, the response body will be returned as a `Buffer`. Default: `"utf-8"`.
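Putting the options together, a minimal sketch of a call using the new *sslProtocol* option (assuming the `@arangodb/request` module these docs describe; the target URL is hypothetical):

    const request = require('@arangodb/request');

    const response = request({
      method: 'get',
      url: 'https://service.example.com/status', // hypothetical endpoint
      json: true,        // serialize/parse JSON bodies
      sslProtocol: 5,    // TLS 1.2 instead of the default 4 (TLS 1.0)
      followRedirect: true,
      maxRedirects: 10
    });
    // with json: true, response.body holds the parsed response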
@@ -19,7 +19,7 @@ echo "CXX: $CXX"
 echo
 echo "$0: compiling ArangoDB"

-(cd build && make -j2)
+(cd build && make -j4)

 echo
 echo "$0: testing ArangoDB"
@@ -1448,11 +1448,6 @@ AgencyCommResult AgencyComm::sendWithFailover(
         "Inquiry failed (" << inq._statusCode << "). Keep trying ...";
       continue;
     }

-    AgencyCommManager::MANAGER->failed(std::move(connection), endpoint);
-    endpoint.clear();
-    connection = AgencyCommManager::MANAGER->acquire(endpoint);
-    continue;
-  }

   // sometimes the agency will return a 307 (temporary redirect)
@@ -101,8 +101,20 @@ Agent::~Agent() {
     }
   }

-  shutdown();
+  if (!isStopping()) {
+
+    {
+      CONDITION_LOCKER(guardW, _waitForCV);
+      guardW.broadcast();
+    }
+    {
+      CONDITION_LOCKER(guardA, _appendCV);
+      guardA.broadcast();
+    }
+
+    shutdown();
+  }

 }

 /// State machine
@@ -38,7 +38,9 @@ Compactor::Compactor(Agent* agent) :

 /// Dtor shuts down thread
 Compactor::~Compactor() {
-  //shutdown();
+  if (!isStopping()) {
+    shutdown();
+  }
 }
@@ -82,7 +82,11 @@ Constituent::Constituent()
       _votedFor(NO_LEADER) {}

 /// Shutdown if not already
-Constituent::~Constituent() { shutdown(); }
+Constituent::~Constituent() {
+  if (!isStopping()) {
+    shutdown();
+  }
+}

 /// Wait for sync
 bool Constituent::waitForSync() const { return _agent->config().waitForSync(); }
@@ -41,7 +41,11 @@ Inception::Inception() : Thread("Inception"), _agent(nullptr) {}
 Inception::Inception(Agent* agent) : Thread("Inception"), _agent(agent) {}

 // Shutdown if not already
-Inception::~Inception() { shutdown(); }
+Inception::~Inception() {
+  if (!isStopping()) {
+    shutdown();
+  }
+}

 /// Gossip to others
 /// - Get snapshot of gossip peers and agent pool
@@ -242,7 +242,7 @@ std::vector<Job::shard_t> Job::clones(

   std::vector<shard_t> ret;
   ret.emplace_back(collection, shard);  // add (collection, shard) as first item
-  typedef std::unordered_map<std::string, std::shared_ptr<Node>> UChildren;
+  //typedef std::unordered_map<std::string, std::shared_ptr<Node>> UChildren;

   try {
     std::string databasePath = planColPrefix + database,
@@ -151,7 +151,11 @@ Store& Store::operator=(Store&& rhs) {
 }

 /// Default dtor
-Store::~Store() { shutdown(); }
+Store::~Store() {
+  if (!isStopping()) {
+    shutdown();
+  }
+}

 /// Apply array of queries multiple queries to store
 /// Return vector of according success
@@ -38,9 +38,6 @@
 #include <velocypack/Iterator.h>
 #include <velocypack/velocypack-aliases.h>

-/// @brief typedef the template instantiation of the PathFinder
-typedef arangodb::graph::AttributeWeightShortestPathFinder ArangoDBPathFinder;
-
 using namespace arangodb::aql;
 using namespace arangodb::graph;
@@ -91,22 +88,12 @@ ShortestPathBlock::ShortestPathBlock(ExecutionEngine* engine,
   }
   _path = std::make_unique<arangodb::graph::ShortestPathResult>();

-  if (arangodb::ServerState::instance()->isCoordinator()) {
-    if (_opts->useWeight()) {
-      _finder.reset(
-          new arangodb::graph::AttributeWeightShortestPathFinder(_opts));
-    } else {
-      _finder.reset(
-          new arangodb::graph::ConstantWeightShortestPathFinder(_opts));
-    }
+  if (_opts->useWeight()) {
+    _finder.reset(
+        new arangodb::graph::AttributeWeightShortestPathFinder(_opts));
   } else {
-    if (_opts->useWeight()) {
-      _finder.reset(
-          new arangodb::graph::AttributeWeightShortestPathFinder(_opts));
-    } else {
-      _finder.reset(
-          new arangodb::graph::ConstantWeightShortestPathFinder(_opts));
-    }
+    _finder.reset(
+        new arangodb::graph::ConstantWeightShortestPathFinder(_opts));
   }

   if (arangodb::ServerState::instance()->isCoordinator()) {
@@ -476,6 +476,29 @@ void ClusterFeature::start() {
   }
 }

+void ClusterFeature::stop() {
+
+  if (_enableCluster) {
+    if (_heartbeatThread != nullptr) {
+      _heartbeatThread->beginShutdown();
+    }
+
+    if (_heartbeatThread != nullptr) {
+      int counter = 0;
+      while (_heartbeatThread->isRunning()) {
+        usleep(100000);
+        // emit warning after 5 seconds
+        if (++counter == 10 * 5) {
+          LOG_TOPIC(WARN, arangodb::Logger::FIXME) << "waiting for heartbeat thread to finish";
+        }
+      }
+    }
+  }
+
+}
+
 void ClusterFeature::unprepare() {
   if (_enableCluster) {
     if (_heartbeatThread != nullptr) {
@@ -73,6 +73,8 @@ class ClusterFeature : public application_features::ApplicationFeature {

   void setUnregisterOnShutdown(bool);

+  void stop() override final;
+
  private:
   bool _unregisterOnShutdown;
   bool _enableCluster;
@@ -1300,6 +1300,18 @@ int ClusterInfo::dropCollectionCoordinator(std::string const& databaseName,
   AgencyComm ac;
   AgencyCommResult res;

+  // First check that no other collection has a distributeShardsLike
+  // entry pointing to us:
+  auto coll = getCollection(databaseName, collectionID);
+  // not used # std::string id = std::to_string(coll->cid());
+  auto colls = getCollections(databaseName);
+  for (std::shared_ptr<LogicalCollection> const& p : colls) {
+    if (p->distributeShardsLike() == coll->name() ||
+        p->distributeShardsLike() == collectionID) {
+      return TRI_ERROR_CLUSTER_MUST_NOT_DROP_COLL_OTHER_DISTRIBUTESHARDSLIKE;
+    }
+  }
+
   double const realTimeout = getTimeout(timeout);
   double const endTime = TRI_microtime() + realTimeout;
   double const interval = getPollInterval();
@@ -2261,13 +2261,14 @@ std::unordered_map<std::string, std::vector<std::string>> distributeShards(

 #ifndef USE_ENTERPRISE
 std::unique_ptr<LogicalCollection>
-ClusterMethods::createCollectionOnCoordinator(
-    TRI_col_type_e collectionType, TRI_vocbase_t* vocbase, VPackSlice parameters,
-    bool ignoreDistributeShardsLikeErrors) {
+ClusterMethods::createCollectionOnCoordinator(TRI_col_type_e collectionType,
+                                              TRI_vocbase_t* vocbase,
+                                              VPackSlice parameters,
+                                              bool ignoreDistributeShardsLikeErrors) {
   auto col = std::make_unique<LogicalCollection>(vocbase, parameters);
   // Collection is a temporary collection object that undergoes sanity checks etc.
   // It is not used anywhere and will be cleaned up after this call.
   // Persist collection will return the real object.
   return persistCollectionInAgency(col.get(), ignoreDistributeShardsLikeErrors);
 }
 #endif
@@ -2282,22 +2283,22 @@ ClusterMethods::persistCollectionInAgency(
   std::string distributeShardsLike = col->distributeShardsLike();
   std::vector<std::string> dbServers;
   std::vector<std::string> avoid = col->avoidServers();

-  bool chainOfDistributeShardsLike = false;
-
   ClusterInfo* ci = ClusterInfo::instance();
   if (!distributeShardsLike.empty()) {

     CollectionNameResolver resolver(col->vocbase());
     TRI_voc_cid_t otherCid =
         resolver.getCollectionIdCluster(distributeShardsLike);

     if (otherCid != 0) {
+      bool chainOfDistributeShardsLike = false;
+
       std::string otherCidString
           = arangodb::basics::StringUtils::itoa(otherCid);

       try {
         std::shared_ptr<LogicalCollection> collInfo =
-          ci->getCollection(col->dbName(), otherCidString);
+            ci->getCollection(col->dbName(), otherCidString);
         if (!collInfo->distributeShardsLike().empty()) {
           chainOfDistributeShardsLike = true;
         }
@@ -2312,21 +2313,21 @@ ClusterMethods::persistCollectionInAgency(
         }
       }
     } catch (...) {}

+
     if (chainOfDistributeShardsLike) {
       THROW_ARANGO_EXCEPTION(TRI_ERROR_CLUSTER_CHAIN_OF_DISTRIBUTESHARDSLIKE);
     }

     col->distributeShardsLike(otherCidString);
   } else {
-    LOG_TOPIC(WARN, Logger::CLUSTER) << "WTF? " << ignoreDistributeShardsLikeErrors;
     if (ignoreDistributeShardsLikeErrors) {
       col->distributeShardsLike(std::string());
     } else {
       THROW_ARANGO_EXCEPTION(TRI_ERROR_CLUSTER_UNKNOWN_DISTRIBUTESHARDSLIKE);
     }
   }
-  } else if(!avoid.empty()) {
+  } else if (!avoid.empty()) {
     size_t replicationFactor = col->replicationFactor();
     dbServers = ci->getCurrentDBServers();
     if (dbServers.size() - avoid.size() >= replicationFactor) {
@@ -2337,9 +2338,8 @@ ClusterMethods::persistCollectionInAgency(
           }), dbServers.end());
     }
     std::random_shuffle(dbServers.begin(), dbServers.end());
-
   }

   // If the list dbServers is still empty, it will be filled in
   // distributeShards below.
@@ -267,7 +267,7 @@ class ClusterMethods {
   ////////////////////////////////////////////////////////////////////////////////

   static std::unique_ptr<LogicalCollection> persistCollectionInAgency(
-      LogicalCollection* col, bool ignoreDistributeShardsLikeErrors);
+      LogicalCollection* col, bool ignoreDistributeShardsLikeErrors = false);
 };

 }  // namespace arangodb
@@ -825,10 +825,8 @@ void HeartbeatThread::syncDBServerStatusQuo() {
   _backgroundJobScheduledOrRunning = true;

   // the JobGuard is in the operator() of HeartbeatBackgroundJob
-  if (!isStopping() && !_ioService->stopped()) {
-    _ioService->
-      post(HeartbeatBackgroundJob(shared_from_this(), TRI_microtime()));
-  }
+  _ioService->
+    post(HeartbeatBackgroundJob(shared_from_this(), TRI_microtime()));
 }

 ////////////////////////////////////////////////////////////////////////////////
@@ -59,7 +59,7 @@ TraverserEngineRegistry::EngineInfo::EngineInfo(TRI_vocbase_t* vocbase,
                                                 VPackSlice info)
     : _isInUse(false),
       _toBeDeleted(false),
-      _engine(std::move(BaseEngine::BuildEngine(vocbase, info))),
+      _engine(BaseEngine::BuildEngine(vocbase, info)),
       _timeToLive(0),
       _expires(0) {}
@@ -124,7 +124,7 @@ void GeneralCommTask::executeRequest(
       GeneralServerFeature::HANDLER_FACTORY->createHandler(
           std::move(request), std::move(response)));

-  // transfer statistics into handler
+  // give up, if we cannot find a handler
   if (handler == nullptr) {
     LOG_TOPIC(TRACE, arangodb::Logger::FIXME) << "no handler is known, giving up";
     handleSimpleError(rest::ResponseCode::NOT_FOUND, messageId);
@@ -129,6 +129,10 @@ void HttpCommTask::addResponse(HttpResponse* response,
                                StaticStrings::ExposedCorsHeaders);
   }

+  // use "IfNotSet"
+  response->setHeaderNCIfNotSet(StaticStrings::XContentTypeOptions,
+                                StaticStrings::NoSniff);
+
   // set "connection" header, keep-alive is the default
   response->setConnectionType(_closeRequested
                                   ? rest::ConnectionType::C_CLOSE
@@ -160,13 +164,27 @@ void HttpCommTask::addResponse(HttpResponse* response,
   if (!buffer._buffer->empty()) {
     LOG_TOPIC(TRACE, Logger::REQUESTS)
         << "\"http-request-response\",\"" << (void*)this << "\",\"" << _fullUrl
-        << "\",\"" << StringUtils::escapeUnicode(std::string(
-               buffer._buffer->c_str(), buffer._buffer->length()))
+        << "\",\""
+        << StringUtils::escapeUnicode(
+               std::string(buffer._buffer->c_str(), buffer._buffer->length()))
         << "\"";
   }

   // append write buffer and statistics
+  double const totalTime = RequestStatistics::ELAPSED_SINCE_READ_START(stat);
+
+  if (stat != nullptr && arangodb::Logger::isEnabled(arangodb::LogLevel::TRACE,
+                                                     Logger::REQUESTS)) {
+    LOG_TOPIC(TRACE, Logger::REQUESTS)
+        << "\"http-request-statistics\",\"" << (void*)this << "\",\""
+        << _connectionInfo.clientAddress << "\",\""
+        << HttpRequest::translateMethod(_requestType) << "\",\""
+        << HttpRequest::translateVersion(_protocolVersion) << "\","
+        << static_cast<int>(response->responseCode()) << ","
+        << _originalBodyLength << "," << responseBodyLength << ",\"" << _fullUrl
+        << "\"," << stat->timingsCsv();
+  }
+
   addWriteBuffer(buffer);

   // and give some request information
@@ -700,7 +718,7 @@ std::unique_ptr<GeneralResponse> HttpCommTask::createResponse(
 }

 void HttpCommTask::compactify() {
-  if (! _newRequest) {
+  if (!_newRequest) {
     return;
   }
@@ -731,7 +749,7 @@ void HttpCommTask::compactify() {
     TRI_ASSERT(_bodyPosition >= _readPosition);
     _bodyPosition -= _readPosition;
   }

   _readPosition = 0;
   }
 }
@@ -139,6 +139,15 @@ void VppCommTask::addResponse(VppResponse* response, RequestStatistics* stat) {
   auto buffers = createChunkForNetwork(slices, id, chunkSize, false);
   double const totalTime = RequestStatistics::ELAPSED_SINCE_READ_START(stat);

+  if (stat != nullptr && arangodb::Logger::isEnabled(arangodb::LogLevel::TRACE,
+                                                     Logger::REQUESTS)) {
+    LOG_TOPIC(TRACE, Logger::REQUESTS)
+        << "\"vst-request-statistics\",\"" << (void*)this << "\",\""
+        << VppRequest::translateVersion(_protocolVersion) << "\","
+        << static_cast<int>(response->responseCode()) << ","
+        << _connectionInfo.clientAddress << "\"," << stat->timingsCsv();
+  }
+
   if (buffers.empty()) {
     if (stat != nullptr) {
       stat->release();
@@ -708,10 +708,16 @@ bool Index::canUseConditionPart(
        other->type == arangodb::aql::NODE_TYPE_ATTRIBUTE_ACCESS)) {
     // value IN a.b OR value IN a.b[*]
     arangodb::aql::Ast::getReferencedVariables(access, variables);
+    if (other->type == arangodb::aql::NODE_TYPE_ATTRIBUTE_ACCESS &&
+        variables.find(reference) != variables.end()) {
+      variables.clear();
+      arangodb::aql::Ast::getReferencedVariables(other, variables);
+    }
   } else {
     // a.b == value OR a.b IN values
     arangodb::aql::Ast::getReferencedVariables(other, variables);
   }

   if (variables.find(reference) != variables.end()) {
     // yes. then we cannot use an index here
     return false;
@@ -758,22 +764,30 @@ void Index::expandInSearchValues(VPackSlice const base,
       VPackSlice current = oneLookup.at(i);
       if (current.hasKey(StaticStrings::IndexIn)) {
         VPackSlice inList = current.get(StaticStrings::IndexIn);
+        if (!inList.isArray()) {
+          // IN value is a non-array
+          result.clear();
+          result.openArray();
+          return;
+        }
+
         TRI_ASSERT(inList.isArray());
+        VPackValueLength nList = inList.length();
+
+        if (nList == 0) {
+          // Empty Array. short circuit, no matches possible
+          result.clear();
+          result.openArray();
+          return;
+        }
+
         std::unordered_set<VPackSlice,
                            arangodb::basics::VelocyPackHelper::VPackHash,
                            arangodb::basics::VelocyPackHelper::VPackEqual>
-            tmp(static_cast<size_t>(inList.length()),
+            tmp(static_cast<size_t>(nList),
                 arangodb::basics::VelocyPackHelper::VPackHash(),
                 arangodb::basics::VelocyPackHelper::VPackEqual());

-        TRI_ASSERT(inList.isArray());
-        if (inList.length() == 0) {
-          // Empty Array. short circuit, no matches possible
-          result.clear();
-          result.openArray();
-          result.close();
-          return;
-        }
         for (auto const& el : VPackArrayIterator(inList)) {
           tmp.emplace(el);
         }
@@ -58,7 +58,6 @@ class LogicalCollection;
 namespace transaction {
 class Methods;
 }
-;

 /// @brief a base class to iterate over the index. An iterator is requested
 /// at the index itself
@@ -102,23 +101,23 @@ class IndexIterator {

 /// @brief Special iterator if the condition cannot have any result
 class EmptyIndexIterator final : public IndexIterator {
  public:
  EmptyIndexIterator(LogicalCollection* collection, transaction::Methods* trx, ManagedDocumentResult* mmdr, arangodb::Index const* index)
      : IndexIterator(collection, trx, mmdr, index) {}

  ~EmptyIndexIterator() {}

  char const* typeName() const override { return "empty-index-iterator"; }

  bool next(TokenCallback const&, size_t) override {
    return false;
  }

  void reset() override {}

  void skip(uint64_t, uint64_t& skipped) override {
    skipped = 0;
  }
 };

 /// @brief a wrapper class to iterate over several IndexIterators.
@@ -448,14 +448,15 @@ IndexIterator* MMFilesEdgeIndex::iteratorForCondition(
   if (comp->type == aql::NODE_TYPE_OPERATOR_BINARY_IN) {
     // a.b IN values
     if (!valNode->isArray()) {
-      return nullptr;
+      // a.b IN non-array
+      return new EmptyIndexIterator(_collection, trx, mmdr, this);
     }

     return createInIterator(trx, mmdr, attrNode, valNode);
   }

   // operator type unsupported
-  return nullptr;
+  return new EmptyIndexIterator(_collection, trx, mmdr, this);
 }

 /// @brief specializes the condition for use with the index
@@ -120,8 +120,10 @@ MMFilesHashIndexLookupBuilder::MMFilesHashIndexLookupBuilder(
           TRI_IF_FAILURE("Index::permutationIN") {
             THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
           }
-          for (auto const& value : VPackArrayIterator(values)) {
-            tmp.emplace(value);
+          if (values.isArray()) {
+            for (auto const& value : VPackArrayIterator(values)) {
+              tmp.emplace(value);
+            }
           }
           if (tmp.empty()) {
             // IN [] short-circuit, cannot be fullfilled;
@@ -471,14 +471,15 @@ IndexIterator* MMFilesPrimaryIndex::iteratorForCondition(
   } else if (comp->type == aql::NODE_TYPE_OPERATOR_BINARY_IN) {
     // a.b IN values
     if (!valNode->isArray()) {
-      return nullptr;
+      // a.b IN non-array
+      return new EmptyIndexIterator(_collection, trx, mmdr, this);
     }

     return createInIterator(trx, mmdr, attrNode, valNode);
   }

   // operator type unsupported
-  return nullptr;
+  return new EmptyIndexIterator(_collection, trx, mmdr, this);
 }

 /// @brief specializes the condition for use with the index
@@ -42,6 +42,7 @@ MMFilesWalRecoveryFeature::MMFilesWalRecoveryFeature(ApplicationServer* server)
   startsAfter("Database");
   startsAfter("MMFilesLogfileManager");
   startsAfter("MMFilesPersistentIndex");
+  startsAfter("Scheduler");

   onlyEnabledWith("MMFilesEngine");
   onlyEnabledWith("MMFilesLogfileManager");
@@ -464,10 +464,11 @@ void RocksDBCollection::truncate(transaction::Methods* trx,
   // TODO maybe we could also reuse Index::drop, if we ensure the
   // implementations
   // don't do anything beyond deleting their contents
-  RocksDBKeyBounds indexBounds =
-      RocksDBKeyBounds::PrimaryIndex(42);  // default constructor?
   for (std::shared_ptr<Index> const& index : _indexes) {
     RocksDBIndex* rindex = static_cast<RocksDBIndex*>(index.get());

+    RocksDBKeyBounds indexBounds =
+        RocksDBKeyBounds::Empty();
     switch (rindex->type()) {
       case RocksDBIndex::TRI_IDX_TYPE_PRIMARY_INDEX:
         indexBounds = RocksDBKeyBounds::PrimaryIndex(rindex->objectId());
@@ -24,29 +24,32 @@
 #include "RocksDBCounterManager.h"

 #include "Basics/ReadLocker.h"
 #include "Basics/VelocyPackHelper.h"
 #include "Basics/WriteLocker.h"
 #include "Logger/Logger.h"
 #include "RocksDBEngine/RocksDBCommon.h"
 #include "RocksDBEngine/RocksDBKey.h"
 #include "RocksDBEngine/RocksDBKeyBounds.h"
 #include "RocksDBEngine/RocksDBValue.h"
 #include "VocBase/ticks.h"

 #include <rocksdb/utilities/transaction_db.h>
 #include <rocksdb/utilities/write_batch_with_index.h>
 #include <rocksdb/write_batch.h>

 #include <velocypack/Iterator.h>
 #include <velocypack/Parser.h>
 #include <velocypack/Slice.h>
 #include <velocypack/velocypack-aliases.h>

 using namespace arangodb;

-RocksDBCounterManager::CMValue::CMValue(VPackSlice const& slice) {
+RocksDBCounterManager::CMValue::CMValue(VPackSlice const& slice)
+    : _sequenceNum(0), _count(0), _revisionId(0) {
+  if (!slice.isArray()) {
+    // got a somewhat invalid slice. probably old data from before the key structure changes
+    return;
+  }
   TRI_ASSERT(slice.isArray());

   velocypack::ArrayIterator array(slice);
   if (array.valid()) {
@@ -146,6 +149,10 @@ void RocksDBCounterManager::removeCounter(uint64_t objectId) {

 /// Thread-Safe force sync
 Result RocksDBCounterManager::sync(bool force) {
+#if 0
+  writeSettings();
+#endif
+
   if (force) {
     while(true) {
       bool expected = false;
@@ -198,8 +205,7 @@ Result RocksDBCounterManager::sync(bool force) {
     }
   }

-  // we have to commit all counters in one batch otherwise
-  // there would be the possibility of
+  // we have to commit all counters in one batch
   rocksdb::Status s = rtrx->Commit();
   if (s.ok()) {
     for (std::pair<uint64_t, CMValue> const& pair : copy) {
@@ -211,36 +217,48 @@ Result RocksDBCounterManager::sync(bool force) {
 }

 void RocksDBCounterManager::readSettings() {
+#if 0
   RocksDBKey key = RocksDBKey::SettingsValue();

   std::string result;
   rocksdb::Status status = _db->Get(rocksdb::ReadOptions(), key.string(), &result);
   if (status.ok()) {
-    // key may not be there...
-    VPackSlice slice = VPackSlice(result.data());
-    TRI_ASSERT(slice.isObject());
-    LOG_TOPIC(TRACE, Logger::ENGINES) << "read initial settings: " << slice.toJson();
+    // key may not be there, so don't fail when not found
+    if (!result.empty()) {
+      try {
+        std::shared_ptr<VPackBuilder> builder = VPackParser::fromJson(result);
+        VPackSlice s = builder->slice();
+
+        uint64_t lastTick = basics::VelocyPackHelper::stringUInt64(s.get("tick"));
+        TRI_UpdateTickServer(lastTick);
+      } catch (...) {
+        LOG_TOPIC(WARN, Logger::ENGINES) << "unable to read initial settings: invalid data";
+      }
+    }
   }
+#endif
 }

 void RocksDBCounterManager::writeSettings() {
+#if 0
   RocksDBKey key = RocksDBKey::SettingsValue();

   VPackBuilder builder;
   builder.openObject();
   builder.add("tick", VPackValue(std::to_string(TRI_CurrentTickServer())));
   builder.close();

   VPackSlice slice = builder.slice();
   LOG_TOPIC(TRACE, Logger::ENGINES) << "writing settings: " << slice.toJson();

   rocksdb::Slice value(slice.startAs<char>(), slice.byteSize());

   rocksdb::Status status = _db->Put(rocksdb::WriteOptions(), key.string(), value);

-  if (status.ok()) {
-    // TODO
+  if (!status.ok()) {
+    LOG_TOPIC(TRACE, Logger::ENGINES) << "writing settings failed";
   }
+#endif
 }

 /// Parse counter values from rocksdb
@@ -271,7 +289,7 @@ struct WBReader : public rocksdb::WriteBatch::Handler {
   std::unordered_map<uint64_t, RocksDBCounterManager::CounterAdjustment> deltas;
   rocksdb::SequenceNumber currentSeqNum;

-  explicit WBReader() {}
+  explicit WBReader() : currentSeqNum(0) {}

   bool prepKey(const rocksdb::Slice& key) {
     if (RocksDBKey::type(key) == RocksDBEntryType::Document) {
@@ -99,7 +99,6 @@ class RocksDBCounterManager {
     explicit CMValue(arangodb::velocypack::Slice const&);
     void serialize(arangodb::velocypack::Builder&) const;
   };
-

   void readSettings();
   void writeSettings();
@@ -104,7 +104,10 @@ bool RocksDBEdgeIndexIterator::next(TokenCallback const& cb, size_t limit) {

   // acquire rocksdb collection
   auto rocksColl = RocksDBCollection::toRocksDBCollection(_collection);
-  while (limit > 0) {
+
+  while (true) {
+    TRI_ASSERT(limit > 0);
+
     while (_iterator->Valid() &&
            (_index->_cmp->Compare(_iterator->key(), _bounds.end()) < 0)) {
       StringRef edgeKey = RocksDBKey::primaryKey(_iterator->key());
@@ -121,14 +124,12 @@ bool RocksDBEdgeIndexIterator::next(TokenCallback const& cb, size_t limit) {
       }
     }  // TODO do we need to handle failed lookups here?
-    }
-    if (limit > 0) {
-      _keysIterator.next();
-      if (!updateBounds()) {
-        return false;
-      }
+
+    _keysIterator.next();
+    if (!updateBounds()) {
+      return false;
     }
   }
   return true;
 }

 void RocksDBEdgeIndexIterator::reset() {
@@ -323,14 +324,15 @@ IndexIterator* RocksDBEdgeIndex::iteratorForCondition(
   if (comp->type == aql::NODE_TYPE_OPERATOR_BINARY_IN) {
     // a.b IN values
     if (!valNode->isArray()) {
-      return nullptr;
+      // a.b IN non-array
+      return new EmptyIndexIterator(_collection, trx, mmdr, this);
     }

     return createInIterator(trx, mmdr, attrNode, valNode);
   }

   // operator type unsupported
-  return nullptr;
+  return new EmptyIndexIterator(_collection, trx, mmdr, this);
 }

 /// @brief specializes the condition for use with the index
@@ -170,6 +170,13 @@ void RocksDBEngine::start() {
       static_cast<int>(opts->_maxBytesForLevelMultiplier);
   _options.verify_checksums_in_compaction = opts->_verifyChecksumsInCompaction;
   _options.optimize_filters_for_hits = opts->_optimizeFiltersForHits;
+  _options.use_direct_reads = opts->_useDirectReads;
+  _options.use_direct_writes = opts->_useDirectWrites;
+  if (opts->_skipCorrupted) {
+    _options.wal_recovery_mode = rocksdb::WALRecoveryMode::kSkipAnyCorruptedRecords;
+  } else {
+    _options.wal_recovery_mode = rocksdb::WALRecoveryMode::kPointInTimeRecovery;
+  }

   _options.base_background_compactions =
       static_cast<int>(opts->_baseBackgroundCompactions);
@@ -178,11 +185,14 @@ void RocksDBEngine::start() {

   _options.max_log_file_size = static_cast<size_t>(opts->_maxLogFileSize);
   _options.keep_log_file_num = static_cast<size_t>(opts->_keepLogFileNum);
+  _options.recycle_log_file_num = static_cast<size_t>(opts->_recycleLogFileNum);
   _options.log_file_time_to_roll =
       static_cast<size_t>(opts->_logFileTimeToRoll);
   _options.compaction_readahead_size =
       static_cast<size_t>(opts->_compactionReadaheadSize);

+  _options.IncreaseParallelism(TRI_numberProcessors());
+
   _options.create_if_missing = true;
   _options.max_open_files = -1;
   _options.comparator = _cmp.get();
@@ -96,6 +96,7 @@ void RocksDBIndex::createCache() {
   if (!_useCache || _cachePresent) {
     // we should not get here if we do not need the cache
     // or if cache already created
     return;
   }

   TRI_ASSERT(_cache.get() == nullptr);
@@ -35,6 +35,10 @@ using namespace arangodb::velocypack;

 const char RocksDBKeyBounds::_stringSeparator = '\0';

+RocksDBKeyBounds RocksDBKeyBounds::Empty() {
+  return RocksDBKeyBounds();
+}
+
 RocksDBKeyBounds RocksDBKeyBounds::Databases() {
   return RocksDBKeyBounds(RocksDBEntryType::Database);
 }
@@ -98,6 +102,11 @@ rocksdb::Slice const RocksDBKeyBounds::end() const {
   return rocksdb::Slice(_endBuffer);
 }

+// constructor for an empty bound. do not use for anything but to
+// default-construct a key bound!
+RocksDBKeyBounds::RocksDBKeyBounds()
+    : _type(RocksDBEntryType::Database), _startBuffer(), _endBuffer() {}
+
 RocksDBKeyBounds::RocksDBKeyBounds(RocksDBEntryType type)
     : _type(type), _startBuffer(), _endBuffer() {
   switch (_type) {
@@ -38,8 +38,11 @@ namespace arangodb {

 class RocksDBKeyBounds {
  public:
-  RocksDBKeyBounds() = delete;
+  //////////////////////////////////////////////////////////////////////////////
+  /// @brief empty bounds
+  //////////////////////////////////////////////////////////////////////////////
+  static RocksDBKeyBounds Empty();

   //////////////////////////////////////////////////////////////////////////////
   /// @brief Bounds for list of all databases
   //////////////////////////////////////////////////////////////////////////////
@@ -125,7 +128,8 @@ class RocksDBKeyBounds {
   rocksdb::Slice const end() const;

  private:
-  RocksDBKeyBounds(RocksDBEntryType type);
+  RocksDBKeyBounds();
+  explicit RocksDBKeyBounds(RocksDBEntryType type);
   RocksDBKeyBounds(RocksDBEntryType type, uint64_t first);
   RocksDBKeyBounds(RocksDBEntryType type, uint64_t first,
                    std::string const& second);
@@ -502,14 +502,15 @@ IndexIterator* RocksDBPrimaryIndex::iteratorForCondition(
   } else if (comp->type == aql::NODE_TYPE_OPERATOR_BINARY_IN) {
     // a.b IN values
     if (!valNode->isArray()) {
-      return nullptr;
+      // a.b IN non-array
+      return new EmptyIndexIterator(_collection, trx, mmdr, this);
     }

     return createInIterator(trx, mmdr, attrNode, valNode);
   }

   // operator type unsupported
-  return nullptr;
+  return new EmptyIndexIterator(_collection, trx, mmdr, this);
 }

 /// @brief specializes the condition for use with the index
@@ -141,8 +141,7 @@ RocksDBReplicationResult RocksDBReplicationContext::dump(

   VPackBuilder builder(&_vpackOptions);

-  uint64_t size = 0;
-  auto cb = [this, &type, &buff, &adapter, &size,
+  auto cb = [this, &type, &buff, &adapter,
              &builder](DocumentIdentifierToken const& token) {
     builder.clear();
@@ -169,10 +168,9 @@ RocksDBReplicationResult RocksDBReplicationContext::dump(
     VPackSlice slice = builder.slice();
     dumper.dump(slice);
     buff.appendChar('\n');
-    size += (slice.byteSize() + 1);
   };

-  while (_hasMore && (size < chunkSize)) {
+  while (_hasMore && buff.length() < chunkSize) {
     try {
       _hasMore = _iter->next(cb, 10);  // TODO: adjust limit?
     } catch (std::exception const& ex) {
@@ -191,7 +189,7 @@ arangodb::Result RocksDBReplicationContext::dumpKeyChunks(VPackBuilder& b,
   TRI_ASSERT(_iter);

   RocksDBAllIndexIterator* primary =
-      dynamic_cast<RocksDBAllIndexIterator*>(_iter.get());
+      static_cast<RocksDBAllIndexIterator*>(_iter.get());

   std::string lowKey;
   VPackSlice highKey;  // FIXME: no good keeping this
@@ -266,7 +264,7 @@ arangodb::Result RocksDBReplicationContext::dumpKeys(VPackBuilder& b,
   }

   RocksDBAllIndexIterator* primary =
-      dynamic_cast<RocksDBAllIndexIterator*>(_iter.get());
+      static_cast<RocksDBAllIndexIterator*>(_iter.get());
   auto cb = [&](DocumentIdentifierToken const& token, StringRef const& key) {
     RocksDBToken const& rt = static_cast<RocksDBToken const&>(token);
@@ -57,6 +57,7 @@ class WBReader : public rocksdb::WriteBatch::Handler {
       _builder.add(
           "type",
          VPackValue(static_cast<uint64_t>(REPLICATION_COLLECTION_CREATE)));
+      break;
     }
     case RocksDBEntryType::Document: {
       _builder.add(
@@ -757,11 +757,16 @@ void RocksDBRestReplicationHandler::handleCommandInventory() {
   if (found) {
     ctx = _manager->find(StringUtils::uint64(batchId), busy);
   }
-  if (!found || busy || ctx == nullptr) {
+  if (!found) {
     generateError(rest::ResponseCode::NOT_FOUND, TRI_ERROR_CURSOR_NOT_FOUND,
                   "batchId not specified");
     return;
   }
+  if (busy || ctx == nullptr) {
+    generateError(rest::ResponseCode::NOT_FOUND, TRI_ERROR_CURSOR_NOT_FOUND,
+                  "context is busy or nullptr");
+    return;
+  }
   RocksDBReplicationContextGuard(_manager, ctx);

   TRI_voc_tick_t tick = TRI_CurrentTickServer();
@@ -292,7 +292,7 @@ RocksDBOperationResult RocksDBTransactionState::addOperation(
       static_cast<RocksDBTransactionCollection*>(findCollection(cid));

   if (collection == nullptr) {
-    std::string message = "collection '" + collection->collectionName() +
+    std::string message = "collection '" + std::to_string(cid) +
                           "' not found in transaction state";
     THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, message);
   }
@@ -1147,7 +1147,7 @@ IndexIterator* RocksDBVPackIndex::iteratorForCondition(
           // unsupported right now. Should have been rejected by
           // supportsFilterCondition
           TRI_ASSERT(false);
-          return nullptr;
+          return new EmptyIndexIterator(_collection, trx, mmdr, this);
         }
         value->toVelocyPackValue(searchValues);
       }
@@ -72,6 +72,8 @@ class JobQueueThread final
         std::shared_ptr<Job> job(jobPtr);

         _scheduler->post([this, self, job]() {
+          RequestStatistics::SET_QUEUE_END(job->_handler->statistics());
+
           try {
             job->_callback(std::move(job->_handler));
           } catch (std::exception& e) {
@@ -34,6 +34,7 @@
 #include "Basics/MutexLocker.h"
 #include "Basics/StringUtils.h"
 #include "Basics/Thread.h"
+#include "GeneralServer/RestHandler.h"
 #include "Logger/Logger.h"
 #include "Rest/GeneralResponse.h"
 #include "Scheduler/JobGuard.h"
@@ -328,6 +329,10 @@ bool Scheduler::hasQueueCapacity() const {
 }

 bool Scheduler::queue(std::unique_ptr<Job> job) {
+  auto jobQueue = _jobQueue.get();
+  auto queueSize = (jobQueue == nullptr) ? 0 : jobQueue->queueSize();
+  RequestStatistics::SET_QUEUE_START(job->_handler->statistics(), queueSize);
+
   return _jobQueue->queue(std::move(job));
 }
@@ -213,6 +213,20 @@ void RequestStatistics::fill(StatisticsDistribution& totalTime,
   bytesReceived = *TRI_BytesReceivedDistributionStatistics;
 }

+std::string RequestStatistics::timingsCsv() {
+  std::stringstream ss;
+
+  ss << std::setprecision(9) << std::fixed
+     << "read," << (_readEnd - _readStart)
+     << ",queue," << (_queueEnd - _queueStart)
+     << ",queue-size," << _queueSize
+     << ",request," << (_requestEnd - _requestStart)
+     << ",total," << (StatisticsFeature::time() - _readStart)
+     << ",error," << (_executeError ? "true" : "false");
+
+  return ss.str();
+}
+
 std::string RequestStatistics::to_string() {
   std::stringstream ss;
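For illustration, a single field list emitted by timingsCsv() has the shape below, with fixed nine-decimal seconds as set by std::setprecision(9) above (the values themselves are made up):

    read,0.000041000,queue,0.000008000,queue-size,0,request,0.001220000,total,0.001350000,error,false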
@@ -87,6 +87,19 @@ class RequestStatistics {
     }
   }

+  static void SET_QUEUE_START(RequestStatistics* stat, int64_t nrQueued) {
+    if (stat != nullptr) {
+      stat->_queueStart = StatisticsFeature::time();
+      stat->_queueSize = nrQueued;
+    }
+  }
+
+  static void SET_QUEUE_END(RequestStatistics* stat) {
+    if (stat != nullptr) {
+      stat->_queueEnd = StatisticsFeature::time();
+    }
+  }
+
   static void ADD_RECEIVED_BYTES(RequestStatistics* stat, size_t bytes) {
     if (stat != nullptr) {
       stat->_receivedBytes += bytes;
@@ -134,6 +147,7 @@ class RequestStatistics {
                    basics::StatisticsDistribution& bytesSent,
                    basics::StatisticsDistribution& bytesReceived);

+  std::string timingsCsv();
   std::string to_string();
   void trace_log();
@@ -161,6 +175,7 @@ class RequestStatistics {
     _readEnd = 0.0;
     _queueStart = 0.0;
     _queueEnd = 0.0;
+    _queueSize = 0;
     _requestStart = 0.0;
     _requestEnd = 0.0;
     _writeStart = 0.0;
@@ -178,8 +193,10 @@ class RequestStatistics {

   double _readStart;   // CommTask::processRead - read first byte of message
   double _readEnd;     // CommTask::processRead - message complete
-  double _queueStart;  // addJob to queue GeneralServer::handleRequest
-  double _queueEnd;    // exit queue DispatcherThread::handleJob
+  double _queueStart;  // job added to JobQueue
+  double _queueEnd;    // job removed from JobQueue
+  int64_t _queueSize;

   double _requestStart;  // GeneralServerJob::work
   double _requestEnd;
   double _writeStart;
@@ -34,14 +34,25 @@ struct DocumentIdentifierToken {
  public:
   // TODO Replace by Engine::InvalidToken
   constexpr DocumentIdentifierToken() : _data(0) {}

-  ~DocumentIdentifierToken() {}
-
-  DocumentIdentifierToken& operator=(DocumentIdentifierToken const& other) {
+  DocumentIdentifierToken(DocumentIdentifierToken const& other) noexcept : _data(other._data) {}
+  DocumentIdentifierToken& operator=(DocumentIdentifierToken const& other) noexcept {
     _data = other._data;
     return *this;
   }

+  DocumentIdentifierToken(DocumentIdentifierToken&& other) noexcept : _data(other._data) {
+    other._data = 0;
+  }
+
+  DocumentIdentifierToken& operator=(DocumentIdentifierToken&& other) noexcept {
+    _data = other._data;
+    other._data = 0;
+    return *this;
+  }
+
+  ~DocumentIdentifierToken() {}
+
   inline bool operator==(DocumentIdentifierToken const& other) const { return _data == other._data; }

   inline bool operator==(uint64_t const& other) const { return _data == other; }
@@ -104,7 +104,7 @@ class StorageEngine : public application_features::ApplicationFeature {
   // status functionality
   // --------------------

-  // return the name of the storage engine
+  // return the name of the specific storage engine e.g. rocksdb
   char const* typeName() const { return _typeName.c_str(); }

   // inventory functionality
@@ -392,40 +392,41 @@ static void JS_ChecksumCollection(

   ManagedDocumentResult mmdr;
   trx.invokeOnAllElements(col->name(), [&hash, &withData, &withRevisions, &trx, &collection, &mmdr](DocumentIdentifierToken const& token) {
-    collection->readDocument(&trx, token, mmdr);
-    VPackSlice const slice(mmdr.vpack());
+    if (collection->readDocument(&trx, token, mmdr)) {
+      VPackSlice const slice(mmdr.vpack());

-    uint64_t localHash = transaction::helpers::extractKeyFromDocument(slice).hashString();
+      uint64_t localHash = transaction::helpers::extractKeyFromDocument(slice).hashString();

-    if (withRevisions) {
-      localHash += transaction::helpers::extractRevSliceFromDocument(slice).hash();
-    }
+      if (withRevisions) {
+        localHash += transaction::helpers::extractRevSliceFromDocument(slice).hash();
+      }

-    if (withData) {
-      // with data
-      uint64_t const n = slice.length() ^ 0xf00ba44ba5;
-      uint64_t seed = fasthash64_uint64(n, 0xdeadf054);
+      if (withData) {
+        // with data
+        uint64_t const n = slice.length() ^ 0xf00ba44ba5;
+        uint64_t seed = fasthash64_uint64(n, 0xdeadf054);

-      for (auto const& it : VPackObjectIterator(slice, false)) {
-        // loop over all attributes, but exclude _rev, _id and _key
-        // _id is different for each collection anyway, _rev is covered by withRevisions, and _key
-        // was already handled before
-        VPackValueLength keyLength;
-        char const* key = it.key.getString(keyLength);
-        if (keyLength >= 3 &&
-            key[0] == '_' &&
-            ((keyLength == 3 && memcmp(key, "_id", 3) == 0) ||
-             (keyLength == 4 && (memcmp(key, "_key", 4) == 0 || memcmp(key, "_rev", 4) == 0)))) {
-          // exclude attribute
-          continue;
-        }
+        for (auto const& it : VPackObjectIterator(slice, false)) {
+          // loop over all attributes, but exclude _rev, _id and _key
+          // _id is different for each collection anyway, _rev is covered by withRevisions, and _key
+          // was already handled before
+          VPackValueLength keyLength;
+          char const* key = it.key.getString(keyLength);
+          if (keyLength >= 3 &&
+              key[0] == '_' &&
+              ((keyLength == 3 && memcmp(key, "_id", 3) == 0) ||
+               (keyLength == 4 && (memcmp(key, "_key", 4) == 0 || memcmp(key, "_rev", 4) == 0)))) {
+            // exclude attribute
+            continue;
+          }

-        localHash ^= it.key.hash(seed) ^ 0xba5befd00d;
-        localHash += it.value.normalizedHash(seed) ^ 0xd4129f526421;
+          localHash ^= it.key.hash(seed) ^ 0xba5befd00d;
+          localHash += it.value.normalizedHash(seed) ^ 0xd4129f526421;
+        }
       }
-    }

-    hash ^= localHash;
+      hash ^= localHash;
+    }
     return true;
   });
@@ -21,12 +21,9 @@
 /// @author Dr. Frank Celler
 ////////////////////////////////////////////////////////////////////////////////

-#include "v8-replication.h"
 #include "Basics/ReadLocker.h"
 #include "Cluster/ClusterComm.h"
 #include "Cluster/ClusterFeature.h"
-#include "MMFiles/MMFilesLogfileManager.h"
-#include "MMFiles/mmfiles-replication-dump.h"
 #include "Replication/InitialSyncer.h"
 #include "Rest/Version.h"
 #include "RestServer/ServerIdFeature.h"
@@ -35,6 +32,14 @@
 #include "V8/v8-utils.h"
 #include "V8/v8-vpack.h"
 #include "V8Server/v8-vocbaseprivate.h"
+#include "v8-replication.h"
+
+// FIXME to be removed (should be storage engine independent - get it working now)
+#include "StorageEngine/EngineSelectorFeature.h"
+#include "MMFiles/MMFilesLogfileManager.h"
+#include "MMFiles/mmfiles-replication-dump.h"
+#include "RocksDBEngine/RocksDBEngine.h"
+#include "RocksDBEngine/RocksDBCommon.h"

 #include <velocypack/Builder.h>
 #include <velocypack/Parser.h>
@@ -51,21 +56,41 @@ using namespace arangodb::rest;

 static void JS_StateLoggerReplication(
     v8::FunctionCallbackInfo<v8::Value> const& args) {
+  // FIXME: use code in RestReplicationHandler and get rid of storage-engine
+  // depended code here
+  //
   TRI_V8_TRY_CATCH_BEGIN(isolate);
   v8::HandleScope scope(isolate);

-  MMFilesLogfileManagerState const s =
-      MMFilesLogfileManager::instance()->state();
-
-  v8::Handle<v8::Object> result = v8::Object::New(isolate);
+  std::string engineName = EngineSelectorFeature::ENGINE->typeName();

   v8::Handle<v8::Object> state = v8::Object::New(isolate);
   state->Set(TRI_V8_ASCII_STRING("running"), v8::True(isolate));
-  state->Set(TRI_V8_ASCII_STRING("lastLogTick"), TRI_V8UInt64String<TRI_voc_tick_t>(isolate, s.lastCommittedTick));
-  state->Set(TRI_V8_ASCII_STRING("lastUncommittedLogTick"), TRI_V8UInt64String<TRI_voc_tick_t>(isolate, s.lastAssignedTick));
-  state->Set(TRI_V8_ASCII_STRING("totalEvents"),
-             v8::Number::New(isolate, static_cast<double>(s.numEvents + s.numEventsSync)));
-  state->Set(TRI_V8_ASCII_STRING("time"), TRI_V8_STD_STRING(s.timeString));
+
+  if(engineName == "mmfiles"){
+    MMFilesLogfileManagerState const s = MMFilesLogfileManager::instance()->state();
+    state->Set(TRI_V8_ASCII_STRING("lastLogTick"),
+               TRI_V8UInt64String<TRI_voc_tick_t>(isolate, s.lastCommittedTick));
+    state->Set(TRI_V8_ASCII_STRING("lastUncommittedLogTick"), TRI_V8UInt64String<TRI_voc_tick_t>(isolate, s.lastAssignedTick));
+    state->Set(TRI_V8_ASCII_STRING("totalEvents"),
+               v8::Number::New(isolate, static_cast<double>(s.numEvents + s.numEventsSync)));
+    state->Set(TRI_V8_ASCII_STRING("time"), TRI_V8_STD_STRING(s.timeString));
+  } else if (engineName == "rocksdb") {
+    rocksdb::TransactionDB* db = rocksutils::globalRocksDB();
+    uint64_t lastTick = db->GetLatestSequenceNumber();
+    state->Set(TRI_V8_ASCII_STRING("lastLogTick"),
+               TRI_V8UInt64String<TRI_voc_tick_t>(isolate, lastTick));
+    state->Set(TRI_V8_ASCII_STRING("lastUncommittedLogTick"),
+               TRI_V8UInt64String<TRI_voc_tick_t>(isolate, lastTick));
+    state->Set(TRI_V8_ASCII_STRING("totalEvents"),
+               v8::Number::New(isolate, static_cast<double>(0))); //s.numEvents + s.numEventsSync)));
+    state->Set(TRI_V8_ASCII_STRING("time"), TRI_V8_STD_STRING(utilities::timeString()));
+  } else {
+    TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "invalid storage engine");
+    return;
+  }
+
+  v8::Handle<v8::Object> result = v8::Object::New(isolate);
   result->Set(TRI_V8_ASCII_STRING("state"), state);

   v8::Handle<v8::Object> server = v8::Object::New(isolate);
@@ -91,6 +116,12 @@ static void JS_TickRangesLoggerReplication(
   TRI_V8_TRY_CATCH_BEGIN(isolate);
   v8::HandleScope scope(isolate);

+  std::string engineName = EngineSelectorFeature::ENGINE->typeName();
+  if( engineName != "mmfiles"){
+    TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "only implemented for mmfiles engine");
+    return;
+  }
+
   auto const& ranges = MMFilesLogfileManager::instance()->ranges();

   v8::Handle<v8::Array> result = v8::Array::New(isolate, (int)ranges.size());
@@ -121,6 +152,12 @@ static void JS_FirstTickLoggerReplication(
   TRI_V8_TRY_CATCH_BEGIN(isolate);
   v8::HandleScope scope(isolate);

+  std::string engineName = EngineSelectorFeature::ENGINE->typeName();
+  if( engineName != "mmfiles"){
+    TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "only implemented for mmfiles engine");
+    return;
+  }
+
   auto const& ranges = MMFilesLogfileManager::instance()->ranges();

   TRI_voc_tick_t tick = UINT64_MAX;
@@ -152,6 +189,12 @@ static void JS_LastLoggerReplication(
   TRI_V8_TRY_CATCH_BEGIN(isolate);
   v8::HandleScope scope(isolate);

+  std::string engineName = EngineSelectorFeature::ENGINE->typeName();
+  if( engineName != "mmfiles"){
+    TRI_V8_THROW_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "only implemented for mmfiles engine");
+    return;
+  }
+
   TRI_vocbase_t* vocbase = GetContextVocBase(isolate);

   if (vocbase == nullptr) {
@@ -74,9 +74,9 @@ macro (install_readme input output)
   if (MSVC)
     set(CRLFSTYLE "CRLF")
   endif ()
-  configure_file(${PROJECT_SOURCE_DIR}/${input} "${PROJECT_BINARY_DIR}/${input}" NEWLINE_STYLE ${CRLFSTYLE})
+  configure_file(${PROJECT_SOURCE_DIR}/${input} "${PROJECT_BINARY_DIR}/${output}" NEWLINE_STYLE ${CRLFSTYLE})
   install(
-    FILES "${PROJECT_BINARY_DIR}/${input}"
+    FILES "${PROJECT_BINARY_DIR}/${output}"
     DESTINATION "${where}"
   )
 endmacro ()
@@ -7,7 +7,6 @@ endif()
 if(SNAPCRAFT_FOUND)
   set(SNAPCRAFT_TEMPLATE_DIR "${CMAKE_CURRENT_SOURCE_DIR}/Installation/Ubuntu")
   set(SNAPCRAFT_SOURCE_DIR "${CMAKE_BINARY_DIR}/_CPack_Packages/SNAP")
-  set(CPACK_SNAP_PACKAGE_FILE_NAME "${CPACK_PACKAGE_NAME}_${CPACK_PACKAGE_VERSION}_*${ARANGODB_PACKAGE_ARCHITECTURE}.snap")

   message(STATUS "Creating snap package")
@@ -31,7 +30,12 @@ if(SNAPCRAFT_FOUND)
   file(
     COPY        "${SNAPCRAFT_TEMPLATE_DIR}/arangodb.png"
     DESTINATION "${SNAPCRAFT_SOURCE_DIR}/"
-    )
+  )
+
+  set(CPACK_SNAP_PACKAGE_FILE_NAME "${CPACK_PACKAGE_NAME}_${CPACK_PACKAGE_VERSION}_${ARANGODB_PACKAGE_ARCHITECTURE}.snap")
+  if(NOT EXISTS ${CPACK_SNAP_PACKAGE_FILE_NAME})
+    set(CPACK_SNAP_PACKAGE_FILE_NAME "${CPACK_PACKAGE_NAME}_${CPACK_PACKAGE_VERSION}-${ARANGODB_PACKAGE_REVISION}_${ARANGODB_PACKAGE_ARCHITECTURE}.snap")
+  endif()
   add_custom_target(snap
     COMMENT "create snap-package"
     COMMAND ${SNAP_EXE} snap
@@ -608,13 +608,14 @@
       this.waitForInit(this.documents.bind(this), colid, pageid);
       return;
     }
-    if (!this.documentsView) {
-      this.documentsView = new window.DocumentsView({
-        collection: new window.ArangoDocuments(),
-        documentStore: this.arangoDocumentStore,
-        collectionsStore: this.arangoCollectionsStore
-      });
+    if (this.documentsView) {
+      this.documentsView.removeView();
     }
+    this.documentsView = new window.DocumentsView({
+      collection: new window.ArangoDocuments(),
+      documentStore: this.arangoDocumentStore,
+      collectionsStore: this.arangoCollectionsStore
+    });
     this.documentsView.setCollectionId(colid, pageid);
     this.documentsView.render();
   },
@@ -30,6 +30,14 @@
       next: null
     },

+    removeView: function () {
+      this.$el.empty().off(); /* off to unbind the events */
+      this.stopListening();
+      this.unbind();
+      delete this.el;
+      return this;
+    },
+
     editButtons: ['#deleteSelected', '#moveSelected'],

     initialize: function (options) {
@@ -44,22 +44,52 @@
     $('.login-window #databases').show();

     $.ajax(url).success(function (permissions) {
-      // enable db select and login button
-      $('#loginDatabase').html('');
-      // fill select with allowed dbs
-      _.each(permissions.result, function (rule, db) {
-        if (errCallback) {
-          $('#loginDatabase').append(
-            '<option>' + db + '</option>'
-          );
-        } else {
-          $('#loginDatabase').append(
-            '<option>' + rule + '</option>'
-          );
-        }
-      });
+      var successFunc = function (availableDbs) {
+        // enable db select and login button
+        $('#loginDatabase').html('');
+        // fill select with allowed dbs
+        _.each(permissions.result, function (rule, db) {
+          if (errCallback) {
+            if (availableDbs) {
+              if (availableDbs.indexOf(db) > -1) {
+                $('#loginDatabase').append(
+                  '<option>' + db + '</option>'
+                );
+              }
+            } else {
+              $('#loginDatabase').append(
+                '<option>' + db + '</option>'
+              );
+            }
+          } else {
+            if (availableDbs) {
+              if (availableDbs.indexOf(db) > -1) {
+                $('#loginDatabase').append(
+                  '<option>' + rule + '</option>'
+                );
+              }
+            } else {
+              $('#loginDatabase').append(
+                '<option>' + rule + '</option>'
+              );
+            }
+          }
+        });

-      self.renderDBS();
+        self.renderDBS();
+      };
+
+      // fetch available dbs
+      var availableDbs;
+      try {
+        $.ajax(arangoHelper.databaseUrl('/_api/database/user')).success(function (dbs) {
+          availableDbs = dbs.result;
+          successFunc(availableDbs);
+        });
+      } catch (ignore) {
+        console.log(ignore);
+        successFunc();
+      }
     }).error(function () {
       if (errCallback) {
         errCallback();
@@ -191,14 +221,35 @@
       // enable db select and login button
       $('#loginDatabase').html('');

-      // fill select with allowed dbs
-      _.each(permissions.result, function (db, key) {
-        $('#loginDatabase').append(
-          '<option>' + key + '</option>'
-        );
-      });
+      var successFunc = function (availableDbs) {
+        if (availableDbs) {
+          _.each(permissions.result, function (db, key) {
+            if (availableDbs.indexOf(key) > -1) {
+              $('#loginDatabase').append(
+                '<option>' + key + '</option>'
+              );
+            }
+          });
+        } else {
+          // fill select with allowed dbs
+          _.each(permissions.result, function (db, key) {
+            $('#loginDatabase').append(
+              '<option>' + key + '</option>'
+            );
+          });
+        }

-      self.renderDBS();
+        self.renderDBS();
+      };
+
+      // fetch available dbs
+      try {
+        $.ajax(arangoHelper.databaseUrl('/_api/database/user')).success(function (dbs) {
+          successFunc(dbs.result);
+        });
+      } catch (ignore) {
+        successFunc();
+      }
     }).error(function () {
       $('.wrong-credentials').show();
     });
@@ -42,7 +42,7 @@ describe('HTTP headers in Foxx services', function () {
       expect(result.headers['x-foobar']).to.equal('baz');
       expect(result.headers['x-nofoobar']).to.equal('baz');
       const irrelevantHeaders = ['http/1.1', 'connection', 'content-type'];
-      expect(result.headers['access-control-expose-headers']).to.equal(Object.keys(result.headers).filter(x => !x.startsWith('access-control-') && !irrelevantHeaders.includes(x)).sort().join(', '));
+      expect(result.headers['access-control-expose-headers']).to.equal(Object.keys(result.headers).filter(x => !x.startsWith('x-content-type-options') && !x.startsWith('access-control-') && !irrelevantHeaders.includes(x)).sort().join(', '));
       expect(result.headers['access-control-allow-credentials']).to.equal('true');
     });
@ -68,7 +68,7 @@ describe('HTTP headers in Foxx services', function () {
|
|||
var opts = { headers: { origin }, method: "POST" };
|
||||
var result = internal.download(origin + "/unittest/headers/header-empty", "", opts);
|
||||
const irrelevantHeaders = ['http/1.1', 'connection', 'content-type'];
|
||||
expect(result.headers['access-control-expose-headers']).to.equal(Object.keys(result.headers).filter(x => !x.startsWith('access-control-') && !irrelevantHeaders.includes(x)).sort().join(', '));
|
||||
expect(result.headers['access-control-expose-headers']).to.equal(Object.keys(result.headers).filter(x => !x.startsWith('x-content-type-options') && !x.startsWith('access-control-') && !irrelevantHeaders.includes(x)).sort().join(', '));
|
||||
expect(result.headers['access-control-allow-credentials']).to.equal('true');
|
||||
});
|
||||
});
|
||||
|
|
|
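Both test updates account for the new, always-present x-content-type-options response header when computing the expected access-control-expose-headers value. The expectation reduces to a filter chain like this sketch (helper name hypothetical):

// Mirrors the expectation above: expose every response header except the
// CORS headers themselves, transport noise, and x-content-type-options.
function expectedExposeHeaders(headers) {
  const irrelevantHeaders = ['http/1.1', 'connection', 'content-type'];
  return Object.keys(headers)
    .filter(x => !x.startsWith('x-content-type-options'))
    .filter(x => !x.startsWith('access-control-'))
    .filter(x => !irrelevantHeaders.includes(x))
    .sort()
    .join(', ');
}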
@@ -168,6 +168,9 @@ function request (req) {
  } else {
    options.maxRedirects = 10;
  }
  if (req.sslProtocol) {
    options.sslProtocol = req.sslProtocol;
  }
  let result = internal.download(path, body, options);

  return new Response(result, req.encoding, req.json);
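This hunk forwards an optional sslProtocol setting from the request object through to internal.download(), so callers can pick the TLS version per request. A hedged usage sketch (the endpoint and the numeric value are assumptions; in ArangoDB's bundled request module `4` is documented as TLS 1.0):

// assumed usage of the bundled JS request module with the new option
const request = require('@arangodb/request');
const res = request({
  method: 'get',
  url: 'https://127.0.0.1:8530/_api/version',  // hypothetical endpoint
  sslProtocol: 4  // choose the TLS version for this single request
});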
@@ -183,7 +183,7 @@ function ReplicationLoggerSuite () {
      assertTrue(typeof tick === 'string');
      assertNotEqual("", state.time);
      assertMatch(/^\d+-\d+-\d+T\d+:\d+:\d+Z$/, state.time);

      // query the state again
      state = replication.logger.state().state;
      assertTrue(state.running);
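The replaced assertion pins the logger state's time field to an ISO-8601-style UTC timestamp instead of merely requiring a non-empty string. For instance:

// the pattern above accepts values such as "2017-05-02T09:30:00Z"
var ok = /^\d+-\d+-\d+T\d+:\d+:\d+Z$/.test('2017-05-02T09:30:00Z');  // true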
@@ -119,6 +119,28 @@ function optimizerIndexesTestSuite () {
      assertEqual(0, results.stats.scannedIndex);
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief test _id
////////////////////////////////////////////////////////////////////////////////

    testUsePrimaryIdInAttributeAccess : function () {
      var values = [ "UnitTestsCollection/test1", "UnitTestsCollection/test2", "UnitTestsCollection/test21", "UnitTestsCollection/test30" ];
      var query = "LET data = NOOPT({ ids : " + JSON.stringify(values) + " }) FOR i IN " + c.name() + " FILTER i._id IN data.ids RETURN i.value";

      var plan = AQL_EXPLAIN(query).plan;
      var nodeTypes = plan.nodes.map(function(node) {
        return node.type;
      });

      assertEqual("SingletonNode", nodeTypes[0], query);
      assertNotEqual(-1, nodeTypes.indexOf("IndexNode"), query);

      var results = AQL_EXECUTE(query);
      assertEqual([ 1, 2, 21, 30 ], results.json.sort(), query);
      assertEqual(0, results.stats.scannedFull);
      assertEqual(4, results.stats.scannedIndex);
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief test _id
////////////////////////////////////////////////////////////////////////////////

@@ -205,6 +227,28 @@ function optimizerIndexesTestSuite () {
      assertEqual(0, results.stats.scannedIndex);
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief test _id
////////////////////////////////////////////////////////////////////////////////

    testUsePrimaryKeyInAttributeAccess : function () {
      var values = [ "test1", "test2", "test21", "test30" ];
      var query = "LET data = NOOPT({ ids : " + JSON.stringify(values) + " }) FOR i IN " + c.name() + " FILTER i._key IN data.ids RETURN i.value";

      var plan = AQL_EXPLAIN(query).plan;
      var nodeTypes = plan.nodes.map(function(node) {
        return node.type;
      });

      assertEqual("SingletonNode", nodeTypes[0], query);
      assertNotEqual(-1, nodeTypes.indexOf("IndexNode"), query);

      var results = AQL_EXECUTE(query);
      assertEqual([ 1, 2, 21, 30 ], results.json.sort(), query);
      assertEqual(0, results.stats.scannedFull);
      assertEqual(4, results.stats.scannedIndex);
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief test _key
////////////////////////////////////////////////////////////////////////////////
@@ -2538,7 +2582,18 @@ function optimizerIndexesTestSuite () {
        [ "FOR i IN " + c.name() + " FILTER i._key IN ['test23', 'test42'] RETURN i._key", [ 'test23', 'test42' ] ],
        [ "LET a = PASSTHRU([]) FOR i IN " + c.name() + " FILTER i._key IN a RETURN i._key", [ ] ],
        [ "LET a = PASSTHRU(['test23']) FOR i IN " + c.name() + " FILTER i._key IN a RETURN i._key", [ 'test23' ] ],
        [ "LET a = PASSTHRU(['test23', 'test42']) FOR i IN " + c.name() + " FILTER i._key IN a RETURN i._key", [ 'test23', 'test42' ] ]
        [ "LET a = PASSTHRU(['test23', 'test42']) FOR i IN " + c.name() + " FILTER i._key IN a RETURN i._key", [ 'test23', 'test42' ] ],
        [ "LET a = PASSTHRU({ ids: ['test23', 'test42'] }) FOR i IN " + c.name() + " FILTER i._key IN a.ids RETURN i._key", [ 'test23', 'test42' ] ],
        [ "LET a = PASSTHRU({ ids: [23, 42] }) FOR i IN " + c.name() + " FILTER i.value2 IN a.ids RETURN i.value2", [ 23, 42 ] ],
        [ "LET a = PASSTHRU({ ids: [23, 42] }) FOR i IN " + c.name() + " FILTER i.value3 IN a.ids RETURN i.value2", [ 23, 42 ] ],

        // non-arrays. should not fail but return no results
        [ "LET a = PASSTHRU({}) FOR i IN " + c.name() + " FILTER i._key IN a RETURN i._key", [ ] ],
        [ "LET a = PASSTHRU({}) FOR i IN " + c.name() + " FILTER i.value2 IN a RETURN i.value2", [ ] ],
        [ "LET a = PASSTHRU({}) FOR i IN " + c.name() + " FILTER i.value3 IN a RETURN i.value2", [ ] ]
      ];

      queries.forEach(function(query) {
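The added cases exercise IN filters whose right-hand side is an attribute access on a runtime value (a.ids) rather than a plain variable or literal array, asserting that index usage survives the extra indirection. A standalone illustration in the style of the suite (AQL_EXPLAIN as used above; the literal collection name stands in for c.name()):

// sketch: attribute access inside IN should still yield an IndexNode
var query = "LET data = NOOPT({ ids: ['test23', 'test42'] }) " +
            "FOR i IN UnitTestsCollection FILTER i._key IN data.ids RETURN i._key";
var nodeTypes = AQL_EXPLAIN(query).plan.nodes.map(function (node) {
  return node.type;
});
// an IndexNode here means the primary index is used instead of a full scan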
@@ -241,8 +241,7 @@ function dumpTestSuite () {

      assertEqual(2, c.type()); // document
      assertFalse(p.waitForSync);
      assertTrue(p.isVolatile);

      assertEqual(1, c.getIndexes().length); // just primary index
      assertEqual("primary", c.getIndexes()[0].type);
      assertEqual(0, c.count());
@@ -43,6 +43,7 @@ var mmfilesEngine = false;
if (db._engine().name === "mmfiles") {
  mmfilesEngine = true;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief test suite
////////////////////////////////////////////////////////////////////////////////

@@ -114,7 +115,6 @@ function ReplicationSuite() {
      db._drop(cn);

      connectToSlave();
      replication.applier.stop();
      db._drop(cn);
    },
@@ -54,10 +54,14 @@ RocksDBOptionFeature::RocksDBOptionFeature(
      _maxBackgroundCompactions(rocksDBDefaults.max_background_compactions),
      _maxLogFileSize(rocksDBDefaults.max_log_file_size),
      _keepLogFileNum(rocksDBDefaults.keep_log_file_num),
      _recycleLogFileNum(rocksDBDefaults.recycle_log_file_num),
      _logFileTimeToRoll(rocksDBDefaults.log_file_time_to_roll),
      _compactionReadaheadSize(rocksDBDefaults.compaction_readahead_size),
      _verifyChecksumsInCompaction(rocksDBDefaults.verify_checksums_in_compaction),
      _optimizeFiltersForHits(rocksDBDefaults.optimize_filters_for_hits) {
      _optimizeFiltersForHits(rocksDBDefaults.optimize_filters_for_hits),
      _useDirectReads(rocksDBDefaults.use_direct_reads),
      _useDirectWrites(rocksDBDefaults.use_direct_writes),
      _skipCorrupted(false) {
  setOptional(true);
  requiresElevatedPrivileges(false);
  startsAfter("DatabasePath");
@@ -108,13 +112,13 @@ void RocksDBOptionFeature::collectOptions(std::shared_ptr<ProgramOptions> option
      "control maximum total data size for a level",
      new UInt64Parameter(&_maxBytesForLevelMultiplier));

  options->addOption(
  options->addHiddenOption(
      "--rocksdb.verify-checksums-in-compation",
      "if true, compaction will verify checksum on every read that happens "
      "as part of compaction",
      new BooleanParameter(&_verifyChecksumsInCompaction));

  options->addOption(
  options->addHiddenOption(
      "--rocksdb.optimize-filters-for-hits",
      "this flag specifies that the implementation should optimize the filters "
      "mainly for cases where keys are found rather than also optimize for keys "
@@ -123,39 +127,60 @@ void RocksDBOptionFeature::collectOptions(std::shared_ptr<ProgramOptions> option
      "important",
      new BooleanParameter(&_optimizeFiltersForHits));

  options->addOption(
#ifdef __linux__
  options->addHiddenOption(
      "--rocksdb.use-direct-reads",
      "use O_DIRECT for reading files",
      new BooleanParameter(&_useDirectReads));

  options->addHiddenOption(
      "--rocksdb.use-direct-writes",
      "use O_DIRECT for writing files",
      new BooleanParameter(&_useDirectWrites));
#endif

  options->addHiddenOption(
      "--rocksdb.base-background-compactions",
      "suggested number of concurrent background compaction jobs",
      new UInt64Parameter(&_baseBackgroundCompactions));

  options->addOption(
  options->addHiddenOption(
      "--rocksdb.max-background-compactions",
      "maximum number of concurrent background compaction jobs",
      new UInt64Parameter(&_maxBackgroundCompactions));

  options->addOption(
  options->addHiddenOption(
      "--rocksdb.max-log-file-size",
      "specify the maximal size of the info log file",
      new UInt64Parameter(&_maxLogFileSize));

  options->addOption(
  options->addHiddenOption(
      "--rocksdb.keep-log-file-num",
      "maximal info log files to be kept",
      new UInt64Parameter(&_keepLogFileNum));

  options->addOption(
  options->addHiddenOption(
      "--rocksdb.recycle-log-file-num",
      "number of log files to keep around for recycling",
      new UInt64Parameter(&_recycleLogFileNum));

  options->addHiddenOption(
      "--rocksdb.log-file-time-to-roll",
      "time for the info log file to roll (in seconds). "
      "If specified with non-zero value, log file will be rolled "
      "if it has been active longer than `log_file_time_to_roll`",
      new UInt64Parameter(&_logFileTimeToRoll));

  options->addOption(
  options->addHiddenOption(
      "--rocksdb.compaction-read-ahead-size",
      "if non-zero, we perform bigger reads when doing compaction. If you're "
      "running RocksDB on spinning disks, you should set this to at least 2MB. "
      "that way RocksDB's compaction is doing sequential instead of random reads.",
      new UInt64Parameter(&_compactionReadaheadSize));

  options->addHiddenOption("--rocksdb.wal-recovery-skip-corrupted",
                           "skip corrupted records in WAL recovery",
                           new BooleanParameter(&_skipCorrupted));
}

void RocksDBOptionFeature::validateOptions(std::shared_ptr<ProgramOptions> options) {
@@ -58,10 +58,14 @@ class RocksDBOptionFeature final : public application_features::ApplicationFeatu
  uint64_t _maxBackgroundCompactions;
  uint64_t _maxLogFileSize;
  uint64_t _keepLogFileNum;
  uint64_t _recycleLogFileNum;
  uint64_t _logFileTimeToRoll;
  uint64_t _compactionReadaheadSize;
  bool _verifyChecksumsInCompaction;
  bool _optimizeFiltersForHits;
  bool _useDirectReads;
  bool _useDirectWrites;
  bool _skipCorrupted;
};

}
@@ -104,11 +104,13 @@ std::string const StaticStrings::HLCHeader("x-arango-hlc");
std::string const StaticStrings::KeepAlive("Keep-Alive");
std::string const StaticStrings::Location("location");
std::string const StaticStrings::MultiPartContentType("multipart/form-data");
std::string const StaticStrings::NoSniff("nosniff");
std::string const StaticStrings::Origin("origin");
std::string const StaticStrings::Queue("x-arango-queue");
std::string const StaticStrings::Server("server");
std::string const StaticStrings::StartThread("x-arango-start-thread");
std::string const StaticStrings::WwwAuthenticate("www-authenticate");
std::string const StaticStrings::XContentTypeOptions("x-content-type-options");

// mime types
std::string const StaticStrings::MimeTypeJson(
@@ -100,11 +100,13 @@ class StaticStrings {
  static std::string const KeepAlive;
  static std::string const Location;
  static std::string const MultiPartContentType;
  static std::string const NoSniff;
  static std::string const Origin;
  static std::string const Queue;
  static std::string const Server;
  static std::string const StartThread;
  static std::string const WwwAuthenticate;
  static std::string const XContentTypeOptions;

  // mime types
  static std::string const MimeTypeJson;
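The new StaticStrings entries back the x-content-type-options: nosniff header that the server now attaches to its HTTP responses. A quick way to observe it from the JavaScript side (a sketch; the URL is hypothetical and internal.download is used as in the tests above):

// e.g. in arangosh, against a locally running server
var internal = require('internal');
var res = internal.download('http://127.0.0.1:8529/_api/version');
// with this change the response headers should include:
// res.headers['x-content-type-options'] === 'nosniff'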