1
0
Fork 0

Merge branch 'devel' of github.com:arangodb/arangodb into devel

* 'devel' of github.com:arangodb/arangodb:
  make geo index work with nested attributes
  added tests
  updated CHANGELOG
  fix port number over-/underruns
  fixed web ui display bug
  mark the geo index as sorted
  don't tell GatherNode to sort data from an unsorted index
  fix cluster AQL statistics
  simplify stats a bit
This commit is contained in:
Jan Christoph Uhde 2017-02-03 13:25:25 +01:00
commit 7eca8f6b8e
20 changed files with 214 additions and 128 deletions

View File

@ -1,6 +1,8 @@
devel
-----
* fix potential port number over-/underruns
* added startup option `--log.shorten-filenames` for controlling whether filenames
in log message should be shortened to just the filename with the absolute path

View File

@ -49,8 +49,6 @@ BOOST_TEST_DONT_PRINT_LOG_VALUE(arangodb::Endpoint::EndpointType)
// --SECTION-- macros
// -----------------------------------------------------------------------------
#define DELETE_ENDPOINT(e) if (e != 0) delete e;
#define FACTORY_NAME(name) name ## Factory
#define FACTORY(name, specification) arangodb::Endpoint::FACTORY_NAME(name)(specification)
@ -58,12 +56,12 @@ BOOST_TEST_DONT_PRINT_LOG_VALUE(arangodb::Endpoint::EndpointType)
#define CHECK_ENDPOINT_FEATURE(type, specification, feature, expected) \
e = FACTORY(type, specification); \
BOOST_CHECK_EQUAL((expected), (e->feature())); \
DELETE_ENDPOINT(e);
delete e;
#define CHECK_ENDPOINT_SERVER_FEATURE(type, specification, feature, expected) \
e = arangodb::Endpoint::serverFactory(specification, 1, true); \
BOOST_CHECK_EQUAL((expected), (e->feature())); \
DELETE_ENDPOINT(e);
delete e;
// -----------------------------------------------------------------------------
// --SECTION-- setup / tear-down
@ -118,6 +116,11 @@ BOOST_AUTO_TEST_CASE (EndpointInvalid) {
BOOST_CHECK_EQUAL(e, arangodb::Endpoint::clientFactory("ssl@tcp://127.0.0.1:8529"));
BOOST_CHECK_EQUAL(e, arangodb::Endpoint::clientFactory("https@tcp://127.0.0.1:8529"));
BOOST_CHECK_EQUAL(e, arangodb::Endpoint::clientFactory("https@tcp://127.0.0.1:"));
BOOST_CHECK_EQUAL(e, arangodb::Endpoint::clientFactory("tcp://127.0.0.1:65536"));
BOOST_CHECK_EQUAL(e, arangodb::Endpoint::clientFactory("tcp://127.0.0.1:65537"));
BOOST_CHECK_EQUAL(e, arangodb::Endpoint::clientFactory("tcp://127.0.0.1:-1"));
BOOST_CHECK_EQUAL(e, arangodb::Endpoint::clientFactory("tcp://127.0.0.1:6555555555"));
}
////////////////////////////////////////////////////////////////////////////////
@ -491,7 +494,7 @@ BOOST_AUTO_TEST_CASE (EndpointIsConnectedServer1) {
e = arangodb::Endpoint::serverFactory("tcp://127.0.0.1", 1, true);
BOOST_CHECK_EQUAL(false, e->isConnected());
DELETE_ENDPOINT(e);
delete e;
}
////////////////////////////////////////////////////////////////////////////////
@ -503,7 +506,7 @@ BOOST_AUTO_TEST_CASE (EndpointIsConnectedServer2) {
e = arangodb::Endpoint::serverFactory("ssl://127.0.0.1", 1, true);
BOOST_CHECK_EQUAL(false, e->isConnected());
DELETE_ENDPOINT(e);
delete e;
}
////////////////////////////////////////////////////////////////////////////////
@ -516,7 +519,7 @@ BOOST_AUTO_TEST_CASE (EndpointIsConnectedServer3) {
e = arangodb::Endpoint::serverFactory("unix:///tmp/socket", 1, true);
BOOST_CHECK_EQUAL(false, e->isConnected());
DELETE_ENDPOINT(e);
delete e;
}
#endif
@ -529,7 +532,7 @@ BOOST_AUTO_TEST_CASE (EndpointIsConnectedClient1) {
e = arangodb::Endpoint::clientFactory("tcp://127.0.0.1");
BOOST_CHECK_EQUAL(false, e->isConnected());
DELETE_ENDPOINT(e);
delete e;
}
////////////////////////////////////////////////////////////////////////////////
@ -541,7 +544,7 @@ BOOST_AUTO_TEST_CASE (EndpointIsConnectedClient2) {
e = arangodb::Endpoint::clientFactory("ssl://127.0.0.1");
BOOST_CHECK_EQUAL(false, e->isConnected());
DELETE_ENDPOINT(e);
delete e;
}
////////////////////////////////////////////////////////////////////////////////
@ -554,7 +557,7 @@ BOOST_AUTO_TEST_CASE (EndpointIsConnectedClient3) {
e = arangodb::Endpoint::clientFactory("unix:///tmp/socket");
BOOST_CHECK_EQUAL(false, e->isConnected());
DELETE_ENDPOINT(e);
delete e;
}
#endif
@ -575,7 +578,7 @@ BOOST_AUTO_TEST_CASE (EndpointServerTcpIpv4WithPort) {
BOOST_CHECK_EQUAL(667, e->port());
BOOST_CHECK_EQUAL("127.0.0.1:667", e->hostAndPort());
BOOST_CHECK_EQUAL(false, e->isConnected());
DELETE_ENDPOINT(e);
delete e;
}
////////////////////////////////////////////////////////////////////////////////
@ -596,7 +599,7 @@ BOOST_AUTO_TEST_CASE (EndpointServerUnix) {
BOOST_CHECK_EQUAL(0, e->port());
BOOST_CHECK_EQUAL("localhost", e->hostAndPort());
BOOST_CHECK_EQUAL(false, e->isConnected());
DELETE_ENDPOINT(e);
delete e;
}
#endif
@ -617,7 +620,7 @@ BOOST_AUTO_TEST_CASE (EndpointClientSslIpV6WithPortHttp) {
BOOST_CHECK_EQUAL(43425, e->port());
BOOST_CHECK_EQUAL("[0001:0002:0003:0004:0005:0006:0007:0008]:43425", e->hostAndPort());
BOOST_CHECK_EQUAL(false, e->isConnected());
DELETE_ENDPOINT(e);
delete e;
}
////////////////////////////////////////////////////////////////////////////////
@ -637,7 +640,7 @@ BOOST_AUTO_TEST_CASE (EndpointClientTcpIpv6WithoutPort) {
BOOST_CHECK_EQUAL(8529, e->port());
BOOST_CHECK_EQUAL("[::]:8529", e->hostAndPort());
BOOST_CHECK_EQUAL(false, e->isConnected());
DELETE_ENDPOINT(e);
delete e;
}
BOOST_AUTO_TEST_SUITE_END()

View File

@ -38,6 +38,7 @@
using namespace arangodb::basics;
static bool Initialized = false;
static uint64_t counter = 0;
// -----------------------------------------------------------------------------
// --SECTION-- setup / tear-down
@ -73,8 +74,6 @@ struct CFilesSetup {
}
StringBuffer* writeFile (const char* blob) {
static uint64_t counter = 0;
StringBuffer* filename = new StringBuffer(TRI_UNKNOWN_MEM_ZONE);
filename->appendText(_directory);
filename->appendText("/tmp-");
@ -108,6 +107,71 @@ struct CFilesSetup {
BOOST_FIXTURE_TEST_SUITE(CFilesTest, CFilesSetup)
////////////////////////////////////////////////////////////////////////////////
/// @brief test creating and removing a single directory
////////////////////////////////////////////////////////////////////////////////

BOOST_AUTO_TEST_CASE (tst_createdirectory) {
  std::ostringstream out;
  out << _directory.c_str() << "/tmp-" << ++counter << "-dir";

  std::string filename = out.str();
  long unused1;         // output parameter (system error number), not inspected here
  std::string unused2;  // output parameter (error message), not inspected here

  // create the directory and verify it is visible on disk
  int res = TRI_CreateDirectory(filename.c_str(), unused1, unused2);
  BOOST_CHECK_EQUAL(0, res);
  BOOST_CHECK_EQUAL(true, TRI_ExistsFile(filename.c_str()));
  BOOST_CHECK_EQUAL(true, TRI_IsDirectory(filename.c_str()));

  // remove it again; previously the removal result was assigned but never
  // checked, so a failing removal went unnoticed
  res = TRI_RemoveDirectory(filename.c_str());
  BOOST_CHECK_EQUAL(0, res);
  BOOST_CHECK_EQUAL(false, TRI_ExistsFile(filename.c_str()));
  BOOST_CHECK_EQUAL(false, TRI_IsDirectory(filename.c_str()));
}
////////////////////////////////////////////////////////////////////////////////
/// @brief test recursive directory creation and recursive removal
////////////////////////////////////////////////////////////////////////////////

BOOST_AUTO_TEST_CASE (tst_createdirectoryrecursive) {
  std::ostringstream out;
  out << _directory.c_str() << "/tmp-" << ++counter << "-dir";

  std::string filename1 = out.str();
  out << "/abc";  // nested child, forces recursive creation of the parent
  std::string filename2 = out.str();

  long unused1;         // output parameter (system error number), not inspected here
  std::string unused2;  // output parameter (error message), not inspected here

  // creating the nested path must implicitly create the parent, too
  int res = TRI_CreateRecursiveDirectory(filename2.c_str(), unused1, unused2);
  BOOST_CHECK_EQUAL(0, res);
  BOOST_CHECK_EQUAL(true, TRI_ExistsFile(filename1.c_str()));
  BOOST_CHECK_EQUAL(true, TRI_IsDirectory(filename1.c_str()));
  BOOST_CHECK_EQUAL(true, TRI_ExistsFile(filename2.c_str()));
  BOOST_CHECK_EQUAL(true, TRI_IsDirectory(filename2.c_str()));

  // removing the parent must take the child with it; previously the removal
  // result was assigned but never checked
  res = TRI_RemoveDirectory(filename1.c_str());
  BOOST_CHECK_EQUAL(0, res);
  BOOST_CHECK_EQUAL(false, TRI_ExistsFile(filename1.c_str()));
  BOOST_CHECK_EQUAL(false, TRI_IsDirectory(filename1.c_str()));
  BOOST_CHECK_EQUAL(false, TRI_ExistsFile(filename2.c_str()));
  BOOST_CHECK_EQUAL(false, TRI_IsDirectory(filename2.c_str()));
}
////////////////////////////////////////////////////////////////////////////////
/// @brief test deterministic (ordered) recursive directory removal
////////////////////////////////////////////////////////////////////////////////

BOOST_AUTO_TEST_CASE (tst_removedirectorydeterministic) {
  std::ostringstream out;
  out << _directory.c_str() << "/tmp-" << ++counter << "-dir";

  std::string filename1 = out.str();
  out << "/abc";  // nested child, forces recursive creation of the parent
  std::string filename2 = out.str();

  long unused1;         // output parameter (system error number), not inspected here
  std::string unused2;  // output parameter (error message), not inspected here

  // set up parent + nested child directories
  int res = TRI_CreateRecursiveDirectory(filename2.c_str(), unused1, unused2);
  BOOST_CHECK_EQUAL(0, res);
  BOOST_CHECK_EQUAL(true, TRI_ExistsFile(filename1.c_str()));
  BOOST_CHECK_EQUAL(true, TRI_IsDirectory(filename1.c_str()));
  BOOST_CHECK_EQUAL(true, TRI_ExistsFile(filename2.c_str()));
  BOOST_CHECK_EQUAL(true, TRI_IsDirectory(filename2.c_str()));

  // deterministic removal of the parent must also remove the child;
  // previously the removal result was assigned but never checked
  res = TRI_RemoveDirectoryDeterministic(filename1.c_str());
  BOOST_CHECK_EQUAL(0, res);
  BOOST_CHECK_EQUAL(false, TRI_ExistsFile(filename1.c_str()));
  BOOST_CHECK_EQUAL(false, TRI_IsDirectory(filename1.c_str()));
  BOOST_CHECK_EQUAL(false, TRI_ExistsFile(filename2.c_str()));
  BOOST_CHECK_EQUAL(false, TRI_IsDirectory(filename2.c_str()));
}
////////////////////////////////////////////////////////////////////////////////
/// @brief test file exists
////////////////////////////////////////////////////////////////////////////////
@ -116,6 +180,7 @@ BOOST_AUTO_TEST_CASE (tst_existsfile) {
StringBuffer* filename = writeFile("");
BOOST_CHECK_EQUAL(true, TRI_ExistsFile(filename->c_str()));
TRI_UnlinkFile(filename->c_str());
BOOST_CHECK_EQUAL(false, TRI_ExistsFile(filename->c_str()));
delete filename;
}

View File

@ -418,7 +418,6 @@ struct AstNode {
bool isAttributeAccessForVariable(Variable const* variable, bool allowIndexedAccess) const {
auto node = getAttributeAccessForVariable(allowIndexedAccess);
if (node == nullptr) {
return false;
}

View File

@ -33,6 +33,7 @@
#include "Aql/AqlValue.h"
#include "Aql/BlockCollector.h"
#include "Aql/ExecutionEngine.h"
#include "Aql/ExecutionStats.h"
#include "Basics/Exceptions.h"
#include "Basics/StaticStrings.h"
#include "Basics/StringBuffer.h"
@ -1328,7 +1329,7 @@ int RemoteBlock::initializeCursor(AqlItemBlock* items, size_t pos) {
responseBodyBuf.c_str(), responseBodyBuf.length());
VPackSlice slice = builder->slice();
if (slice.hasKey("code")) {
return slice.get("code").getNumericValue<int>();
}
@ -1362,9 +1363,14 @@ int RemoteBlock::shutdown(int errorCode) {
std::shared_ptr<VPackBuilder> builder =
VPackParser::fromJson(responseBodyBuf.c_str(), responseBodyBuf.length());
VPackSlice slice = builder->slice();
// read "warnings" attribute if present and add it to our query
if (slice.isObject()) {
if (slice.hasKey("stats")) {
ExecutionStats newStats(slice.get("stats"));
_engine->_stats.add(newStats);
}
// read "warnings" attribute if present and add it to our query
VPackSlice warnings = slice.get("warnings");
if (warnings.isArray()) {
auto query = _engine->getQuery();
@ -1415,19 +1421,14 @@ AqlItemBlock* RemoteBlock::getSome(size_t atLeast, size_t atMost) {
res->result->getBodyVelocyPack();
VPackSlice responseBody = responseBodyBuilder->slice();
ExecutionStats newStats(responseBody.get("stats"));
_engine->_stats.addDelta(_deltaStats, newStats);
_deltaStats = newStats;
if (VelocyPackHelper::getBooleanValue(responseBody, "exhausted", true)) {
traceGetSomeEnd(nullptr);
return nullptr;
}
auto r = new arangodb::aql::AqlItemBlock(_engine->getQuery()->resourceMonitor(), responseBody);
traceGetSomeEnd(r);
return r;
auto r = std::make_unique<AqlItemBlock>(_engine->getQuery()->resourceMonitor(), responseBody);
traceGetSomeEnd(r.get());
return r.release();
// cppcheck-suppress style
DEBUG_END_BLOCK();

View File

@ -28,7 +28,6 @@
#include "Aql/ClusterNodes.h"
#include "Aql/ExecutionBlock.h"
#include "Aql/ExecutionNode.h"
#include "Aql/ExecutionStats.h"
#include "Rest/GeneralRequest.h"
namespace arangodb {
@ -339,9 +338,6 @@ class RemoteBlock : public ExecutionBlock {
/// @brief the ID of the query on the server as a string
std::string _queryId;
/// @brief the ID of the query on the server as a string
ExecutionStats _deltaStats;
/// @brief whether or not this block will forward initialize,
/// initializeCursor or shutDown requests
bool const _isResponsibleForInitializeCursor;

View File

@ -529,6 +529,7 @@ struct CoordinatorInstanciator : public WalkerWorker<ExecutionNode> {
VPackBuilder tmp;
query->ast()->variables()->toVelocyPack(tmp);
result.add("initialize", VPackValue(false));
result.add("variables", tmp.slice());
result.add("collections", VPackValue(VPackValueType::Array));
@ -1133,7 +1134,7 @@ ExecutionEngine* ExecutionEngine::instantiateFromPlan(
bool const isCoordinator =
arangodb::ServerState::instance()->isCoordinator(role);
bool const isDBServer = arangodb::ServerState::instance()->isDBServer(role);
TRI_ASSERT(queryRegistry != nullptr);
ExecutionEngine* engine = nullptr;
@ -1354,8 +1355,11 @@ ExecutionEngine* ExecutionEngine::instantiateFromPlan(
}
engine->_root = root;
root->initialize();
root->initializeCursor(nullptr, 0);
if (plan->isResponsibleForInitialize()) {
root->initialize();
root->initializeCursor(nullptr, 0);
}
return engine;
} catch (...) {

View File

@ -177,6 +177,7 @@ ExecutionPlan::ExecutionPlan(Ast* ast)
: _ids(),
_root(nullptr),
_varUsageComputed(false),
_isResponsibleForInitialize(true),
_nextId(0),
_ast(ast),
_lastLimitNode(nullptr),
@ -280,6 +281,7 @@ ExecutionPlan* ExecutionPlan::clone() {
plan->_root = _root->clone(plan.get(), true, false);
plan->_nextId = _nextId;
plan->_appliedRules = _appliedRules;
plan->_isResponsibleForInitialize = _isResponsibleForInitialize;
CloneNodeAdder adder(plan.get());
plan->_root->walk(&adder);
@ -348,6 +350,7 @@ void ExecutionPlan::toVelocyPack(VPackBuilder& builder, Ast* ast, bool verbose)
size_t nrItems = 0;
builder.add("estimatedCost", VPackValue(_root->getCost(nrItems)));
builder.add("estimatedNrItems", VPackValue(nrItems));
builder.add("initialize", VPackValue(_isResponsibleForInitialize));
builder.close();
}
@ -1882,17 +1885,22 @@ void ExecutionPlan::insertDependency(ExecutionNode* oldNode,
/// @brief create a plan from VPack
ExecutionNode* ExecutionPlan::fromSlice(VPackSlice const& slice) {
ExecutionNode* ret = nullptr;
if (!slice.isObject()) {
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "plan slice is not an object");
}
if (slice.hasKey("initialize")) {
// whether or not this plan (or fragment) is responsible for calling initialize
_isResponsibleForInitialize = slice.get("initialize").getBoolean();
}
VPackSlice nodes = slice.get("nodes");
if (!nodes.isArray()) {
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "plan \"nodes\" attribute is not an array");
}
ExecutionNode* ret = nullptr;
// first, re-create all nodes from the Slice, using the node ids
// no dependency links will be set up in this step

View File

@ -75,6 +75,8 @@ class ExecutionPlan {
/// @brief check if the plan is empty
inline bool empty() const { return (_root == nullptr); }
bool isResponsibleForInitialize() const { return _isResponsibleForInitialize; }
/// @brief note that an optimizer rule was applied
inline void addAppliedRule(int level) { _appliedRules.emplace_back(level); }
@ -299,6 +301,8 @@ class ExecutionPlan {
/// @brief flag to indicate whether the variable usage is computed
bool _varUsageComputed;
bool _isResponsibleForInitialize;
/// @brief auto-increment sequence for node ids
size_t _nextId;

View File

@ -74,9 +74,7 @@ ExecutionStats::ExecutionStats()
executionTime(0.0) {}
ExecutionStats::ExecutionStats(VPackSlice const& slice)
: httpRequests(0),
fullCount(-1),
executionTime(0.0) {
: ExecutionStats() {
if (!slice.isObject()) {
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
"stats is not an object");
@ -88,7 +86,6 @@ ExecutionStats::ExecutionStats(VPackSlice const& slice)
scannedIndex = slice.get("scannedIndex").getNumber<int64_t>();
filtered = slice.get("filtered").getNumber<int64_t>();
if (slice.hasKey("httpRequests")) {
httpRequests = slice.get("httpRequests").getNumber<int64_t>();
}

View File

@ -58,21 +58,22 @@ struct ExecutionStats {
scannedIndex += summand.scannedIndex;
filtered += summand.filtered;
httpRequests += summand.httpRequests;
fullCount += summand.fullCount;
if (summand.fullCount > 0) {
// fullCount may be negative, don't add it then
fullCount += summand.fullCount;
}
// intentionally no modification of executionTime
}
/// @brief sumarize the delta of two other sets of ExecutionStats to us
void addDelta(ExecutionStats const& lastStats,
ExecutionStats const& newStats) {
writesExecuted += newStats.writesExecuted - lastStats.writesExecuted;
writesIgnored += newStats.writesIgnored - lastStats.writesIgnored;
scannedFull += newStats.scannedFull - lastStats.scannedFull;
scannedIndex += newStats.scannedIndex - lastStats.scannedIndex;
filtered += newStats.filtered - lastStats.filtered;
httpRequests += newStats.httpRequests - lastStats.httpRequests;
fullCount += newStats.fullCount - lastStats.fullCount;
// intentionally no modification of executionTime
void clear() {
writesExecuted = 0;
writesIgnored = 0;
scannedFull = 0;
scannedIndex = 0;
filtered = 0;
httpRequests = 0;
fullCount = -1;
executionTime = 0.0;
}
/// @brief number of successfully executed write operations

View File

@ -2350,13 +2350,15 @@ void arangodb::aql::scatterInClusterRule(Optimizer* opt, ExecutionPlan* plan,
// Using Index for sort only works if all indexes are equal.
auto first = allIndexes[0].getIndex();
for (auto const& path : first->fieldNames()) {
elements.emplace_back(sortVariable, !isSortReverse, path);
}
for (auto const& it : allIndexes) {
if (first != it.getIndex()) {
elements.clear();
break;
if (first->isSorted()) {
for (auto const& path : first->fieldNames()) {
elements.emplace_back(sortVariable, !isSortReverse, path);
}
for (auto const& it : allIndexes) {
if (first != it.getIndex()) {
elements.clear();
break;
}
}
}
} else if (nodeType == ExecutionNode::INSERT ||
@ -4098,47 +4100,54 @@ MMFilesGeoIndexInfo iterativePreorderWithCondition(EN::NodeType type, AstNode* r
return MMFilesGeoIndexInfo{};
}
MMFilesGeoIndexInfo geoDistanceFunctionArgCheck(std::pair<AstNode*,AstNode*> const& pair, ExecutionPlan* plan, MMFilesGeoIndexInfo info){
using SV = std::vector<std::string>;
MMFilesGeoIndexInfo geoDistanceFunctionArgCheck(std::pair<AstNode const*, AstNode const*> const& pair,
ExecutionPlan* plan, MMFilesGeoIndexInfo info){
std::pair<Variable const*, std::vector<arangodb::basics::AttributeName>> attributeAccess1;
std::pair<Variable const*, std::vector<arangodb::basics::AttributeName>> attributeAccess2;
// first and second should be based on the same document - need to provide the document
// in order to see which collection is bound to it and if that collections supports geo-index
if( !pair.first->isAttributeAccessForVariable() || !pair.second->isAttributeAccessForVariable()){
if (!pair.first->isAttributeAccessForVariable(attributeAccess1) ||
!pair.second->isAttributeAccessForVariable(attributeAccess2)) {
info.invalidate();
return info;
}
TRI_ASSERT(attributeAccess1.first != nullptr);
TRI_ASSERT(attributeAccess2.first != nullptr);
// expect access of the for doc.attribute
// TODO: more complex access path have to be added: loop until REFERENCE TYPE IS FOUND
auto setter1 = plan->getVarSetBy(static_cast<Variable const*>(pair.first->getMember(0)->getData())->id);
auto setter2 = plan->getVarSetBy(static_cast<Variable const*>(pair.second->getMember(0)->getData())->id);
SV accessPath1{pair.first->getString()};
SV accessPath2{pair.second->getString()};
auto setter1 = plan->getVarSetBy(attributeAccess1.first->id);
auto setter2 = plan->getVarSetBy(attributeAccess2.first->id);
if(setter1 == setter2){
if(setter1->getType() == EN::ENUMERATE_COLLECTION){
auto collNode = reinterpret_cast<EnumerateCollectionNode*>(setter1);
if (setter1 != nullptr &&
setter2 != nullptr &&
setter1 == setter2 &&
setter1->getType() == EN::ENUMERATE_COLLECTION) {
auto collNode = reinterpret_cast<EnumerateCollectionNode*>(setter1);
auto coll = collNode->collection(); //what kind of indexes does it have on what attributes
auto lcoll = coll->getCollection();
// TODO - check collection for suitable geo-indexes
for(auto indexShardPtr : lcoll->getIndexes()){
// get real index
arangodb::Index& index = *indexShardPtr.get();
auto coll = collNode->collection(); //what kind of indexes does it have on what attributes
auto lcoll = coll->getCollection();
// TODO - check collection for suitable geo-indexes
for(auto indexShardPtr : lcoll->getIndexes()){
// get real index
arangodb::Index& index = *indexShardPtr.get();
// check if current index is a geo-index
if( index.type() != arangodb::Index::IndexType::TRI_IDX_TYPE_GEO1_INDEX
&& index.type() != arangodb::Index::IndexType::TRI_IDX_TYPE_GEO2_INDEX) {
continue;
}
// check if current index is a geo-index
if( index.type() != arangodb::Index::IndexType::TRI_IDX_TYPE_GEO1_INDEX
&& index.type() != arangodb::Index::IndexType::TRI_IDX_TYPE_GEO2_INDEX){
continue;
}
TRI_ASSERT(index.fields().size() == 2);
//check access paths of attributes in ast and those in index match
if( index.fieldNames()[0] == accessPath1 && index.fieldNames()[1] == accessPath2 ){
info.collectionNode = collNode;
info.index = indexShardPtr;
info.longitude = std::move(accessPath1);
info.latitude = std::move(accessPath2);
return info;
}
//check access paths of attributes in ast and those in index match
if (index.fields()[0] == attributeAccess1.second &&
index.fields()[1] == attributeAccess2.second) {
info.collectionNode = collNode;
info.index = indexShardPtr;
TRI_AttributeNamesJoinNested(attributeAccess1.second, info.longitude, true);
TRI_AttributeNamesJoinNested(attributeAccess2.second, info.latitude, true);
return info;
}
}
}

View File

@ -731,14 +731,12 @@ QueryResult Query::execute(QueryRegistry* registry) {
}
_trx->commit();
result.context = _trx->transactionContext();
_engine->_stats.setExecutionTime(TRI_microtime() - _startTime);
auto stats = std::make_shared<VPackBuilder>();
_engine->_stats.toVelocyPack(*(stats.get()));
result.context = _trx->transactionContext();
cleanupPlanAndEngine(TRI_ERROR_NO_ERROR);
cleanupPlanAndEngine(TRI_ERROR_NO_ERROR, stats.get());
enterState(FINALIZATION);
@ -913,18 +911,15 @@ QueryResultV8 Query::executeV8(v8::Isolate* isolate, QueryRegistry* registry) {
_trx->commit();
_engine->_stats.setExecutionTime(TRI_microtime() - _startTime);
auto stats = std::make_shared<VPackBuilder>();
_engine->_stats.toVelocyPack(*(stats.get()));
result.context = _trx->transactionContext();
LOG_TOPIC(DEBUG, Logger::QUERIES)
<< TRI_microtime() - _startTime << " "
<< "Query::executeV8: before cleanupPlanAndEngine"
<< " this: " << (uintptr_t) this;
cleanupPlanAndEngine(TRI_ERROR_NO_ERROR);
result.context = _trx->transactionContext();
_engine->_stats.setExecutionTime(TRI_microtime() - _startTime);
auto stats = std::make_shared<VPackBuilder>();
cleanupPlanAndEngine(TRI_ERROR_NO_ERROR, stats.get());
enterState(FINALIZATION);
@ -1387,10 +1382,13 @@ std::string Query::getStateString() const {
}
/// @brief cleanup plan and engine for current query
void Query::cleanupPlanAndEngine(int errorCode) {
void Query::cleanupPlanAndEngine(int errorCode, VPackBuilder* statsBuilder) {
if (_engine != nullptr) {
try {
_engine->shutdown(errorCode);
if (statsBuilder != nullptr) {
_engine->_stats.toVelocyPack(*statsBuilder);
}
} catch (...) {
// shutdown may fail but we must not throw here
// (we're also called from the destructor)

View File

@ -378,7 +378,7 @@ class Query {
void enterState(ExecutionState);
/// @brief cleanup plan and engine for current query
void cleanupPlanAndEngine(int);
void cleanupPlanAndEngine(int, VPackBuilder* statsBuilder = nullptr);
/// @brief create a TransactionContext
std::shared_ptr<arangodb::TransactionContext> createTransactionContext();

View File

@ -697,7 +697,6 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
try {
res = query->trx()->lockCollections();
} catch (...) {
LOG(ERR) << "lock lead to an exception";
generateError(rest::ResponseCode::SERVER_ERROR,
TRI_ERROR_HTTP_SERVER_ERROR,
"lock lead to an exception");
@ -726,15 +725,10 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
if (items.get() == nullptr) {
answerBuilder.add("exhausted", VPackValue(true));
answerBuilder.add("error", VPackValue(false));
answerBuilder.add(VPackValue("stats"));
query->getStats(answerBuilder);
} else {
try {
items->toVelocyPack(query->trx(), answerBuilder);
answerBuilder.add(VPackValue("stats"));
query->getStats(answerBuilder);
} catch (...) {
LOG(ERR) << "cannot transform AqlItemBlock to VelocyPack";
generateError(rest::ResponseCode::SERVER_ERROR,
TRI_ERROR_HTTP_SERVER_ERROR,
"cannot transform AqlItemBlock to VelocyPack");
@ -760,7 +754,6 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
skipped = block->skipSomeForShard(atLeast, atMost, shardId);
}
} catch (...) {
LOG(ERR) << "skipSome lead to an exception";
generateError(rest::ResponseCode::SERVER_ERROR,
TRI_ERROR_HTTP_SERVER_ERROR,
"skipSome lead to an exception");
@ -768,8 +761,6 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
}
answerBuilder.add("skipped", VPackValue(static_cast<double>(skipped)));
answerBuilder.add("error", VPackValue(false));
answerBuilder.add(VPackValue("stats"));
query->getStats(answerBuilder);
} else if (operation == "skip") {
auto number =
VelocyPackHelper::getNumericValue<size_t>(querySlice, "number", 1);
@ -789,10 +780,7 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
}
answerBuilder.add("exhausted", VPackValue(exhausted));
answerBuilder.add("error", VPackValue(false));
answerBuilder.add(VPackValue("stats"));
query->getStats(answerBuilder);
} catch (...) {
LOG(ERR) << "skip lead to an exception";
generateError(rest::ResponseCode::SERVER_ERROR,
TRI_ERROR_HTTP_SERVER_ERROR,
"skip lead to an exception");
@ -803,7 +791,6 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
try {
res = query->engine()->initialize();
} catch (...) {
LOG(ERR) << "initialize lead to an exception";
generateError(rest::ResponseCode::SERVER_ERROR,
TRI_ERROR_HTTP_SERVER_ERROR,
"initialize lead to an exception");
@ -825,7 +812,6 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
res = query->engine()->initializeCursor(items.get(), pos);
}
} catch (...) {
LOG(ERR) << "initializeCursor lead to an exception";
generateError(rest::ResponseCode::SERVER_ERROR,
TRI_ERROR_HTTP_SERVER_ERROR,
"initializeCursor lead to an exception");
@ -833,8 +819,6 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
}
answerBuilder.add("error", VPackValue(res != TRI_ERROR_NO_ERROR));
answerBuilder.add("code", VPackValue(static_cast<double>(res)));
answerBuilder.add(VPackValue("stats"));
query->getStats(answerBuilder);
} else if (operation == "shutdown") {
int res = TRI_ERROR_INTERNAL;
int errorCode = VelocyPackHelper::getNumericValue<int>(
@ -854,7 +838,6 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
_queryRegistry->destroy(_vocbase, _qId, errorCode);
_qId = 0;
} catch (...) {
LOG(ERR) << "shutdown lead to an exception";
generateError(rest::ResponseCode::SERVER_ERROR,
TRI_ERROR_HTTP_SERVER_ERROR,
"shutdown lead to an exception");
@ -863,7 +846,6 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
answerBuilder.add("error", VPackValue(res != TRI_ERROR_NO_ERROR));
answerBuilder.add("code", VPackValue(res));
} else {
LOG(ERR) << "Unknown operation!";
generateError(rest::ResponseCode::NOT_FOUND,
TRI_ERROR_HTTP_NOT_FOUND);
return;
@ -875,7 +857,6 @@ void RestAqlHandler::handleUseQuery(std::string const& operation, Query* query,
generateError(rest::ResponseCode::BAD, e.code());
return;
} catch (...) {
LOG(ERR) << "OUT OF MEMORY when handling query.";
generateError(rest::ResponseCode::BAD, TRI_ERROR_OUT_OF_MEMORY);
return;
}

View File

@ -2033,6 +2033,7 @@ int flushWalOnAllDBServers(bool waitForSync, bool waitForCollector) {
}
if (nrok != (int)DBservers.size()) {
LOG(WARN) << "could not flush WAL on all servers. confirmed: " << nrok << ", expected: " << DBservers.size();
return TRI_ERROR_INTERNAL;
}

View File

@ -118,7 +118,7 @@ friend class MMFilesGeoIndexIterator;
bool canBeDropped() const override { return true; }
bool isSorted() const override { return false; }
bool isSorted() const override { return true; }
bool hasSelectivityEstimate() const override { return false; }

View File

@ -69,6 +69,10 @@
return shortName;
},
getDatabaseShortName: function (id) {
return this.getCoordinatorShortName(id);
},
getDatabaseServerId: function (shortname) {
var id;
if (window.clusterHealth) {

View File

@ -186,14 +186,15 @@
async: true,
success: function (data) {
if (data.id) {
arangoHelper.arangoNotification('Shard ' + shardName + ' will be moved to ' + arangoHelper.getDatabaseServerId(toServer) + '.');
console.log(toServer);
arangoHelper.arangoNotification('Shard ' + shardName + ' will be moved to ' + arangoHelper.getDatabaseShortName(toServer) + '.');
window.setTimeout(function () {
window.App.shardsView.render();
}, 3000);
}
},
error: function () {
arangoHelper.arangoError('Shard ' + shardName + ' could not be moved to ' + arangoHelper.getDatabaseServerId(toServer) + '.');
arangoHelper.arangoError('Shard ' + shardName + ' could not be moved to ' + arangoHelper.getDatabaseShortName(toServer) + '.');
}
});

View File

@ -280,7 +280,13 @@ Endpoint* Endpoint::factory(const Endpoint::EndpointType type,
// hostname and port (e.g. [address]:port)
if (found != std::string::npos && found > 2 && found + 2 < copy.size()) {
uint16_t port = (uint16_t)StringUtils::uint32(copy.substr(found + 2));
int64_t value = StringUtils::int64(copy.substr(found + 2));
// check port over-/underrun
if (value < (std::numeric_limits<uint16_t>::min)() || value > (std::numeric_limits<uint16_t>::max)()) {
LOG(ERR) << "specified port number '" << value << "' is outside the allowed range";
return nullptr;
}
uint16_t port = static_cast<uint16_t>(value);
std::string host = copy.substr(1, found - 1);
return new EndpointIpV6(type, protocol, encryption, listenBacklog,
@ -306,7 +312,13 @@ Endpoint* Endpoint::factory(const Endpoint::EndpointType type,
// hostname and port
if (found != std::string::npos && found + 1 < copy.size()) {
uint16_t port = (uint16_t)StringUtils::uint32(copy.substr(found + 1));
int64_t value = StringUtils::int64(copy.substr(found + 1));
// check port over-/underrun
if (value < (std::numeric_limits<uint16_t>::min)() || value > (std::numeric_limits<uint16_t>::max)()) {
LOG(ERR) << "specified port number '" << value << "' is outside the allowed range";
return nullptr;
}
uint16_t port = static_cast<uint16_t>(value);
std::string host = copy.substr(0, found);
return new EndpointIpV4(type, protocol, encryption, listenBacklog,