mirror of https://gitee.com/bigwinds/arangodb
Resolved or removed some TODOs (#8756)
commit 62ed576c8e
parent ea27515595
@@ -74,7 +74,7 @@ BlocksWithClients::BlocksWithClients(ExecutionEngine* engine, ExecutionNode cons
 }
 
 std::pair<ExecutionState, bool> BlocksWithClients::getBlock(size_t atMost) {
-  ExecutionBlock::throwIfKilled();  // check if we were aborted
+  throwIfKilled();  // check if we were aborted
 
   auto res = _dependencies[0]->getSome(atMost);
   if (res.first == ExecutionState::WAITING) {
@@ -123,3 +123,9 @@ size_t BlocksWithClients::getClientId(std::string const& shardId) const {
   }
   return ((*it).second);
 }
+
+void BlocksWithClients::throwIfKilled() {
+  if (_engine->getQuery()->killed()) {
+    THROW_ARANGO_EXCEPTION(TRI_ERROR_QUERY_KILLED);
+  }
+}
@@ -90,6 +90,9 @@ class BlocksWithClients : public ExecutionBlock {
   /// corresponding to <shardId>
   size_t getClientId(std::string const& shardId) const;
 
+  /// @brief throw an exception if query was killed
+  void throwIfKilled();
+
   /// @brief _shardIdMap: map from shardIds to clientNrs
   std::unordered_map<std::string, size_t> _shardIdMap;
 
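Note: these two hunks give BlocksWithClients its own throwIfKilled(); the matching definition is removed from ExecutionBlock further down, where the dropped TODO noted that only the scatter/distribute code still used it. The pattern itself is a plain cooperative-cancellation check. A minimal standalone sketch of the idea, with QueryStub and QueryKilledError as hypothetical stand-ins for the ArangoDB query object and exception macro (not the actual API):

#include <atomic>
#include <stdexcept>

// Hypothetical stand-in for the engine's query object.
struct QueryStub {
  std::atomic<bool> killed{false};
};

// Hypothetical stand-in for THROW_ARANGO_EXCEPTION(TRI_ERROR_QUERY_KILLED).
struct QueryKilledError : std::runtime_error {
  QueryKilledError() : std::runtime_error("query killed") {}
};

class BlockBase {
 protected:
  QueryStub* _query = nullptr;
};

class BlockWithKillCheck : public BlockBase {
 protected:
  // Same shape as BlocksWithClients::throwIfKilled() in the hunk above:
  // check the cooperative kill flag and abort the current work by throwing.
  void throwIfKilled() {
    if (_query != nullptr && _query->killed.load()) {
      throw QueryKilledError();
    }
  }

 public:
  void getBlock() {
    throwIfKilled();  // check if we were aborted
    // ... fetch from dependencies ...
  }
};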
@@ -123,15 +123,11 @@ std::unique_ptr<ExecutionBlock> RemoteNode::createBlock(
   }
 
   std::unordered_set<RegisterId> regsToClear = getRegsToClear();
-#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
-  // TODO This is only for me to find out about the current behaviour. Should
-  // probably be either removed, or made into an assert.
-  if (!regsToClear.empty()) {
-    LOG_TOPIC("4fd06", WARN, Logger::AQL)
-        << "RemoteBlock has registers to clear. "
-        << "Shouldn't this be done before network?";
-  }
-#endif
+
+  // Everything that is cleared here could and should have been cleared before,
+  // i.e. before sending it over the network.
+  TRI_ASSERT(regsToClear.empty());
+
   ExecutorInfos infos({}, {}, nrInRegs, nrOutRegs, std::move(regsToClear),
                       std::move(regsToKeep));
 
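Note: this hunk resolves the TODO by doing what it suggested, turning the maintainer-mode warning into a hard assertion that no registers are left to clear at the network boundary. A tiny sketch of that shift under illustrative names (plain assert instead of TRI_ASSERT, not the ArangoDB API):

#include <cassert>
#include <unordered_set>

using RegisterId = unsigned int;

// Registers are expected to be cleared on the sending side already, so by the
// time the remote block is built there should be nothing left to clear.
void checkRegistersClearedBeforeNetwork(std::unordered_set<RegisterId> const& regsToClear) {
  // Before: a debug-only log warning. After: an unconditional debug assertion.
  assert(regsToClear.empty() && "registers should be cleared before going over the network");
}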
@@ -24,6 +24,7 @@
 ////////////////////////////////////////////////////////////////////////////////
 
 #include "ExecutionBlock.h"
+
 #include "Aql/Ast.h"
 #include "Aql/BlockCollector.h"
 #include "Aql/InputAqlItemRow.h"
@@ -48,12 +49,6 @@ ExecutionBlock::ExecutionBlock(ExecutionEngine* engine, ExecutionNode const* ep)
 
 ExecutionBlock::~ExecutionBlock() = default;
 
-void ExecutionBlock::throwIfKilled() {  // TODO Scatter & DistributeExecutor still using
-  if (_engine->getQuery()->killed()) {
-    THROW_ARANGO_EXCEPTION(TRI_ERROR_QUERY_KILLED);
-  }
-}
-
 std::pair<ExecutionState, Result> ExecutionBlock::initializeCursor(InputAqlItemRow const& input) {
   if (_dependencyPos == _dependencies.end()) {
     // We need to start again.
@@ -256,9 +256,6 @@ class ExecutionBlock {
     _dependencyPos = _dependencies.end();
   }
 
-  /// @brief throw an exception if query was killed
-  void throwIfKilled();
-
 protected:
   /// @brief the execution engine
   ExecutionEngine* _engine;
@@ -128,15 +128,12 @@ std::pair<ExecutionState, SharedAqlItemBlockPtr> ExecutionBlockImpl<Executor>::g
     _outputItemRow = createOutputRow(newBlock);
   }
 
-  // TODO It's not very obvious that `state` will be initialized, because
-  // it's not obvious that the loop will run at least once (e.g. after a
-  // WAITING). It should, but I'd like that to be clearer. Initializing here
-  // won't help much because it's unclear whether the value will be correct.
   ExecutionState state = ExecutionState::HASMORE;
   ExecutorStats executorStats{};
 
   TRI_ASSERT(atMost > 0);
 
+  // The loop has to be entered at least once!
   TRI_ASSERT(!_outputItemRow->isFull());
   while (!_outputItemRow->isFull()) {
     std::tie(state, executorStats) = _executor.produceRow(*_outputItemRow);
@@ -152,10 +149,6 @@ std::pair<ExecutionState, SharedAqlItemBlockPtr> ExecutionBlockImpl<Executor>::g
   }
 
   if (state == ExecutionState::DONE) {
-    // TODO Does this work as expected when there was no row produced, or
-    // we were DONE already, so we didn't build a single row?
-    // We must return nullptr then, because empty AqlItemBlocks are not
-    // allowed!
     auto outputBlock = _outputItemRow->stealBlock();
     // This is not strictly necessary here, as we shouldn't be called again
     // after DONE.
@@ -174,7 +167,6 @@ std::pair<ExecutionState, SharedAqlItemBlockPtr> ExecutionBlockImpl<Executor>::g
   auto outputBlock = _outputItemRow->stealBlock();
   // we guarantee that we do return a valid pointer in the HASMORE case.
   TRI_ASSERT(outputBlock != nullptr);
-  // TODO OutputAqlItemRow could get "reset" and "isValid" methods and be reused
   _outputItemRow.reset();
   return {state, std::move(outputBlock)};
 }
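Note: the TODO removed in the first of these hunks worried that `state` could be read before the produce loop assigns it; the resolution is the added comment plus the existing assertion that the output row is not yet full, which guarantees at least one iteration. A small self-contained sketch of that invariant (names mirror the diff, but the types are simplified stand-ins rather than the real executor interfaces):

#include <cassert>
#include <cstddef>

enum class ExecutionState { DONE, HASMORE, WAITING };

struct OutputRowStub {
  std::size_t produced = 0;
  std::size_t capacity = 0;
  bool isFull() const { return produced >= capacity; }
};

// Stand-in for Executor::produceRow(): writes one row and reports whether more follow.
ExecutionState produceRow(OutputRowStub& out) {
  ++out.produced;
  return out.isFull() ? ExecutionState::DONE : ExecutionState::HASMORE;
}

ExecutionState drain(OutputRowStub& out) {
  ExecutionState state = ExecutionState::HASMORE;

  // The loop has to be entered at least once, so produceRow() assigns `state`
  // before it is ever inspected or returned.
  assert(!out.isFull());
  while (!out.isFull()) {
    state = produceRow(out);
    if (state == ExecutionState::DONE || state == ExecutionState::WAITING) {
      break;
    }
  }
  return state;
}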
@@ -2031,18 +2031,6 @@ std::unique_ptr<ExecutionBlock> ReturnNode::createBlock(
 
   RegisterId inputRegister = variableToRegisterId(_inVariable);
 
-  // TODO - remove LOGGING once register planning changes have been made and the ReturnExecutor is final
-  // LOG_DEVEL << "-------------------------------";
-  // LOG_DEVEL << "inputRegister: " << inputRegister;
-  // LOG_DEVEL << "input block width: " << getRegisterPlan()->nrRegs[previousNode->getDepth()];
-  // LOG_DEVEL << "ouput block width: " << getRegisterPlan()->nrRegs[getDepth()];
-
-  // std::stringstream ss;
-  // for(auto const& a : getRegsToClear()){
-  // ss << a << " ";
-  //}
-  // LOG_DEVEL << "registersToClear: " << ss.rdbuf();
-
   bool const isRoot = plan()->root() == this;
 
   bool const isDBServer = arangodb::ServerState::instance()->isDBServer();
@@ -108,9 +108,6 @@ class HashedCollectExecutor {
   struct Properties {
     static const bool preservesOrder = false;
     static const bool allowsBlockPassthrough = false;
-    // TODO This should be true, but the current implementation in
-    // ExecutionBlockImpl and the fetchers does not work with this.
-    // It will however always only overfetch if activated, never underfetch
     static const bool inputSizeRestrictsOutputSize = true;
   };
   using Fetcher = SingleRowFetcher<Properties::allowsBlockPassthrough>;
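Note: the Properties struct whose stale comment is removed here (and again in SortedCollectExecutor below) is a compile-time trait block: each executor advertises flags such as allowsBlockPassthrough, and its fetcher type is chosen from them through a template parameter. A rough sketch of the mechanism under simplified, hypothetical names (not the actual ExecutionBlockImpl machinery):

#include <cstddef>

// Fetcher selected at compile time from the executor's properties.
template <bool allowsBlockPassthrough>
class SingleRowFetcherStub {
 public:
  static constexpr bool passthrough = allowsBlockPassthrough;
  // ... fetchRow(), etc. ...
};

class HashedCollectLikeExecutor {
 public:
  struct Properties {
    static const bool preservesOrder = false;
    static const bool allowsBlockPassthrough = false;
    // When true, the block sizes its output from the input size; it may
    // overfetch but never underfetch.
    static const bool inputSizeRestrictsOutputSize = true;
  };
  using Fetcher = SingleRowFetcherStub<Properties::allowsBlockPassthrough>;
};

// The enclosing block can branch on the traits at compile time:
template <class Executor>
constexpr std::size_t defaultBatchSize(std::size_t atMost) {
  return Executor::Properties::inputSizeRestrictsOutputSize ? atMost : 1000;
}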
@@ -220,15 +220,3 @@ std::tuple<Variable const*, AstNode const*, bool> SortCondition::field(size_t po
   SortField const& field = _fields[position];
   return std::make_tuple(field.variable, field.node, field.order);
 }
-
-/// @brief toVelocyPack
-void SortCondition::toVelocyPackHelper(VPackBuilder& nodes, bool verbose) const {
-  // TODO FIXME implement
-}
-
-std::shared_ptr<SortCondition> SortCondition::fromVelocyPack(ExecutionPlan const* plan,
-                                                             arangodb::velocypack::Slice const& base,
-                                                             std::string const& name) {
-  // TODO FIXME implement
-  return nullptr;
-}
@@ -91,13 +91,6 @@ class SortCondition {
   /// the sort order is ascending (true) or descending (false)
   std::tuple<Variable const*, AstNode const*, bool> field(size_t position) const;
 
-  /// @brief export to VelocyPack
-  void toVelocyPackHelper(arangodb::velocypack::Builder&, bool) const;
-
-  static std::shared_ptr<SortCondition> fromVelocyPack(ExecutionPlan const* plan,
-                                                       arangodb::velocypack::Slice const& base,
-                                                       std::string const& name);
-
 private:
  struct SortField {
   Variable const* variable;
@@ -237,7 +237,7 @@ void SortedCollectExecutor::CollectGroup::groupValuesToArray(VPackBuilder& build
 }
 
 void SortedCollectExecutor::CollectGroup::writeToOutput(OutputAqlItemRow& output) {
-  // Thanks to the edge case that we have to emmit a row even if we have no
+  // Thanks to the edge case that we have to emit a row even if we have no
   // input We cannot assert here that the input row is valid ;(
   size_t i = 0;
   for (auto& it : infos.getGroupRegisters()) {
@@ -263,7 +263,7 @@ void SortedCollectExecutor::CollectGroup::writeToOutput(OutputAqlItemRow& output
   if (infos.getCollectRegister() != ExecutionNode::MaxRegisterId) {
     if (infos.getCount()) {
       // only set group count in result register
-      output.cloneValueInto(infos.getCollectRegister(), _lastInputRow,  // TODO check move
+      output.cloneValueInto(infos.getCollectRegister(), _lastInputRow,
                             AqlValue(AqlValueHintUInt(static_cast<uint64_t>(this->groupLength))));
     } else {
       TRI_ASSERT(_builder.isOpenArray());
@@ -163,9 +163,6 @@ class SortedCollectExecutor {
   struct Properties {
     static const bool preservesOrder = false;
     static const bool allowsBlockPassthrough = false;
-    // TODO This should be true, but the current implementation in
-    // ExecutionBlockImpl and the fetchers does not work with this.
-    // It will however always only overfetch if activated, never underfetch
    static const bool inputSizeRestrictsOutputSize = true;
   };
   using Fetcher = SingleRowFetcher<Properties::allowsBlockPassthrough>;