mirror of https://gitee.com/bigwinds/arangodb
Merge branch 'spdvpk' of github.com:arangodb/arangodb into spdvpk
commit b04ec0f4fe
@@ -1408,25 +1408,36 @@ AqlValue Expression::executeSimpleExpressionArithmetic(
  VPackBuilder builder;
  mustDestroy = true; // builder = dynamic data
  double result;

  switch (node->type) {
    case NODE_TYPE_OPERATOR_BINARY_PLUS:
      builder.add(VPackValue(l + r));
      return AqlValue(builder);
      result = l + r;
      break;
    case NODE_TYPE_OPERATOR_BINARY_MINUS:
      builder.add(VPackValue(l - r));
      return AqlValue(builder);
      result = l - r;
      break;
    case NODE_TYPE_OPERATOR_BINARY_TIMES:
      builder.add(VPackValue(l * r));
      return AqlValue(builder);
      result = l * r;
      break;
    case NODE_TYPE_OPERATOR_BINARY_DIV:
      builder.add(VPackValue(l / r));
      return AqlValue(builder);
      result = l / r;
      break;
    case NODE_TYPE_OPERATOR_BINARY_MOD:
      builder.add(VPackValue(fmod(l, r)));
      return AqlValue(builder);
      result = fmod(l, r);
      break;
    default:
      mustDestroy = false;
      return AqlValue(VelocyPackHelper::NullValue());
  }

  if (std::isnan(result) || !std::isfinite(result) || result == HUGE_VAL || result == -HUGE_VAL) {
    // convert NaN, +inf & -inf to 0
    mustDestroy = false;
    builder.add(VPackValue(0.0));
    return AqlValue(builder);
  }

  builder.add(VPackValue(result));
  return AqlValue(builder);
}

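For orientation (a sketch, not part of this commit): the hunk above computes the arithmetic result as a plain double and maps NaN, +inf and -inf to 0 before storing it in the builder. A minimal standalone version of that normalization, assuming only the C++ standard library:

#include <cmath>

// Sketch: mirrors the post-switch check in executeSimpleExpressionArithmetic.
static double normalizeArithmeticResult(double result) {
  if (std::isnan(result) || !std::isfinite(result) ||
      result == HUGE_VAL || result == -HUGE_VAL) {
    return 0.0;  // NaN, +inf and -inf all collapse to 0
  }
  return result;
}

// Example: dividing by zero yields +inf, which the sketch turns into 0.0:
//   normalizeArithmeticResult(1.0 / 0.0) == 0.0
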
@@ -86,6 +86,7 @@ arangodb::aql::AstNode* IndexBlock::makeUnique(
}

void IndexBlock::executeExpressions() {
  DEBUG_BEGIN_BLOCK();
  TRI_ASSERT(_condition != nullptr);

  // The following are needed to evaluate expressions with local data from

@@ -114,9 +115,11 @@ void IndexBlock::executeExpressions() {
        ->getMember(toReplace->andMember)
        ->changeMember(toReplace->operatorMember, evaluatedNode);
  }
  DEBUG_END_BLOCK();
}

int IndexBlock::initialize() {
  DEBUG_BEGIN_BLOCK();
  int res = ExecutionBlock::initialize();

  cleanupNonConstExpressions();

@@ -203,6 +206,7 @@ int IndexBlock::initialize() {
  }

  return res;
  DEBUG_END_BLOCK();
}

// init the ranges for reading, this should be called once per new incoming

@@ -222,6 +226,7 @@ int IndexBlock::initialize() {
// _pos to evaluate the variable bounds.

bool IndexBlock::initIndexes() {
  DEBUG_BEGIN_BLOCK();
  // We start with a different context. Return documents found in the previous
  // context again.
  _alreadyReturned.clear();

@@ -304,6 +309,7 @@ bool IndexBlock::initIndexes() {
    }
  }
  return true;
  DEBUG_END_BLOCK();
}

////////////////////////////////////////////////////////////////////////////////

@@ -311,6 +317,7 @@ bool IndexBlock::initIndexes() {
////////////////////////////////////////////////////////////////////////////////

std::shared_ptr<arangodb::OperationCursor> IndexBlock::createCursor() {
  DEBUG_BEGIN_BLOCK();
  IndexNode const* node = static_cast<IndexNode const*>(getPlanNode());
  auto outVariable = node->outVariable();
  auto ast = node->_plan->getAst();

@@ -327,6 +334,7 @@ std::shared_ptr<arangodb::OperationCursor> IndexBlock::createCursor() {
      _collection->getName(), _indexes[_currentIndex], ast,
      _condition->getMember(_currentIndex), outVariable, UINT64_MAX,
      TRI_DEFAULT_BATCH_SIZE, node->_reverse);
  DEBUG_END_BLOCK();
}

////////////////////////////////////////////////////////////////////////////////

@@ -334,6 +342,7 @@ std::shared_ptr<arangodb::OperationCursor> IndexBlock::createCursor() {
////////////////////////////////////////////////////////////////////////////////

void IndexBlock::startNextCursor() {
  DEBUG_BEGIN_BLOCK();

  IndexNode const* node = static_cast<IndexNode const*>(getPlanNode());
  if (node->_reverse) {

@@ -347,11 +356,13 @@ void IndexBlock::startNextCursor() {
  } else {
    _cursor = nullptr;
  }
  DEBUG_END_BLOCK();
}

// this is called every time everything in _documents has been passed on

bool IndexBlock::readIndex(size_t atMost) {
  DEBUG_BEGIN_BLOCK();
  // this is called every time we want more in _documents.
  // For the primary key index, this only reads the index once, and never
  // again (although there might be multiple calls to this function).

@@ -416,9 +427,11 @@ bool IndexBlock::readIndex(size_t atMost) {
  }
  _posInDocs = 0;
  return (!_documents.empty());
  DEBUG_END_BLOCK();
}

int IndexBlock::initializeCursor(AqlItemBlock* items, size_t pos) {
  DEBUG_BEGIN_BLOCK();
  int res = ExecutionBlock::initializeCursor(items, pos);

  if (res != TRI_ERROR_NO_ERROR) {

@@ -428,6 +441,7 @@ int IndexBlock::initializeCursor(AqlItemBlock* items, size_t pos) {
  _posInDocs = 0;

  return TRI_ERROR_NO_ERROR;
  DEBUG_END_BLOCK();
}

////////////////////////////////////////////////////////////////////////////////

@@ -435,6 +449,7 @@ int IndexBlock::initializeCursor(AqlItemBlock* items, size_t pos) {
////////////////////////////////////////////////////////////////////////////////

AqlItemBlock* IndexBlock::getSome(size_t atLeast, size_t atMost) {
  DEBUG_BEGIN_BLOCK();
  if (_done) {
    return nullptr;
  }

@@ -528,6 +543,7 @@ AqlItemBlock* IndexBlock::getSome(size_t atLeast, size_t atMost) {
  // Clear out registers no longer needed later:
  clearRegisters(res.get());
  return res.release();
  DEBUG_END_BLOCK();
}

////////////////////////////////////////////////////////////////////////////////

@@ -535,6 +551,7 @@ AqlItemBlock* IndexBlock::getSome(size_t atLeast, size_t atMost) {
////////////////////////////////////////////////////////////////////////////////

size_t IndexBlock::skipSome(size_t atLeast, size_t atMost) {
  DEBUG_BEGIN_BLOCK();
  if (_done) {
    return 0;
  }

@@ -589,6 +606,7 @@ size_t IndexBlock::skipSome(size_t atLeast, size_t atMost) {
  }

  return skipped;
  DEBUG_END_BLOCK();
}

////////////////////////////////////////////////////////////////////////////////

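The DEBUG_BEGIN_BLOCK() / DEBUG_END_BLOCK() pairs added to the IndexBlock members above are ArangoDB-internal macros whose definition is not shown in this diff. Purely as an illustrative assumption, bracketing macros of this kind are often a try/catch wrapper around the function body, which would also explain why DEBUG_END_BLOCK() can legally appear after a return statement. A hypothetical, self-contained sketch (these are not ArangoDB's actual definitions):

#include <iostream>
#include <stdexcept>

// Hypothetical definitions for illustration only.
#define DEBUG_BEGIN_BLOCK() try {
#define DEBUG_END_BLOCK()                                           \
  }                                                                 \
  catch (std::exception const& ex) {                                \
    std::cerr << __FILE__ << ":" << __LINE__ << ": " << ex.what()   \
              << std::endl;                                         \
    throw;                                                          \
  }

int exampleInitialize() {
  DEBUG_BEGIN_BLOCK();
  int res = 0;        // stand-in for ExecutionBlock::initialize()
  return res;
  DEBUG_END_BLOCK();  // closes the try opened above, even after the return
}

int main() { return exampleInitialize(); }
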
@@ -57,10 +57,12 @@ void IndexNode::toVelocyPackHelper(VPackBuilder& nodes, bool verbose) const {

  nodes.add(VPackValue("indexes"));
  {
#warning Old Implementation did use AqlIndex -> toVelocyPack, that contained unique, sparse, selectivity etc.
    VPackArrayBuilder guard(&nodes);
    for (auto& index : _indexes) {
      nodes.add(VPackValue(index));
      arangodb::Index* idx = trx()->getIndexByIdentifier(_collection->name, index);
      nodes.openObject();
      idx->toVelocyPack(nodes, false);
      nodes.close();
    }
  }
  nodes.add(VPackValue("condition"));

@@ -108,12 +110,8 @@ IndexNode::IndexNode(ExecutionPlan* plan, arangodb::basics::Json const& json)

  for (size_t i = 0; i < length; ++i) {
    auto entry = TRI_LookupArrayJson(indexes, i);
    if (!TRI_IsStringJson(entry)) {
      std::string msg = "The attribute index id is not a string.";
      THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_BAD_PARAMETER, msg);
    }
    _indexes.emplace_back(
        std::string(entry->_value._string.data, entry->_value._string.length - 1));
    std::string iid = JsonHelper::checkAndGetStringValue(entry, "id");
    _indexes.emplace_back(iid);
  }

  TRI_json_t const* condition =

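The serialization above follows the usual VelocyPack pattern: add the attribute name, open an array, and for each index open an object that the index fills in itself. A small self-contained sketch of that pattern using only the velocypack library (a sketch, not from this commit; the "id" values are placeholders, not what Index::toVelocyPack actually emits):

#include <velocypack/Builder.h>
#include <velocypack/Slice.h>
#include <velocypack/Value.h>
#include <iostream>
#include <string>

namespace vp = arangodb::velocypack;

int main() {
  vp::Builder nodes;
  nodes.openObject();
  nodes.add(vp::Value("indexes"));  // attribute name first ...
  nodes.openArray();                // ... then its array value
  for (int i = 0; i < 2; ++i) {
    nodes.openObject();             // one object per index
    nodes.add("id", vp::Value(std::to_string(i)));  // stand-in for idx->toVelocyPack(nodes, false)
    nodes.close();
  }
  nodes.close();  // close the array
  nodes.close();  // close the outer object
  std::cout << nodes.slice().toJson() << std::endl;
  // {"indexes":[{"id":"0"},{"id":"1"}]}
  return 0;
}
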
@@ -360,6 +360,7 @@ std::shared_ptr<VPackBuilder> Index::toVelocyPack(bool withFigures) const {
////////////////////////////////////////////////////////////////////////////////
/// @brief create a VelocyPack representation of the index
/// base functionality (called from derived classes)
/// note: needs an already-opened object as its input!
////////////////////////////////////////////////////////////////////////////////

void Index::toVelocyPack(VPackBuilder& builder, bool withFigures) const {

@@ -107,6 +107,9 @@ struct TRI_index_element_t {
    void* space = TRI_Allocate(
        TRI_UNKNOWN_MEM_ZONE,
        sizeof(TRI_doc_mptr_t*) + (sizeof(TRI_vpack_sub_t) * numSubs), false);
    if (space == nullptr) {
      return nullptr;
    }
    // FIXME: catch nullptr case?
    return new (space) TRI_index_element_t();
  }

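The allocation above pairs a raw buffer (the element header plus numSubs trailing sub-objects) with placement new, and the change adds an explicit nullptr check before constructing. A self-contained sketch of that idiom, with plain malloc standing in for TRI_Allocate and a made-up Element type:

#include <cstdlib>
#include <new>

struct Element {
  int value = 0;
};

// Allocate a raw buffer large enough for the object plus trailing payload,
// then construct the object in-place with placement new.
static Element* allocateElement(std::size_t extraBytes) {
  void* space = std::malloc(sizeof(Element) + extraBytes);
  if (space == nullptr) {
    return nullptr;  // mirrors the nullptr check added in the diff
  }
  return new (space) Element();
}

int main() {
  Element* e = allocateElement(64);
  if (e != nullptr) {
    e->~Element();  // placement-new'd objects need explicit destruction
    std::free(e);
  }
  return 0;
}
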
@@ -482,6 +482,14 @@ class Transaction {
  int setupState() { return _setupState; }

  TRI_document_collection_t* documentCollection(TRI_voc_cid_t) const;

  //////////////////////////////////////////////////////////////////////////////
  /// @brief get the index by its identifier. Will either throw or
  /// return a valid index. nullptr is impossible.
  //////////////////////////////////////////////////////////////////////////////

  arangodb::Index* getIndexByIdentifier(std::string const& collectionName,
                                        std::string const& indexId);

 private:

@@ -568,7 +576,7 @@ class Transaction {

  OperationResult countCoordinator(std::string const& collectionName);
  OperationResult countLocal(std::string const& collectionName);


 protected:

  //////////////////////////////////////////////////////////////////////////////

@@ -646,14 +654,6 @@ class Transaction {

  std::vector<arangodb::Index*> indexesForCollection(std::string const&) const;

  //////////////////////////////////////////////////////////////////////////////
  /// @brief get the index by it's identifier. Will either throw or
  /// return a valid index. nullptr is impossible.
  //////////////////////////////////////////////////////////////////////////////

  arangodb::Index* getIndexByIdentifier(std::string const& collectionName,
                                        std::string const& indexId);

 private:

  //////////////////////////////////////////////////////////////////////////////

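The getIndexByIdentifier declaration moves from the private to the public section here, and its comment documents a throw-or-succeed contract: callers never have to test for nullptr, only handle exceptions. A self-contained sketch of such a lookup API, with made-up types and data (not ArangoDB's implementation), shows the intended calling style:

#include <map>
#include <stdexcept>
#include <string>

struct Index { std::string id; };

static std::map<std::string, Index> registry = {{"12345", Index{"12345"}}};

// Throw-or-succeed lookup: callers never receive nullptr.
Index* getIndexByIdentifier(std::string const& indexId) {
  auto it = registry.find(indexId);
  if (it == registry.end()) {
    throw std::runtime_error("index not found: " + indexId);
  }
  return &it->second;
}

int main() {
  Index* idx = getIndexByIdentifier("12345");  // guaranteed non-null
  (void)idx;
  try {
    getIndexByIdentifier("does-not-exist");
  } catch (std::runtime_error const&) {
    // lookup failures surface as exceptions, not as nullptr
  }
  return 0;
}
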
@@ -4006,7 +4006,8 @@ int TRI_document_collection_t::insertSecondaryIndexes(
    // in case of no-memory, return immediately
    if (res == TRI_ERROR_OUT_OF_MEMORY) {
      return res;
    } else if (res != TRI_ERROR_NO_ERROR) {
    }
    if (res != TRI_ERROR_NO_ERROR) {
      if (res == TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED ||
          result == TRI_ERROR_NO_ERROR) {
        // "prefer" unique constraint violated

@@ -3675,7 +3675,7 @@ function ahuacatlUpdateSuite () {
      db._drop("UnitTestsAhuacatlEdge");
      var edge = db._createEdgeCollection("UnitTestsAhuacatlEdge");

      assertQueryError(errors.ERROR_ARANGO_DOCUMENT_HANDLE_BAD.code, "FOR i IN 1..50 UPSERT { foo: 1 } INSERT { foo: 'bar'} UPDATE { } INTO @@cn", { "@cn": edge.name() });
      assertQueryError(errors.ERROR_ARANGO_INVALID_EDGE_ATTRIBUTE.code, "FOR i IN 1..50 UPSERT { foo: 1 } INSERT { foo: 'bar'} UPDATE { } INTO @@cn", { "@cn": edge.name() });
      assertEqual(0, edge.count());

      db._drop("UnitTestsAhuacatlEdge");

@@ -3689,7 +3689,7 @@ function ahuacatlUpdateSuite () {
      db._drop("UnitTestsAhuacatlEdge");
      var edge = db._createEdgeCollection("UnitTestsAhuacatlEdge");

      assertQueryError(errors.ERROR_ARANGO_DOCUMENT_HANDLE_BAD.code, "FOR i IN 1..50 UPSERT { foo: 1 } INSERT { _to: CONCAT('UnitTestsAhuacatlUpdate1/', TO_STRING(i)) } UPDATE { } INTO @@cn", { "@cn": edge.name() });
      assertQueryError(errors.ERROR_ARANGO_INVALID_EDGE_ATTRIBUTE.code, "FOR i IN 1..50 UPSERT { foo: 1 } INSERT { _to: CONCAT('UnitTestsAhuacatlUpdate1/', TO_STRING(i)) } UPDATE { } INTO @@cn", { "@cn": edge.name() });
      assertEqual(0, edge.count());

      db._drop("UnitTestsAhuacatlEdge");

@@ -3703,7 +3703,7 @@ function ahuacatlUpdateSuite () {
      db._drop("UnitTestsAhuacatlEdge");
      var edge = db._createEdgeCollection("UnitTestsAhuacatlEdge");

      assertQueryError(errors.ERROR_ARANGO_DOCUMENT_HANDLE_BAD.code, "FOR i IN 1..50 UPSERT { foo: 1 } INSERT { _from: CONCAT('UnitTestsAhuacatlUpdate1/', TO_STRING(i)) } UPDATE { } INTO @@cn", { "@cn": edge.name() });
      assertQueryError(errors.ERROR_ARANGO_INVALID_EDGE_ATTRIBUTE.code, "FOR i IN 1..50 UPSERT { foo: 1 } INSERT { _from: CONCAT('UnitTestsAhuacatlUpdate1/', TO_STRING(i)) } UPDATE { } INTO @@cn", { "@cn": edge.name() });
      assertEqual(0, edge.count());

      db._drop("UnitTestsAhuacatlEdge");

@@ -441,7 +441,7 @@ function ahuacatlQueryOptimizerLimitTestSuite () {
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief check limit optimisation with index
/// @brief check limit optimization with index
////////////////////////////////////////////////////////////////////////////////

    testLimitFullCollectionHashIndex1 : function () {

@@ -1051,7 +1051,7 @@ function ahuacatlQuerySimpleTestSuite () {
////////////////////////////////////////////////////////////////////////////////

    testOverflowExecutionInt: function () {
      assertEqual([ null ], getQueryResults("FOR l IN [ 33939359949454345354858882332 ] RETURN l * l * l * l * l * l * l * l * l * l * l"));
      assertEqual([ 0 ], getQueryResults("FOR l IN [ 33939359949454345354858882332 ] RETURN l * l * l * l * l * l * l * l * l * l * l"));
    },

////////////////////////////////////////////////////////////////////////////////

@@ -1067,7 +1067,7 @@ function ahuacatlQuerySimpleTestSuite () {
////////////////////////////////////////////////////////////////////////////////

    testUnderflowExecutionInt: function () {
      assertEqual([ null ], getQueryResults("FOR l IN [ -33939359949454345354858882332 ] RETURN l * l * l * l * l * l * l * l * l * l * l"));
      assertEqual([ 0 ], getQueryResults("FOR l IN [ -33939359949454345354858882332 ] RETURN l * l * l * l * l * l * l * l * l * l * l"));
    },

////////////////////////////////////////////////////////////////////////////////

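These test expectations change from null to 0 because the overflow now goes through the normalization shown in the Expression.cpp hunk above: multiplying the literal by itself eleven times overflows a double to +inf (or -inf for the negative literal), and the new code reports that as 0. A small standalone C++ check of the same behaviour (a sketch, not part of this commit):

#include <cmath>
#include <iostream>

int main() {
  double l = 33939359949454345354858882332.0;  // the literal used in the tests above
  double result = l;
  for (int i = 0; i < 10; ++i) {
    result *= l;  // l * l * ... * l with 11 factors overflows a double
  }
  std::cout << std::isinf(result) << std::endl;  // 1: the product is +inf
  // Under the new normalization this non-finite value is reported as 0,
  // hence the changed expectation.
  double normalized = std::isfinite(result) ? result : 0.0;
  std::cout << normalized << std::endl;  // 0
  return 0;
}
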
@@ -270,77 +270,6 @@ function ahuacatlQueryCacheTestSuite () {
      assertEqual([ ], result.json);
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief test adding indexes
////////////////////////////////////////////////////////////////////////////////

    testAddIndexCapConstraint : function () {
      var query = "FOR doc IN @@collection SORT doc.value RETURN doc.value";
      var result, i;

      for (i = 1; i <= 5; ++i) {
        c1.save({ value: i });
      }

      AQL_QUERY_CACHE_PROPERTIES({ mode: "on" });
      result = AQL_EXECUTE(query, { "@collection": c1.name() });
      assertFalse(result.cached);
      assertEqual([ 1, 2, 3, 4, 5 ], result.json);

      result = AQL_EXECUTE(query, { "@collection": c1.name() });
      assertTrue(result.cached);
      assertEqual([ 1, 2, 3, 4, 5 ], result.json);

      c1.ensureCapConstraint(3);

      result = AQL_EXECUTE(query, { "@collection": c1.name() });
      assertFalse(result.cached);
      assertEqual([ 3, 4, 5 ], result.json);

      result = AQL_EXECUTE(query, { "@collection": c1.name() });
      assertTrue(result.cached);
      assertEqual([ 3, 4, 5 ], result.json);
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief test dropping indexes
////////////////////////////////////////////////////////////////////////////////

    testDropIndexCapConstraint : function () {
      var query = "FOR doc IN @@collection SORT doc.value RETURN doc.value";
      var result, i;

      c1.ensureCapConstraint(3);
      for (i = 1; i <= 5; ++i) {
        c1.save({ value: i });
      }

      AQL_QUERY_CACHE_PROPERTIES({ mode: "on" });
      result = AQL_EXECUTE(query, { "@collection": c1.name() });
      assertFalse(result.cached);
      assertEqual([ 3, 4, 5 ], result.json);

      result = AQL_EXECUTE(query, { "@collection": c1.name() });
      assertTrue(result.cached);
      assertEqual([ 3, 4, 5 ], result.json);

      var indexes = c1.getIndexes();
      assertEqual(2, indexes.length);
      assertEqual("cap", indexes[1].type);
      assertTrue(c1.dropIndex(indexes[1].id));

      indexes = c1.getIndexes();
      assertEqual(1, indexes.length);

      result = AQL_EXECUTE(query, { "@collection": c1.name() });
      assertFalse(result.cached);
      assertEqual([ 3, 4, 5 ], result.json);

      result = AQL_EXECUTE(query, { "@collection": c1.name() });
      assertTrue(result.cached);
      assertEqual([ 3, 4, 5 ], result.json);
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief test queries w/ parse error
////////////////////////////////////////////////////////////////////////////////

@@ -112,8 +112,14 @@ v8::Handle<v8::Value> TRI_VPackToV8(v8::Isolate* isolate,
      return v8::Null(isolate);
    case VPackValueType::Bool:
      return v8::Boolean::New(isolate, slice.getBool());
    case VPackValueType::Double:
    case VPackValueType::Double: {
      // convert NaN, +inf & -inf to null
      double value = slice.getDouble();
      if (std::isnan(value) || !std::isfinite(value) || value == HUGE_VAL || value == -HUGE_VAL) {
        return v8::Null(isolate);
      }
      return v8::Number::New(isolate, slice.getDouble());
    }
    case VPackValueType::Int:
      return v8::Number::New(isolate, static_cast<double>(slice.getInt()));
    case VPackValueType::UInt:

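The same class of check appears here on the VelocyPack-to-V8 conversion path, but with a different outcome: a non-finite double becomes null for JavaScript rather than 0. A minimal standalone sketch of that classification, with std::optional standing in for v8::Null (an assumption for illustration, not this file's code):

#include <cmath>
#include <iostream>
#include <limits>
#include <optional>

// Non-finite doubles map to "no value" (null on the V8 side); finite ones pass through.
std::optional<double> toV8Number(double value) {
  if (std::isnan(value) || !std::isfinite(value)) {
    return std::nullopt;
  }
  return value;
}

int main() {
  std::cout << toV8Number(1.5).has_value() << std::endl;   // 1
  double inf = std::numeric_limits<double>::infinity();
  std::cout << toV8Number(inf).has_value() << std::endl;   // 0 -> would become v8::Null
  return 0;
}
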