1
0
Fork 0

Merge branch 'devel' of https://github.com/arangodb/arangodb into devel

This commit is contained in:
Kaveh Vahedipour 2016-06-07 12:31:45 +02:00
commit 6cf7caad7c
113 changed files with 2678 additions and 2002 deletions

View File

@ -55,9 +55,9 @@ class NonCopyable {
// prevent heap allocation
struct NonHeapAllocatable {
void* operator new(std::size_t) throw(std::bad_alloc) = delete;
void operator delete(void*) throw() = delete;
void operator delete(void*) noexcept = delete;
void* operator new[](std::size_t) throw(std::bad_alloc) = delete;
void operator delete[](void*) throw() = delete;
void operator delete[](void*) noexcept = delete;
};
#ifdef _WIN32

View File

@ -309,11 +309,11 @@ class Builder {
return _pos;
}
bool isEmpty() const throw() { return _pos == 0; }
bool isEmpty() const noexcept { return _pos == 0; }
bool isClosed() const throw() { return _stack.empty(); }
bool isClosed() const noexcept { return _stack.empty(); }
bool isOpenArray() const throw() {
bool isOpenArray() const noexcept {
if (_stack.empty()) {
return false;
}
@ -321,7 +321,7 @@ class Builder {
return _start[tos] == 0x06 || _start[tos] == 0x13;
}
bool isOpenObject() const throw() {
bool isOpenObject() const noexcept {
if (_stack.empty()) {
return false;
}

View File

@ -68,6 +68,9 @@ struct Exception : std::exception {
BuilderKeyAlreadyWritten = 38,
BuilderKeyMustBeString = 39,
ValidatorInvalidLength = 50,
ValidatorInvalidType = 51,
UnknownError = 999
};
@ -83,11 +86,11 @@ struct Exception : std::exception {
explicit Exception(ExceptionType type) : Exception(type, message(type)) {}
char const* what() const throw() { return _msg.c_str(); }
char const* what() const noexcept { return _msg.c_str(); }
ExceptionType errorCode() const throw() { return _type; }
ExceptionType errorCode() const noexcept { return _type; }
static char const* message(ExceptionType type) throw() {
static char const* message(ExceptionType type) noexcept {
switch (type) {
case InternalError:
return "Internal error";
@ -139,6 +142,11 @@ struct Exception : std::exception {
return "The key of the next key/value pair is already written";
case BuilderKeyMustBeString:
return "The key of the next key/value pair must be a string";
case ValidatorInvalidType:
return "Invalid type found in binary data";
case ValidatorInvalidLength:
return "Invalid length found in binary data";
case UnknownError:
default:

View File

@ -42,9 +42,8 @@ class ArrayIterator {
public:
ArrayIterator() = delete;
ArrayIterator(Slice const& slice, bool allowRandomIteration = false)
: _slice(slice), _size(_slice.length()), _position(0), _current(nullptr),
_allowRandomIteration(allowRandomIteration) {
explicit ArrayIterator(Slice const& slice)
: _slice(slice), _size(_slice.length()), _position(0), _current(nullptr) {
if (slice.type() != ValueType::Array) {
throw Exception(Exception::InvalidValueType, "Expecting Array slice");
}
@ -56,15 +55,13 @@ class ArrayIterator {
: _slice(other._slice),
_size(other._size),
_position(other._position),
_current(other._current),
_allowRandomIteration(other._allowRandomIteration) {}
_current(other._current) {}
ArrayIterator& operator=(ArrayIterator const& other) {
_slice = other._slice;
_size = other._size;
_position = other._position;
_current = other._current;
_allowRandomIteration = other._allowRandomIteration;
return *this;
}
@ -97,23 +94,23 @@ class ArrayIterator {
return _slice.at(_position);
}
ArrayIterator begin() { return ArrayIterator(_slice, _allowRandomIteration); }
ArrayIterator begin() { return ArrayIterator(_slice); }
ArrayIterator begin() const { return ArrayIterator(_slice, _allowRandomIteration); }
ArrayIterator begin() const { return ArrayIterator(_slice); }
ArrayIterator end() {
auto it = ArrayIterator(_slice, _allowRandomIteration);
auto it = ArrayIterator(_slice);
it._position = it._size;
return it;
}
ArrayIterator end() const {
auto it = ArrayIterator(_slice, _allowRandomIteration);
auto it = ArrayIterator(_slice);
it._position = it._size;
return it;
}
inline bool valid() const throw() { return (_position < _size); }
inline bool valid() const noexcept { return (_position < _size); }
inline Slice value() const {
if (_position >= _size) {
@ -122,18 +119,18 @@ class ArrayIterator {
return operator*();
}
inline bool next() throw() {
inline bool next() noexcept {
operator++();
return valid();
}
inline ValueLength index() const throw() { return _position; }
inline ValueLength index() const noexcept { return _position; }
inline ValueLength size() const throw() { return _size; }
inline ValueLength size() const noexcept { return _size; }
inline bool isFirst() const throw() { return (_position == 0); }
inline bool isFirst() const noexcept { return (_position == 0); }
inline bool isLast() const throw() { return (_position + 1 >= _size); }
inline bool isLast() const noexcept { return (_position + 1 >= _size); }
inline void forward(ValueLength count) {
if (_position + count >= _size) {
@ -161,7 +158,7 @@ class ArrayIterator {
auto h = _slice.head();
if (h == 0x13) {
_current = _slice.at(0).start();
} else if (_allowRandomIteration) {
} else {
_current = _slice.begin() + _slice.findDataOffset(h);
}
}
@ -172,7 +169,6 @@ class ArrayIterator {
ValueLength _size;
ValueLength _position;
uint8_t const* _current;
bool _allowRandomIteration;
};
class ObjectIterator {
@ -267,7 +263,7 @@ class ObjectIterator {
return it;
}
inline bool valid() const throw() { return (_position < _size); }
inline bool valid() const noexcept { return (_position < _size); }
inline Slice key(bool translate = true) const {
if (_position >= _size) {
@ -290,18 +286,18 @@ class ObjectIterator {
return _slice.getNthValue(_position);
}
inline bool next() throw() {
inline bool next() noexcept {
operator++();
return valid();
}
inline ValueLength index() const throw() { return _position; }
inline ValueLength index() const noexcept { return _position; }
inline ValueLength size() const throw() { return _size; }
inline ValueLength size() const noexcept { return _size; }
inline bool isFirst() const throw() { return (_position == 0); }
inline bool isFirst() const noexcept { return (_position == 0); }
inline bool isLast() const throw() { return (_position + 1 >= _size); }
inline bool isLast() const noexcept { return (_position + 1 >= _size); }
private:
Slice _slice;

View File

@ -89,7 +89,7 @@ class Parser {
Options const* options;
Parser(Parser const&) = delete;
Parser(Parser &&) = delete;
Parser(Parser&&) = delete;
Parser& operator=(Parser const&) = delete;
Parser& operator=(Parser&&) = delete;
~Parser() = default;
@ -209,7 +209,7 @@ class Parser {
ValueLength parseInternal(bool multi);
inline bool isWhiteSpace(uint8_t i) const throw() {
inline bool isWhiteSpace(uint8_t i) const noexcept {
return (i == ' ' || i == '\t' || i == '\n' || i == '\r');
}

View File

@ -42,12 +42,18 @@
#include "velocypack/Value.h"
#include "velocypack/ValueType.h"
#ifndef VELOCYPACK_HASH
#ifdef VELOCYPACK_XXHASH
// forward for XXH64 function declared elsewhere
extern "C" unsigned long long XXH64(void const*, size_t, unsigned long long);
#define VELOCYPACK_HASH(mem, size, seed) XXH64(mem, size, seed)
#endif
#ifdef VELOCYPACK_FASTHASH
// forward for fasthash64 function declared elsewhere
uint64_t fasthash64(void const*, size_t, uint64_t);
#define VELOCYPACK_HASH(mem, size, seed) fasthash64(mem, size, seed)
#endif
namespace arangodb {
@ -355,7 +361,7 @@ class Slice {
}
// extract the nth key from an Object
Slice getNthKey(ValueLength index, bool) const;
Slice getNthKey(ValueLength index, bool translate) const;
// extract the nth value from an Object
Slice getNthValue(ValueLength index) const {
@ -758,6 +764,9 @@ class Slice {
}
return 9;
}
// get the offset for the nth member from an Array type
ValueLength getNthOffset(ValueLength index) const;
Slice makeKey() const;
@ -814,9 +823,6 @@ class Slice {
Slice getFromCompactObject(std::string const& attribute) const;
// get the offset for the nth member from an Array type
ValueLength getNthOffset(ValueLength index) const;
// extract the nth member from an Array
Slice getNth(ValueLength index) const;

View File

@ -0,0 +1,72 @@
////////////////////////////////////////////////////////////////////////////////
/// @brief Library to build up VPack documents.
///
/// DISCLAIMER
///
/// Copyright 2015 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Max Neunhoeffer
/// @author Jan Steemann
/// @author Copyright 2015, ArangoDB GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////
#ifndef VELOCYPACK_VALIDATOR_H
#define VELOCYPACK_VALIDATOR_H 1
#include "velocypack/velocypack-common.h"
#include "velocypack/Options.h"
namespace arangodb {
namespace velocypack {
class Slice;
class Validator {
// This class can validate a binary VelocyPack value.
public:
explicit Validator(Options const* options = &Options::Defaults)
: options(options) {
if (options == nullptr) {
throw Exception(Exception::InternalError, "Options cannot be a nullptr");
}
}
~Validator() = default;
public:
// validates a VelocyPack Slice value starting at ptr, with length bytes length
// throws if the data is invalid
bool validate(char const* ptr, size_t length, bool isSubPart = false) const {
return validate(reinterpret_cast<uint8_t const*>(ptr), length, isSubPart);
}
// validates a VelocyPack Slice value starting at ptr, with length bytes length
// throws if the data is invalid
bool validate(uint8_t const* ptr, size_t length, bool isSubPart = false) const;
private:
void validateBufferLength(size_t expected, size_t actual, bool isSubPart) const;
void validateSliceLength(uint8_t const* ptr, size_t length, bool isSubPart) const;
public:
Options const* options;
};
} // namespace arangodb::velocypack
} // namespace arangodb
#endif

View File

@ -88,7 +88,7 @@ static constexpr std::size_t checkOverflow(ValueLength length) {
#endif
// calculate the length of a variable length integer in unsigned LEB128 format
static inline ValueLength getVariableValueLength(ValueLength value) throw() {
static inline ValueLength getVariableValueLength(ValueLength value) noexcept {
ValueLength len = 1;
while (value >= 0x80) {
value >>= 7;
@ -139,7 +139,7 @@ static inline void storeVariableValueLength(uint8_t* dst, ValueLength value) {
// returns current value for UTCDate
int64_t currentUTCDateValue();
static inline uint64_t toUInt64(int64_t v) throw() {
static inline uint64_t toUInt64(int64_t v) noexcept {
// If v is negative, we need to add 2^63 to make it positive,
// before we can cast it to an uint64_t:
uint64_t shift2 = 1ULL << 63;
@ -151,7 +151,7 @@ static inline uint64_t toUInt64(int64_t v) throw() {
// uint64_t is not guaranteed to work for negative values!
}
static inline int64_t toInt64(uint64_t v) throw() {
static inline int64_t toInt64(uint64_t v) noexcept {
uint64_t shift2 = 1ULL << 63;
int64_t shift = static_cast<int64_t>(shift2 - 1);
return v >= shift2 ? (static_cast<int64_t>(v - shift2) - shift) - 1
@ -161,7 +161,7 @@ static inline int64_t toInt64(uint64_t v) throw() {
// read an unsigned little endian integer value of the
// specified length, starting at the specified byte offset
template <typename T>
static inline T readInteger(uint8_t const* start, ValueLength length) throw() {
static inline T readInteger(uint8_t const* start, ValueLength length) noexcept {
uint64_t value = 0;
uint64_t x = 0;
uint8_t const* end = start + length;
@ -172,11 +172,11 @@ static inline T readInteger(uint8_t const* start, ValueLength length) throw() {
return value;
}
static inline uint64_t readUInt64(uint8_t const* start) throw() {
static inline uint64_t readUInt64(uint8_t const* start) noexcept {
return readInteger<uint64_t>(start, 8);
}
static inline void storeUInt64(uint8_t* start, uint64_t value) throw() {
static inline void storeUInt64(uint8_t* start, uint64_t value) noexcept {
uint8_t const* end = start + 8;
do {
*start++ = static_cast<uint8_t>(value & 0xff);

View File

@ -309,7 +309,7 @@ void Dumper::dumpValue(Slice const* slice, Slice const* base) {
}
case ValueType::Array: {
ArrayIterator it(*slice, true);
ArrayIterator it(*slice);
_sink->push_back('[');
if (options->prettyPrint) {
_sink->push_back('\n');

View File

@ -423,7 +423,7 @@ uint64_t Slice::normalizedHash(uint64_t seed) const {
// over all array members
uint64_t const n = length() ^ 0xba5bedf00d;
value = VELOCYPACK_HASH(&n, sizeof(n), seed);
for (auto const& it : ArrayIterator(*this, true)) {
for (auto const& it : ArrayIterator(*this)) {
value ^= it.normalizedHash(value);
}
} else if (isObject()) {

318
3rdParty/velocypack/src/Validator.cpp vendored Normal file
View File

@ -0,0 +1,318 @@
////////////////////////////////////////////////////////////////////////////////
/// @brief Library to build up VPack documents.
///
/// DISCLAIMER
///
/// Copyright 2015 ArangoDB GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
///
/// @author Max Neunhoeffer
/// @author Jan Steemann
/// @author Copyright 2015, ArangoDB GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////
#include "velocypack/velocypack-common.h"
#include "velocypack/Validator.h"
#include "velocypack/Exception.h"
#include "velocypack/Slice.h"
#include "velocypack/ValueType.h"
using namespace arangodb::velocypack;
bool Validator::validate(uint8_t const* ptr, size_t length, bool isSubPart) const {
if (length == 0) {
throw Exception(Exception::ValidatorInvalidLength, "length 0 is invalid for any VelocyPack value");
}
uint8_t const head = *ptr;
// type() only reads the first byte, which is safe
ValueType const type = Slice(ptr).type();
if (type == ValueType::None && head != 0x00) {
// invalid type
throw Exception(Exception::ValidatorInvalidType);
}
// special handling for certain types...
switch (type) {
case ValueType::None:
case ValueType::Null:
case ValueType::Bool:
case ValueType::MinKey:
case ValueType::MaxKey:
case ValueType::SmallInt:
case ValueType::Int:
case ValueType::UInt:
case ValueType::Double:
case ValueType::UTCDate:
case ValueType::Binary:
case ValueType::Illegal: {
break;
}
case ValueType::String: {
if (head == 0xbf) {
// long UTF-8 string. must be at least 9 bytes long so we
// can read the entire string length safely
validateBufferLength(1 + 8, length, true);
}
break;
}
case ValueType::Array: {
ValueLength byteLength = 0;
bool equalSize = false;
bool hasIndexTable = false;
if (head >= 0x02 && head <= 0x05) {
// Array without index table, with 1-8 bytes bytelength, all values with same length
byteLength = 1 << (head - 0x02);
equalSize = true;
} else if (head >= 0x06 && head <= 0x09) {
// Array with index table, with 1-8 bytes bytelength
byteLength = 1 << (head - 0x06);
hasIndexTable = true;
}
if (head == 0x13) {
// compact Array without index table
validateBufferLength(2, length, true);
uint8_t const* p = ptr + 1;
uint8_t const* e = p + length;
ValueLength shifter = 0;
while (true) {
uint8_t c = *p;
byteLength += (c & 0x7f) << shifter;
shifter += 7;
++p;
if (!(c & 0x80)) {
break;
}
if (p == e) {
throw Exception(Exception::ValidatorInvalidLength, "Array length value is out of bounds");
}
}
if (byteLength > length || byteLength < 4) {
throw Exception(Exception::ValidatorInvalidLength, "Array length value is out of bounds");
}
uint8_t const* data = p;
p = ptr + byteLength - 1;
ValueLength nrItems = 0;
shifter = 0;
while (true) {
uint8_t c = *p;
nrItems += (c & 0x7f) << shifter;
shifter += 7;
--p;
if (!(c & 0x80)) {
break;
}
if (p == ptr + byteLength) {
throw Exception(Exception::ValidatorInvalidLength, "Array length value is out of bounds");
}
}
if (nrItems == 0) {
throw Exception(Exception::ValidatorInvalidLength, "Array length value is out of bounds");
}
++p;
// validate the array members
e = p;
p = data;
while (nrItems-- > 0) {
validate(p, e - p, true);
p += Slice(p).byteSize();
}
} else if (byteLength > 0) {
ValueLength nrItemsLength = 0;
if (head >= 0x06) {
nrItemsLength = byteLength;
}
validateBufferLength(1 + byteLength + nrItemsLength, length, true);
ValueLength nrItems = Slice(ptr).length();
uint8_t const* p = ptr + 1 + byteLength;
if (!equalSize) {
p += byteLength;
}
uint8_t const* e = ptr + length;
ValueLength l = 0;
while (nrItems > 0) {
if (p >= e) {
throw Exception(Exception::ValidatorInvalidLength, "Array value is out of bounds");
}
// validate sub value
validate(p, e - p, true);
ValueLength al = Slice(p).byteSize();
if (equalSize) {
if (l == 0) {
l = al;
} else if (l != al) {
throw Exception(Exception::ValidatorInvalidLength, "Unexpected Array value length");
}
}
p += al;
--nrItems;
}
if (hasIndexTable) {
// now also validate index table
nrItems = Slice(ptr).length();
for (ValueLength i = 0; i < nrItems; ++i) {
ValueLength offset = Slice(ptr).getNthOffset(i);
if (offset < 1 + byteLength + nrItemsLength ||
offset >= Slice(ptr).byteSize() - nrItems * byteLength) {
throw Exception(Exception::ValidatorInvalidLength, "Array value offset is out of bounds");
}
validate(ptr + offset, length - offset, true);
}
}
}
break;
}
case ValueType::Object: {
ValueLength byteLength = 0;
if (head >= 0x0b && head <= 0x0e) {
// Object with index table, with 1-8 bytes bytelength, sorted
byteLength = 1 << (head - 0x0b);
} else if (head >= 0x0f && head <= 0x12) {
// Object with index table, with 1-8 bytes bytelength, unsorted
byteLength = 1 << (head - 0x0f);
} else if (head == 0x14) {
// compact Object without index table
// TODO
}
if (byteLength > 0) {
validateBufferLength(1 + byteLength, length, true);
ValueLength nrItems = Slice(ptr).length();
uint8_t const* p = ptr + 1 + byteLength;
uint8_t const* e = ptr + length;
while (nrItems > 0) {
if (p >= e) {
throw Exception(Exception::ValidatorInvalidLength, "Object key offset is out of bounds");
}
// validate key
validate(p, e - p, true);
// skip over key
p += Slice(p).byteSize();
if (p >= e) {
throw Exception(Exception::ValidatorInvalidLength, "Object value offset is out of bounds");
}
// validate value
validate(p, e - p, true);
// skip over value
p += Slice(p).byteSize();
--nrItems;
}
// now also validate index table
for (ValueLength i = 0; i < nrItems; ++i) {
// get offset to key
ValueLength offset = Slice(ptr).getNthOffset(i);
if (offset >= length) {
throw Exception(Exception::ValidatorInvalidLength, "Object key offset is out of bounds");
}
// validate length of key
validate(ptr + offset, length - offset, true);
// skip over key
offset += Slice(ptr + offset).byteSize();
if (offset >= length) {
throw Exception(Exception::ValidatorInvalidLength, "Object value offset is out of bounds");
}
// validate length of value
validate(ptr + offset, length - offset, true);
}
}
break;
}
case ValueType::BCD: {
throw Exception(Exception::NotImplemented);
}
case ValueType::External: {
// check if Externals are forbidden
if (options->disallowExternals) {
throw Exception(Exception::BuilderExternalsDisallowed);
}
// validate if Slice length exceeds the given buffer
validateBufferLength(1 + sizeof(void*), length, true);
// do not perform pointer validation
break;
}
case ValueType::Custom: {
ValueLength byteSize = 0;
if (head == 0xf0) {
byteSize = 1 + 1;
} else if (head == 0xf1) {
byteSize = 1 + 2;
} else if (head == 0xf2) {
byteSize = 1 + 4;
} else if (head == 0xf3) {
byteSize = 1 + 8;
} else if (head >= 0xf4 && head <= 0xf6) {
validateBufferLength(1 + 1, length, true);
byteSize = 1 + 1 + readInteger<ValueLength>(ptr + 1, 1);
if (byteSize == 1 + 1) {
throw Exception(Exception::ValidatorInvalidLength, "Invalid size for Custom type");
}
} else if (head >= 0xf7 && head <= 0xf9) {
validateBufferLength(1 + 2, length, true);
byteSize = 1 + 2 + readInteger<ValueLength>(ptr + 1, 2);
if (byteSize == 1 + 2) {
throw Exception(Exception::ValidatorInvalidLength, "Invalid size for Custom type");
}
} else if (head >= 0xfa && head <= 0xfc) {
validateBufferLength(1 + 4, length, true);
byteSize = 1 + 4 + readInteger<ValueLength>(ptr + 1, 4);
if (byteSize == 1 + 4) {
throw Exception(Exception::ValidatorInvalidLength, "Invalid size for Custom type");
}
} else if (head >= 0xfd) {
validateBufferLength(1 + 8, length, true);
byteSize = 1 + 8 + readInteger<ValueLength>(ptr + 1, 8);
if (byteSize == 1 + 8) {
throw Exception(Exception::ValidatorInvalidLength, "Invalid size for Custom type");
}
}
validateSliceLength(ptr, byteSize, isSubPart);
break;
}
}
// common validation that must happen for all types
validateSliceLength(ptr, length, isSubPart);
return true;
}
void Validator::validateBufferLength(size_t expected, size_t actual, bool isSubPart) const {
if ((expected > actual) ||
(expected != actual && !isSubPart)) {
throw Exception(Exception::ValidatorInvalidLength, "given buffer length is unequal to actual length of Slice in buffer");
}
}
void Validator::validateSliceLength(uint8_t const* ptr, size_t length, bool isSubPart) const {
size_t actual = static_cast<size_t>(Slice(ptr).byteSize());
validateBufferLength(actual, length, isSubPart);
}

View File

@ -1,82 +0,0 @@
/* The MIT License
Copyright (C) 2012 Zilong Tan (eric.zltan@gmail.com)
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use, copy,
modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
#include <cstdint>
#include <cstdlib>
namespace arangodb {
namespace velocypack {
// Compression function for Merkle-Damgard construction.
// This function is generated using the framework provided.
static inline uint64_t mix(uint64_t h) {
h ^= h >> 23;
h *= 0x2127599bf4325c37ULL;
h ^= h >> 47;
return h;
}
uint64_t fasthash64(const void *buf, size_t len, uint64_t seed) {
const uint64_t m = 0x880355f21e6d1965ULL;
const uint64_t *pos = (const uint64_t *)buf;
const uint64_t *end = pos + (len / 8);
const unsigned char *pos2;
uint64_t h = seed ^ (len * m);
uint64_t v;
while (pos != end) {
v = *pos++;
h ^= mix(v);
h *= m;
}
pos2 = (const unsigned char *)pos;
v = 0;
switch (len & 7) {
case 7:
v ^= (uint64_t)pos2[6] << 48;
case 6:
v ^= (uint64_t)pos2[5] << 40;
case 5:
v ^= (uint64_t)pos2[4] << 32;
case 4:
v ^= (uint64_t)pos2[3] << 24;
case 3:
v ^= (uint64_t)pos2[2] << 16;
case 2:
v ^= (uint64_t)pos2[1] << 8;
case 1:
v ^= (uint64_t)pos2[0];
h ^= mix(v);
h *= m;
}
return mix(h);
}
} // namespace arangodb::velocypack
} // namespace arangodb

View File

@ -59,8 +59,7 @@ static_assert(sizeof(std::size_t) == sizeof(uint64_t),
int64_t arangodb::velocypack::currentUTCDateValue() {
return static_cast<int64_t>(
std::chrono::system_clock::now().time_since_epoch().count() /
std::chrono::milliseconds(1).count());
std::chrono::duration_cast<std::chrono::milliseconds>(std::chrono::system_clock::now().time_since_epoch()).count());
}
static_assert(sizeof(arangodb::velocypack::ValueLength) >= sizeof(SIZE_MAX),

View File

@ -625,6 +625,12 @@ endif ()
add_subdirectory(3rdParty)
################################################################################
## VELOCYPACK
################################################################################
add_definitions("-DVELOCYPACK_XXHASH=1")
set(BOOST_VERSION 1.61.0b1)
foreach (LINK_DIR ${LINK_DIRECTORIES})

View File

@ -267,9 +267,9 @@ RANDOM_TOKEN(8) // "zGl09z42"
RANDOM_TOKEN(8) // "m9w50Ft9"
```
!SUBSECTION REGEX()
!SUBSECTION REGEX_TEST()
`REGEX(text, search, caseInsensitive) → bool`
`REGEX_TEST(text, search, caseInsensitive) → bool`
Check whether the pattern *search* is contained in the string *text*,
using regular expression matching.
@ -327,9 +327,9 @@ If the regular expression in *search* is invalid, a warning will be raised
and the function will return *false*.
```js
REGEX("the quick brown fox", "the.*fox") // true
REGEX("the quick brown fox", "^(a|the)\s+(quick|slow).*f.x$") // true
REGEX("the\nquick\nbrown\nfox", "^the(\n[a-w]+)+\nfox$") // true
REGEX_TEST("the quick brown fox", "the.*fox") // true
REGEX_TEST("the quick brown fox", "^(a|the)\s+(quick|slow).*f.x$") // true
REGEX_TEST("the\nquick\nbrown\nfox", "^the(\n[a-w]+)+\nfox$") // true
```
!SUBSECTION REVERSE()

View File

@ -118,6 +118,7 @@ The complete list of keywords is currently:
<li>REMOVE</li>
<li>REPLACE</li>
<li>RETURN</li>
<li>SHORTEST_PATH</li>
<li>SORT</li>
<li>TRUE</li>
<li>UPDATE</li>

View File

@ -1,52 +1,154 @@
!SECTION Distributed deployment using Apache Mesos
Philosophie: Es muss extrem einfach aussehen, trotzdem sollte die Section
hinreichend viel Detailinformation liefern. Ich weiß, dass das ein
Widerspruch ist.
ArangoDB has a sophisticated yet easy to use cluster mode. To leverage the full cluster feature set (monitoring, scaling and failover) you have to run ArangoDB on some kind of cluster management system. Currently ArangoDB relies on Apache Mesos in that matter. Mesos is a cluster operating system which powers some of the world's biggest datacenters, running several thousands of nodes. Running Arango
!SUBSECTION DC/OS
Explain that ArangoDB is in "the universe" and can be launched under
"Services" in the UI.
DC/OS is the recommended way to install a cluster as it eases much of the process to install a Mesos cluster. You can deploy it very quickly on a variety of cloud hosters or setup your own DC/OS locally. DC/OS is a set of tools built on top of Apache Mesos. Apache Mesos is a so called "Distributed Cluster Operation System" and the core of DC/OS. Apache Mesos has the concept of so called [persistent volumes](http://mesos.apache.org/documentation/latest/persistent-volume/) which make it perfectly suitable for a database.
Show how one reaches the coordinator UI via the DC/OS UI after deployment.
!SUBSUBSECTION Installing
Explain about scaling up and down via the UI.
DC/OS comes with its own package management. Packages can be installed from the so called "Universe". As an official DC/OS partner ArangoDB can be installed from there straight away.
Explain configuration options or link to some place where they are
explained.
1. Installing via DC/OS UI
1. Go to https://dcos.io and prepare a cluster
2. Open your browser and go to the DC/OS admin interface
3. Open the "Universe" tab
4. Locate arangodb and hit "Install Package"
5. Optionally review the settings (Advanced Installation)
6. Press "Install Package"
Explain about the `dcos` CLI and the ArangoDB `dcos`-subcommand.
2. Installing via DC/OS Commandline
Explain how to shut down an ArangoDB cluster cleanly.
1. Install the [dcos cli](https://docs.mesosphere.com/usage/cli/)
2. Open a terminal and issue `dcos install arangodb`
Both options essentially do the same thing in the background. Both are starting ArangoDB with its default options set. To review the default options click "Advanced Installation" in the webinterface or type `dcos package describe --config arangodb`.
Say that ArangoDB uses persistent volumes, explain advantage.
To get an explanation of the various command line options please check the latest options here (choose the most recent number and have a look at config.json):
Say that this makes it impossible to deploy ArangoDB to "public Mesos Agents".
Explain how to setup a marathon-lb load balancer to reach the coordinators
either from within the DC/OS cluster or from the outside (via public slaves).
https://github.com/mesosphere/universe/tree/version-3.x/repo/packages/A/arangodb
Explain authentication setup (once we have it).
Alternatively check the DC/OS webinterface. Hit installing ArangoDB on the "Services" tab and examine "Advanced Installation".
Staubsauger?
After installing DC/OS will start deploying the ArangoDB cluster on the DC/OS cluster. You can watch ArangoDB starting on the "Services" tab in the webinterface. Once it is listed as healthy click the link next to it and you should see the ArangoDB webinterface.
!SUBSUBSECTION While running
!SUBSUBSUBSECTION ArangoDB Mesos framework
While ArangoDB is deployed Mesos will keep your cluster running. The web interface has many monitoring facilities so be sure to make yourself familiar with the DC/OS webinterface. As a fault tolerant system Mesos will take care of most failure scenarios automatically. Mesos does that by running ArangoDB as a so called "framework". This framework has been specifically built to keep ArangoDB running in a healthy condition on the Mesos cluster. From time to time a task might fail. The ArangoDB framework will then take care of rescheduling the failed task. As it knows about the very specifics of each cluster task and its role it will automatically take care of most failure scenarios.
To inspect what the framework is doing go to `WEBINTERFACEURL`/mesos in your browser. Locate the task "arangodb" and inspect stderr in the "Sandbox". This can be of interest for example when a slave got lost and the framework is rescheduling the task.
!SUBSUBSUBSECTION Using ArangoDB
To use ArangoDB as a datastore in your DC/OS cluster you can make use of the service discovery of DC/OS. Assuming you deployed a standard ArangoDB cluster the [mesos dns](https://github.com/mesosphere/mesos-dns) will know about `arangodb.mesos`. By doing a SRV DNS request (check the documentation of mesos dns) you can find out the port where the internal HAProxy of ArangoDB is running. This will offer a round robin load balancer to access all ArangoDB coordinators.
!SUBSUBSUBSECTION Scaling ArangoDB
To change the settings of your ArangoDB Cluster access the ArangoDB UI and hit "Nodes". On the scale tab you will have the ability to scale your cluster up and down.
After changing the settings the ArangoDB framework will take care of the rest. Scaling your cluster up is generally a straightforward operation as Mesos will simply launch another task and be done with it. Scaling down is a bit more complicated as the data first has to be moved to some other place so that will naturally take somewhat longer.
Please note that scaling operations might not always work. For example if the underlying Mesos cluster is completely saturated with tasks scaling up will not be possible. Scaling down might also fail due to not being able to move all shards of a DBServer to a new destination because of size limitations. Be sure to check the output of the ArangoDB framework.
!SUBSUBSECTION Deinstallation
Deinstalling ArangoDB is a bit more difficult as there is much state being kept in the Mesos cluster, which is not automatically cleaned up. To deinstall from the command line use the following one-liner:
`dcos arangodb uninstall ; dcos package uninstall arangodb`
This will first cleanup the state in the cluster and then uninstall arangodb.
!SUBSUBSUBSECTION arangodb-cleanup-framework
Should you forget to clean up the state you can do so later by using the [arangodb-cleanup-framework](https://github.com/arangodb/arangodb-cleanup-framework/) container. Otherwise you might not be able to deploy a new ArangoDB installation.
The cleanup framework will announce itself as a normal ArangoDB. Mesos will recognize this and offer all persistent volumes it still has for ArangoDB to this framework. The cleanup framework will then properly free the persistent volumes. Finally it will clean up any state left in zookeeper (the central configuration manager in a Mesos cluster).
To deploy follow the instructions in the github repository. After deploying watch the output in the sandbox of the Mesos webinterface. After a while there shouldn't be any persistent resource offers anymore as everything was cleaned up. After that you can delete the cleanup framework again via marathon.
!SUBSECTION Apache Mesos and Marathon
Explain how to launch an ArangoDB cluster via Marathon in an Apache
Mesos cluster.
You can also install ArangoDB on a bare Apache Mesos cluster provided that marathon is running on it.
Explain configuration options or link to some place where they are
explained.
Doing so has the following downsides:
Show how one reaches the coordinator UI, HOW DOES THIS ACTUALLY WORK?
DO WE NEED EXPLAIN ABOUT sshuttle?
1. Manual Mesos cluster setup
1. You need to implement your own service discovery
1. You are missing the dcos cli
1. Install and Deinstall are tedious
1. You need to setup some kind of proxy tunnel to access arangodb from the outside
1. Sparse monitoring capabilities
Mention scaling up and down as in DC/OS case.
However these are things which do not influence ArangoDB itself and operating your cluster like this is fully supported.
Explain how to shut down an ArangoDB cluster cleanly.
!SUBSUBSECTION Installing via Marathon
Explain authentication setup (once we have it) or say it is exactly as
in the DC/OS case.
To install ArangoDB via marathon you need a proper config file:
Staubsauger? Deployment via Marathon.
```
{
"id": "arangodb",
"cpus": 0.25,
"mem": 256.0,
"ports": [0, 0, 0],
"instances": 1,
"args": [
"framework",
"--framework_name=arangodb",
"--master=zk://172.17.0.2:2181/mesos",
"--zk=zk://172.17.0.2:2181/arangodb",
"--user=",
"--principal=pri",
"--role=arangodb",
"--mode=cluster",
"--async_replication=true",
"--minimal_resources_agent=mem(*):512;cpus(*):0.25;disk(*):512",
"--minimal_resources_dbserver=mem(*):512;cpus(*):0.25;disk(*):1024",
"--minimal_resources_secondary=mem(*):512;cpus(*):0.25;disk(*):1024",
"--minimal_resources_coordinator=mem(*):512;cpus(*):0.25;disk(*):1024",
"--nr_agents=1",
"--nr_dbservers=2",
"--nr_coordinators=2",
"--failover_timeout=86400",
"--arangodb_image=arangodb/arangodb-mesos:3.0",
"--secondaries_with_dbservers=false",
"--coordinators_with_dbservers=false"
],
"container": {
"type": "DOCKER",
"docker": {
"image": "arangodb/arangodb-mesos-framework:3.0",
"network": "HOST"
}
},
"healthChecks": [
{
"protocol": "HTTP",
"path": "/framework/v1/health.json",
"gracePeriodSeconds": 3,
"intervalSeconds": 10,
"portIndex": 0,
"timeoutSeconds": 10,
"maxConsecutiveFailures": 0
}
]
}
```
Carefully review the settings (especially the IPs and the resources). Then you can deploy to marathon:
`curl -X POST -H "Content-Type: application/json" http://url-of-marathon/v2/apps -d @arangodb3.json`
Alternatively use the webinterface of marathon to deploy ArangoDB.
!SUBSUBSECTION Deinstallation via Marathon
As with DC/OS you first need to properly cleanup any state leftovers.
The easiest is to simply delete arangodb and then deploy the cleanup-framework (see section arangodb-cleanup-framework).
!SUBSECTION Configuration options
The ArangoDB Mesos framework has a ton of different options, which are listed and described here: https://github.com/arangodb/arangodb-mesos-framework/tree/3.0

View File

@ -89,6 +89,49 @@ var q = `FOR doc IN ´collection´ RETURN doc.´name´`;
The following AQL functions have been added in 3.0:
- *REGEX_TEST(value, regex)*: tests whether the string *value* matches the regular expression
specified in *regex*. Returns *true* if it matches, and *false* otherwise.
The regular expression may consist of literal characters and the following
characters and sequences:
- `.` the dot matches any single character except line terminators.
To include line terminators, use `[\s\S]` instead to simulate `.` with *DOTALL* flag.
- `\d` matches a single digit, equivalent to `[0-9]`
- `\s` matches a single whitespace character
- `\S` matches a single non-whitespace character
- `\t` matches a tab character
- `\r` matches a carriage return
- `\n` matches a line-feed character
- `[xyz]` set of characters. matches any of the enclosed characters (i.e.
*x*, *y* or *z* in this case
- `[^xyz]` negated set of characters. matches any other character than the
enclosed ones (i.e. anything but *x*, *y* or *z* in this case)
- `[x-z]` range of characters. Matches any of the characters in the
specified range, e.g. `[0-9A-F]` to match any character in
*0123456789ABCDEF*
- `[^x-z]` negated range of characters. Matches any other character than the
ones specified in the range
- `(xyz)` defines and matches a pattern group
- `(x|y)` matches either *x* or *y*
- `^` matches the beginning of the string (e.g. `^xyz`)
- `$` matches the end of the string (e.g. `xyz$`)
Note that the characters `.`, `*`, `?`, `[`, `]`, `(`, `)`, `{`, `}`, `^`,
and `$` have a special meaning in regular expressions and may need to be
escaped using a backslash (`\\`). A literal backslash should also be escaped
using another backslash, i.e. `\\\\`.
Characters and sequences may optionally be repeated using the following
quantifiers:
- `x*` matches zero or more occurrences of *x*
- `x+` matches one or more occurrences of *x*
- `x?` matches one or zero occurrences of *x*
- `x{y}` matches exactly *y* occurrences of *x*
- `x{y,z}` matches between *y* and *z* occurrences of *x*
- `x{y,}` matches at least *y* occurrences of *x*
- *HASH(value)*: Calculates a hash value for *value*. *value* is not required to be a
string, but can have any data type. The calculated hash value will take the data type
of *value* into account, so for example the number *1* and the string *"1"* will have

95
README
View File

@ -1,95 +0,0 @@
****** ArangoDB ******
ArangoDB is a multi-model, open-source database with flexible data models for
documents, graphs, and key-values. Build high performance applications using a
convenient SQL-like query language or JavaScript extensions. Use ACID
transactions if you require them. Scale horizontally with a few mouse clicks.
The supported data models can be mixed in queries and allow ArangoDB to be the
aggregation point for your data.
To get started, try one of our 10 minutes tutorials in your favorite
programming language or try one of our ArangoDB_Cookbook_recipes.
For the impatient: download and install ArangoDB. Start the server arangod and
point your browser to http://127.0.0.1:8529/.
***** Key Features in ArangoDB *****
* Multi-Model: Documents, graphs and key-value pairs — model your data as
you see fit for your application.
* Joins: Conveniently join what belongs together for flexible ad-hoc
querying, less data redundancy.
* Transactions: Easy application development keeping your data consistent
and safe. No hassle in your client.
Here is an AQL query that makes use of all those features:
[AQL Query Example]
Joins and transactions are key features for flexible, secure data designs,
widely used in relational databases but lacking in many NoSQL products.
However, there is no need to forego them in ArangoDB. You decide how and when
to use joins and strong consistency guarantees, without sacrificing performance
and scalability.
Furthermore, ArangoDB offers a JavaScript framework called Foxx that is
executed in the database server with direct access to the data. Build your own
data-centric microservices with a few lines of code:
Microservice Example
[Microservice Example]
By extending the HTTP API with user code written in JavaScript, ArangoDB can be
turned into a strict schema-enforcing persistence engine.
Next step, bundle your Foxx application as a docker_container and get it
running in the cloud.
Other features of ArangoDB include:
* Schema-free schemata let you combine the space efficiency of MySQL with
the performance power of NoSQL
* Use a data-centric microservices approach with ArangoDB Foxx and fuse
your application-logic and database together for maximal throughput
* JavaScript for all: no language zoo, you can use one language from your
browser to your back-end
* ArangoDB is multi-threaded - exploit the power of all your cores
* Flexible data modeling: model your data as combination of key-value
pairs, documents or graphs - perfect for social relations
* Free index choice: use the correct index for your problem, be it a skip
list or a fulltext search
* Configurable durability: let the application decide if it needs more
durability or more performance
* Powerful query language (AQL) to retrieve and modify data
* Transactions: run queries on multiple documents or collections with
optional transactional consistency and isolation
* Replication and Sharding: set up the database in a master-slave
configuration or spread bigger datasets across multiple servers
* It is open source (Apache License 2.0)
For more in-depth information read the design_goals_of_ArangoDB
***** Latest Release - ArangoDB 2.8 *****
The What's_new_in_ArangoDB_2.8 can be found in the documentation.
AQL Graph Traversals / Pattern Matching: AQL offers a new feature to traverse
over a graph without writing JavaScript functions but with all the other
features you know from AQL. For this purpose, a special version of FOR
variable-name IN expression has been introduced.
The added Array Indexes are a major improvement to ArangoDB that you will love
and never want to miss again. Hash indexes and skiplist indexes can now be
defined for array values as well, so its freaking fast to access documents
by individual array values.
Additional, there is a cool new aggregation feature that was added after the
beta releases. AQL introduces the keyword AGGREGATE for use in AQL COLLECT
statements. Using AGGREGATE allows more efficient aggregation (incrementally
while building the groups) than previous versions of AQL, which built group
aggregates afterwards from the total of all group values
Optimizer improvements: The AQL query optimizer can now use indexes if multiple
filter conditions on attributes of the same collection are combined with
logical ORs, and if the usage of indexes would completely cover these
conditions.
ArangoDB 2.8 now has an automatic deadlock detection for transactions. A
deadlock is a situation in which two or more concurrent operations (user
transactions or AQL queries) try to access the same resources (collections,
documents) and need to wait for the others to finish, but none of them can make
any progress.
Foxx Improvements
The module resolution used by require now behaves more like in node.js. The
org/arangodb/request module now returns response bodies for error responses by
default. The old behavior of not returning bodies for error responses can be
re-enabled by explicitly setting the option returnBodyOnError to false.
***** More Information *****
Please check the Installation_Manual for installation and compilation
instructions.
The User_Manual has an introductory chapter showing the basic operations of
ArangoDB.
***** Stay in Contact *****
We really appreciate feature requests and bug reports. Please use our Github
issue tracker for reporting them:
https://github.com/arangodb/arangodb/issues
You can use the Google group for improvements, feature requests, comments
http://www.arangodb.com/community

View File

@ -65,7 +65,7 @@ void ActionFeature::start() {
nullptr);
}
void ActionFeature::stop() {
void ActionFeature::unprepare() {
TRI_CleanupActions();
ACTION = nullptr;

View File

@ -36,7 +36,7 @@ class ActionFeature final : public application_features::ApplicationFeature {
public:
void collectOptions(std::shared_ptr<options::ProgramOptions>) override final;
void start() override final;
void stop() override final;
void unprepare() override final;
public:
bool allowUseDatabase() { return _allowUseDatabase; }

View File

@ -38,7 +38,7 @@ using namespace arangodb::rest;
AgencyFeature::AgencyFeature(application_features::ApplicationServer* server)
: ApplicationFeature(server, "Agency"),
_size(1),
_agentId((std::numeric_limits<uint32_t>::max)()),
_agentId(0),
_minElectionTimeout(0.15),
_maxElectionTimeout(2.0),
_notify(false),
@ -104,7 +104,8 @@ void AgencyFeature::collectOptions(std::shared_ptr<ProgramOptions> options) {
}
void AgencyFeature::validateOptions(std::shared_ptr<ProgramOptions> options) {
if (_agentId == (std::numeric_limits<uint32_t>::max)()) {
ProgramOptions::ProcessingResult const& result = options->processingResult();
if (!result.touched("agency.id")) {
disable();
return;
}
@ -189,7 +190,7 @@ void AgencyFeature::start() {
_agent->load();
}
void AgencyFeature::stop() {
void AgencyFeature::unprepare() {
if (!isEnabled()) {
return;

View File

@ -40,7 +40,7 @@ class AgencyFeature : virtual public application_features::ApplicationFeature {
void validateOptions(std::shared_ptr<options::ProgramOptions>) override final;
void prepare() override final;
void start() override final;
void stop() override final;
void unprepare() override final;
private:
uint64_t _size; // agency size (default: 5)

View File

@ -193,7 +193,13 @@ std::vector<VPackSlice> State::slices(arangodb::consensus::index_t start,
}
for (size_t i = start - _cur; i <= end - _cur; ++i) { // TODO:: Check bounds
slices.push_back(VPackSlice(_log[i].entry->data()));
try {
slices.push_back(VPackSlice(_log.at(i).entry->data()));
} catch (std::exception const& e) {
break;
LOG_TOPIC(ERR, Logger::AGENCY) << start-_cur << " " << end-_cur << " " << i << " " << _log.size();
}
}
return slices;

View File

@ -85,17 +85,7 @@ class AqlItemBlock {
// First update the reference count, if this fails, the value is empty
if (value.requiresDestruction()) {
auto it = _valueCount.find(value);
if (it == _valueCount.end()) {
TRI_IF_FAILURE("AqlItemBlock::setValue") {
THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
}
_valueCount.emplace(value, 1);
} else {
TRI_ASSERT(it->second > 0);
++(it->second);
}
++_valueCount[value];
}
_data[index * _nrRegs + varNr] = value;
@ -106,8 +96,7 @@ class AqlItemBlock {
/// use with caution only in special situations when it can be ensured that
/// no one else will be pointing to the same value
void destroyValue(size_t index, RegisterId varNr) {
size_t const pos = index * _nrRegs + varNr;
auto& element = _data[pos];
auto& element = _data[index * _nrRegs + varNr];
if (element.requiresDestruction()) {
auto it = _valueCount.find(element);
@ -130,8 +119,7 @@ class AqlItemBlock {
/// @brief eraseValue, erase the current value of a register not freeing it
/// this is used if the value is stolen and later released from elsewhere
void eraseValue(size_t index, RegisterId varNr) {
size_t const pos = index * _nrRegs + varNr;
auto& element = _data[pos];
auto& element = _data[index * _nrRegs + varNr];
if (element.requiresDestruction()) {
auto it = _valueCount.find(element);
@ -149,7 +137,7 @@ class AqlItemBlock {
element.erase();
}
/// @brief eraseValue, erase the current value of a register not freeing it
/// @brief eraseValue, erase the current value of all values, not freeing them.
/// this is used if the value is stolen and later released from elsewhere
void eraseAll() {
for (auto& it : _data) {
@ -176,13 +164,9 @@ class AqlItemBlock {
/// the same value again. Note that once you do this for a single AqlValue
/// you should delete the AqlItemBlock soon, because the stolen AqlValues
/// might be deleted at any time!
void steal(AqlValue const& v) {
if (v.requiresDestruction()) {
auto it = _valueCount.find(v);
if (it != _valueCount.end()) {
_valueCount.erase(it);
}
void steal(AqlValue const& value) {
if (value.requiresDestruction()) {
_valueCount.erase(value);
}
}

View File

@ -226,13 +226,13 @@ struct AqlValue final {
~AqlValue() = default;
/// @brief whether or not the value must be destroyed
inline bool requiresDestruction() const {
inline bool requiresDestruction() const noexcept {
AqlValueType t = type();
return (t == VPACK_MANAGED || t == DOCVEC || t == RANGE);
}
/// @brief whether or not the value is empty / none
inline bool isEmpty() const {
inline bool isEmpty() const noexcept {
if (type() != VPACK_INLINE) {
return false;
}
@ -240,12 +240,12 @@ struct AqlValue final {
}
/// @brief whether or not the value is a range
inline bool isRange() const {
inline bool isRange() const noexcept {
return type() == RANGE;
}
/// @brief whether or not the value is a docvec
inline bool isDocvec() const {
inline bool isDocvec() const noexcept {
return type() == DOCVEC;
}
@ -352,7 +352,7 @@ struct AqlValue final {
AqlValue clone() const;
/// @brief invalidates/resets a value to None, not freeing any memory
void erase() {
void erase() noexcept {
_data.internal[0] = '\x00';
setType(AqlValueType::VPACK_INLINE);
}
@ -378,7 +378,7 @@ struct AqlValue final {
/// @brief Returns the type of this value. If true it uses an external pointer
/// if false it uses the internal data structure
inline AqlValueType type() const {
inline AqlValueType type() const noexcept {
return static_cast<AqlValueType>(_data.internal[sizeof(_data.internal) - 1]);
}

View File

@ -85,7 +85,7 @@ VPackBuilder BindParameters::StripCollectionNames(VPackSlice const& keys,
TRI_ASSERT(keys.isArray());
VPackBuilder result;
result.openArray();
for (auto const& element : VPackArrayIterator(keys, false)) {
for (auto const& element : VPackArrayIterator(keys)) {
if (element.isString()) {
VPackValueLength l;
char const* s = element.getString(l);

View File

@ -90,7 +90,7 @@ bool EnumerateCollectionBlock::moreDocuments(size_t hint) {
_documents = _scanner.scan(hint);
TRI_ASSERT(_documents.isArray());
_iterator = VPackArrayIterator(_documents, true);
_iterator = VPackArrayIterator(_documents);
VPackValueLength count = _iterator.size();
@ -199,7 +199,7 @@ AqlItemBlock* EnumerateCollectionBlock::getSome(size_t, // atLeast,
for (size_t j = 0; j < toSend; j++) {
if (j > 0) {
// re-use already copied aqlvalues
// re-use already copied AQLValues
for (RegisterId i = 0; i < curRegs; i++) {
res->setValue(j, i, res->getValueReference(0, i));
// Note: if this throws, then all values will be deleted

View File

@ -177,8 +177,8 @@ struct FunctionDefiner {
false, true, true, &Functions::Contains});
add({"LIKE", "AQL_LIKE", "s,r|b", true, true, false, true,
true, &Functions::Like});
add({"REGEX", "AQL_REGEX", "s,r|b", true, true, false, true,
true, &Functions::Regex});
add({"REGEX_TEST", "AQL_REGEX_TEST", "s,r|b", true, true, false, true,
true, &Functions::RegexTest});
add({"LEFT", "AQL_LEFT", "s,n", true, true, false, true, true});
add({"RIGHT", "AQL_RIGHT", "s,n", true, true, false, true, true});
add({"TRIM", "AQL_TRIM", "s|ns", true, true, false, true, true});

View File

@ -351,7 +351,7 @@ static void ExtractKeys(std::unordered_set<std::string>& names,
AqlValueMaterializer materializer(trx);
VPackSlice s = materializer.slice(param, false);
for (auto const& v : VPackArrayIterator(s, true)) {
for (auto const& v : VPackArrayIterator(s)) {
if (v.isString()) {
names.emplace(v.copyString());
} else {
@ -418,7 +418,7 @@ static bool ListContainsElement(arangodb::AqlTransaction* trx,
AqlValueMaterializer testeeMaterializer(trx);
VPackSlice testeeSlice = testeeMaterializer.slice(testee, false);
VPackArrayIterator it(slice, true);
VPackArrayIterator it(slice);
while (it.valid()) {
if (arangodb::basics::VelocyPackHelper::compare(testeeSlice, it.value(), false, options) == 0) {
index = static_cast<size_t>(it.index());
@ -467,7 +467,7 @@ static bool Variance(arangodb::AqlTransaction* trx,
AqlValueMaterializer materializer(trx);
VPackSlice slice = materializer.slice(values, false);
for (auto const& element : VPackArrayIterator(slice, true)) {
for (auto const& element : VPackArrayIterator(slice)) {
if (!element.isNull()) {
if (!element.isNumber()) {
return false;
@ -494,7 +494,7 @@ static bool SortNumberList(arangodb::AqlTransaction* trx,
AqlValueMaterializer materializer(trx);
VPackSlice slice = materializer.slice(values, false);
for (auto const& element : VPackArrayIterator(slice, true)) {
for (auto const& element : VPackArrayIterator(slice)) {
if (!element.isNull()) {
if (!element.isNumber()) {
return false;
@ -547,7 +547,7 @@ static void RequestEdges(VPackSlice vertexSlice,
VPackSlice edges = opRes->slice();
TRI_ASSERT(edges.isArray());
if (includeVertices) {
for (auto const& edge : VPackArrayIterator(edges, true)) {
for (auto const& edge : VPackArrayIterator(edges)) {
VPackObjectBuilder guard(&result);
if (matcher == nullptr || matcher->matches(edge)) {
result.add("edge", edge);
@ -597,7 +597,7 @@ static void RequestEdges(VPackSlice vertexSlice,
}
}
} else {
for (auto const& edge : VPackArrayIterator(edges, true)) {
for (auto const& edge : VPackArrayIterator(edges)) {
if (matcher == nullptr || matcher->matches(edge)) {
result.add(edge);
}
@ -724,7 +724,7 @@ static AqlValue MergeParameters(arangodb::aql::Query* query,
THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY);
}
// merge in all other arguments
for (auto const& it : VPackArrayIterator(initialSlice, true)) {
for (auto const& it : VPackArrayIterator(initialSlice)) {
if (!it.isObject()) {
RegisterInvalidArgumentWarning(query, funcName);
builder.clear();
@ -905,7 +905,7 @@ static AqlValue buildGeoResult(arangodb::AqlTransaction* trx,
static void FlattenList(VPackSlice const& array, size_t maxDepth,
size_t curDepth, VPackBuilder& result) {
TRI_ASSERT(result.isOpenArray());
for (auto const& tmp : VPackArrayIterator(array, true)) {
for (auto const& tmp : VPackArrayIterator(array)) {
if (tmp.isArray() && curDepth < maxDepth) {
FlattenList(tmp, maxDepth, curDepth + 1, result);
} else {
@ -1258,7 +1258,7 @@ AqlValue Functions::Concat(arangodb::aql::Query* query,
AqlValueMaterializer materializer(trx);
VPackSlice slice = materializer.slice(member, false);
for (auto const& it : VPackArrayIterator(slice, true)) {
for (auto const& it : VPackArrayIterator(slice)) {
if (it.isNull()) {
continue;
}
@ -1313,7 +1313,7 @@ AqlValue Functions::ConcatSeparator(arangodb::aql::Query* query,
AqlValueMaterializer materializer(trx);
VPackSlice slice = materializer.slice(member, false);
for (auto const& it : VPackArrayIterator(slice, true)) {
for (auto const& it : VPackArrayIterator(slice)) {
if (it.isNull()) {
continue;
}
@ -1419,11 +1419,11 @@ AqlValue Functions::Like(arangodb::aql::Query* query,
return AqlValue(result);
}
/// @brief function REGEX
AqlValue Functions::Regex(arangodb::aql::Query* query,
arangodb::AqlTransaction* trx,
VPackFunctionParameters const& parameters) {
ValidateParameters(parameters, "REGEX", 2, 3);
/// @brief function REGEX_TEST
AqlValue Functions::RegexTest(arangodb::aql::Query* query,
arangodb::AqlTransaction* trx,
VPackFunctionParameters const& parameters) {
ValidateParameters(parameters, "REGEX_TEST", 2, 3);
bool const caseInsensitive = GetBooleanParameter(trx, parameters, 2, false);
StringBufferLeaser buffer(trx);
arangodb::basics::VPackStringBufferAdapter adapter(buffer->stringBuffer());
@ -1464,7 +1464,7 @@ AqlValue Functions::Regex(arangodb::aql::Query* query,
if (matcher == nullptr) {
// compiling regular expression failed
RegisterWarning(query, "REGEX", TRI_ERROR_QUERY_INVALID_REGEX);
RegisterWarning(query, "REGEX_TEST", TRI_ERROR_QUERY_INVALID_REGEX);
return AqlValue(arangodb::basics::VelocyPackHelper::NullValue());
}
@ -1479,7 +1479,7 @@ AqlValue Functions::Regex(arangodb::aql::Query* query,
if (error) {
// compiling regular expression failed
RegisterWarning(query, "REGEX", TRI_ERROR_QUERY_INVALID_REGEX);
RegisterWarning(query, "REGEX_TEST", TRI_ERROR_QUERY_INVALID_REGEX);
return AqlValue(arangodb::basics::VelocyPackHelper::NullValue());
}
@ -1752,7 +1752,7 @@ AqlValue Functions::Min(arangodb::aql::Query* query,
VPackSlice slice = materializer.slice(value, false);
VPackSlice minValue;
for (auto const& it : VPackArrayIterator(slice, true)) {
for (auto const& it : VPackArrayIterator(slice)) {
if (it.isNull()) {
continue;
}
@ -1781,7 +1781,7 @@ AqlValue Functions::Max(arangodb::aql::Query* query,
AqlValueMaterializer materializer(trx);
VPackSlice slice = materializer.slice(value, false);
VPackSlice maxValue;
for (auto const& it : VPackArrayIterator(slice, true)) {
for (auto const& it : VPackArrayIterator(slice)) {
if (maxValue.isNone() || arangodb::basics::VelocyPackHelper::compare(it, maxValue, true) > 0) {
maxValue = it;
}
@ -1807,7 +1807,7 @@ AqlValue Functions::Sum(arangodb::aql::Query* query,
AqlValueMaterializer materializer(trx);
VPackSlice slice = materializer.slice(value, false);
double sum = 0.0;
for (auto const& it : VPackArrayIterator(slice, true)) {
for (auto const& it : VPackArrayIterator(slice)) {
if (it.isNull()) {
continue;
}
@ -1841,7 +1841,7 @@ AqlValue Functions::Average(arangodb::aql::Query* query,
double sum = 0.0;
size_t count = 0;
for (auto const& v : VPackArrayIterator(slice, true)) {
for (auto const& v : VPackArrayIterator(slice)) {
if (v.isNull()) {
continue;
}
@ -1976,7 +1976,7 @@ AqlValue Functions::Unique(arangodb::aql::Query* query,
values(512, arangodb::basics::VelocyPackHelper::VPackHash(),
arangodb::basics::VelocyPackHelper::VPackEqual(&options));
for (auto const& s : VPackArrayIterator(slice, true)) {
for (auto const& s : VPackArrayIterator(slice)) {
if (!s.isNone()) {
values.emplace(s);
}
@ -2013,7 +2013,7 @@ AqlValue Functions::SortedUnique(arangodb::aql::Query* query,
arangodb::basics::VelocyPackHelper::VPackLess<true> less(trx->transactionContext()->getVPackOptions(), &slice, &slice);
std::set<VPackSlice, arangodb::basics::VelocyPackHelper::VPackLess<true>> values(less);
for (auto const& it : VPackArrayIterator(slice, true)) {
for (auto const& it : VPackArrayIterator(slice)) {
if (!it.isNone()) {
values.insert(it);
}
@ -2059,7 +2059,7 @@ AqlValue Functions::Union(arangodb::aql::Query* query,
VPackSlice slice = materializer.slice(value, false);
// this passes ownership for the JSON contens into result
for (auto const& it : VPackArrayIterator(slice, true)) {
for (auto const& it : VPackArrayIterator(slice)) {
builder->add(it);
TRI_IF_FAILURE("AqlFunctions::OutOfMemory2") {
THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
@ -2110,7 +2110,7 @@ AqlValue Functions::UnionDistinct(arangodb::aql::Query* query,
materializers.emplace_back(trx);
VPackSlice slice = materializers.back().slice(value, false);
for (auto const& v : VPackArrayIterator(slice, true)) {
for (auto const& v : VPackArrayIterator(slice)) {
if (values.find(v) == values.end()) {
TRI_IF_FAILURE("AqlFunctions::OutOfMemory1") {
THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
@ -2176,7 +2176,7 @@ AqlValue Functions::Intersection(arangodb::aql::Query* query,
materializers.emplace_back(trx);
VPackSlice slice = materializers.back().slice(value, false);
for (auto const& it : VPackArrayIterator(slice, true)) {
for (auto const& it : VPackArrayIterator(slice)) {
if (i == 0) {
// round one
@ -2678,7 +2678,7 @@ AqlValue Functions::Slice(arangodb::aql::Query* query,
builder->openArray();
int64_t pos = 0;
VPackArrayIterator it(arraySlice, true);
VPackArrayIterator it(arraySlice);
while (it.valid()) {
if (pos >= from && pos < to) {
builder->add(it.value());
@ -2722,7 +2722,7 @@ AqlValue Functions::Minus(arangodb::aql::Query* query,
AqlValueMaterializer materializer(trx);
VPackSlice arraySlice = materializer.slice(baseArray, false);
VPackArrayIterator it(arraySlice, true);
VPackArrayIterator it(arraySlice);
while (it.valid()) {
contains.emplace(it.value(), it.index());
it.next();
@ -2741,7 +2741,7 @@ AqlValue Functions::Minus(arangodb::aql::Query* query,
AqlValueMaterializer materializer(trx);
VPackSlice arraySlice = materializer.slice(next, false);
for (auto const& search : VPackArrayIterator(arraySlice, true)) {
for (auto const& search : VPackArrayIterator(arraySlice)) {
auto find = contains.find(search);
if (find != contains.end()) {
@ -2787,7 +2787,7 @@ AqlValue Functions::Document(arangodb::aql::Query* query,
AqlValueMaterializer materializer(trx);
VPackSlice idSlice = materializer.slice(id, false);
builder->openArray();
for (auto const& next : VPackArrayIterator(idSlice, true)) {
for (auto const& next : VPackArrayIterator(idSlice)) {
if (next.isString()) {
std::string identifier = next.copyString();
std::string colName;
@ -2829,7 +2829,7 @@ AqlValue Functions::Document(arangodb::aql::Query* query,
if (!notFound) {
AqlValueMaterializer materializer(trx);
VPackSlice idSlice = materializer.slice(id, false);
for (auto const& next : VPackArrayIterator(idSlice, true)) {
for (auto const& next : VPackArrayIterator(idSlice)) {
if (next.isString()) {
std::string identifier(next.copyString());
GetDocumentByIdentifier(trx, collectionName, identifier, true, *builder.get());
@ -2943,7 +2943,7 @@ AqlValue Functions::Edges(arangodb::aql::Query* query,
builder->openArray();
if (vertexSlice.isArray()) {
for (auto const& v : VPackArrayIterator(vertexSlice, true)) {
for (auto const& v : VPackArrayIterator(vertexSlice)) {
RequestEdges(v, trx, collectionName, indexId, direction,
matcher.get(), includeVertices, *builder.get());
}
@ -3323,7 +3323,7 @@ AqlValue Functions::Push(arangodb::aql::Query* query,
AqlValueMaterializer materializer(trx);
VPackSlice l = materializer.slice(list, false);
for (auto const& it : VPackArrayIterator(l, true)) {
for (auto const& it : VPackArrayIterator(l)) {
builder->add(it);
}
VPackOptions options;
@ -3364,7 +3364,7 @@ AqlValue Functions::Pop(arangodb::aql::Query* query,
TransactionBuilderLeaser builder(trx);
builder->openArray();
auto iterator = VPackArrayIterator(slice, true);
auto iterator = VPackArrayIterator(slice);
while (iterator.valid() && !iterator.isLast()) {
builder->add(iterator.value());
iterator.next();
@ -3412,7 +3412,7 @@ AqlValue Functions::Append(arangodb::aql::Query* query,
trx->transactionContext()->orderCustomTypeHandler().get();
if (!list.isNull(true)) {
if (list.isArray()) {
for (auto const& it : VPackArrayIterator(l, true)) {
for (auto const& it : VPackArrayIterator(l)) {
builder->add(it);
}
}
@ -3427,7 +3427,7 @@ AqlValue Functions::Append(arangodb::aql::Query* query,
if (unique) {
std::unordered_set<VPackSlice> added;
added.reserve(static_cast<size_t>(slice.length()));
for (auto const& it : VPackArrayIterator(slice, true)) {
for (auto const& it : VPackArrayIterator(slice)) {
if (added.find(it) == added.end() &&
!ListContainsElement(&options, l, it)) {
builder->add(it);
@ -3435,7 +3435,7 @@ AqlValue Functions::Append(arangodb::aql::Query* query,
}
}
} else {
for (auto const& it : VPackArrayIterator(slice, true)) {
for (auto const& it : VPackArrayIterator(slice)) {
builder->add(it);
}
}
@ -3483,7 +3483,7 @@ AqlValue Functions::Unshift(arangodb::aql::Query* query,
if (list.isArray()) {
AqlValueMaterializer materializer(trx);
VPackSlice v = materializer.slice(list, false);
for (auto const& it : VPackArrayIterator(v, true)) {
for (auto const& it : VPackArrayIterator(v)) {
builder->add(it);
}
}
@ -3514,7 +3514,7 @@ AqlValue Functions::Shift(arangodb::aql::Query* query,
AqlValueMaterializer materializer(trx);
VPackSlice l = materializer.slice(list, false);
auto iterator = VPackArrayIterator(l, true);
auto iterator = VPackArrayIterator(l);
// This jumps over the first element
while (iterator.next()) {
builder->add(iterator.value());
@ -3563,7 +3563,7 @@ AqlValue Functions::RemoveValue(arangodb::aql::Query* query,
AqlValueMaterializer materializer(trx);
VPackSlice v = materializer.slice(list, false);
for (auto const& it : VPackArrayIterator(v, true)) {
for (auto const& it : VPackArrayIterator(v)) {
if (useLimit && limit == 0) {
// Just copy
builder->add(it);
@ -3616,7 +3616,7 @@ AqlValue Functions::RemoveValues(arangodb::aql::Query* query,
TransactionBuilderLeaser builder(trx);
builder->openArray();
for (auto const& it : VPackArrayIterator(l, true)) {
for (auto const& it : VPackArrayIterator(l)) {
if (!ListContainsElement(&options, v, it)) {
builder->add(it);
}
@ -3665,7 +3665,7 @@ AqlValue Functions::RemoveNth(arangodb::aql::Query* query,
size_t target = static_cast<size_t>(p);
size_t cur = 0;
builder->openArray();
for (auto const& it : VPackArrayIterator(v, true)) {
for (auto const& it : VPackArrayIterator(v)) {
if (cur != target) {
builder->add(it);
}
@ -3996,7 +3996,7 @@ AqlValue Functions::Range(arangodb::aql::Query* query,
AqlValue stepValue = ExtractFunctionParameterValue(trx, parameters, 2);
if (stepValue.isNull(true)) {
// no step specified
// no step specified. return a real range object
return AqlValue(left.toInt64(), right.toInt64());
}

View File

@ -97,7 +97,7 @@ struct Functions {
VPackFunctionParameters const&);
static AqlValue Like(arangodb::aql::Query*, arangodb::AqlTransaction*,
VPackFunctionParameters const&);
static AqlValue Regex(arangodb::aql::Query*, arangodb::AqlTransaction*,
static AqlValue RegexTest(arangodb::aql::Query*, arangodb::AqlTransaction*,
VPackFunctionParameters const&);
static AqlValue Passthru(arangodb::aql::Query*, arangodb::AqlTransaction*,
VPackFunctionParameters const&);

View File

@ -410,7 +410,7 @@ bool IndexBlock::readIndex(size_t atMost) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
}
for (auto const& doc : VPackArrayIterator(slice, true)) {
for (auto const& doc : VPackArrayIterator(slice)) {
if (!hasMultipleIndexes) {
_documents.emplace_back(doc);
} else {

View File

@ -453,7 +453,7 @@ AqlItemBlock* InsertBlock::work(std::vector<AqlItemBlock*>& blocks) {
dstRow -= n;
VPackSlice resultList = opRes.slice();
TRI_ASSERT(resultList.isArray());
for (auto const& elm: VPackArrayIterator(resultList, false)) {
for (auto const& elm: VPackArrayIterator(resultList)) {
bool wasError = arangodb::basics::VelocyPackHelper::getBooleanValue(
elm, "error", false);
if (!wasError) {

File diff suppressed because it is too large Load Diff

View File

@ -1,8 +1,8 @@
/* A Bison parser, made by GNU Bison 3.0.2. */
/* A Bison parser, made by GNU Bison 3.0.4. */
/* Bison interface for Yacc-like parsers in C
Copyright (C) 1984, 1989-1990, 2000-2013 Free Software Foundation, Inc.
Copyright (C) 1984, 1989-1990, 2000-2015 Free Software Foundation, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
@ -79,46 +79,48 @@ extern int Aqldebug;
T_AND = 288,
T_OR = 289,
T_NIN = 290,
T_EQ = 291,
T_NE = 292,
T_LT = 293,
T_GT = 294,
T_LE = 295,
T_GE = 296,
T_LIKE = 297,
T_PLUS = 298,
T_MINUS = 299,
T_TIMES = 300,
T_DIV = 301,
T_MOD = 302,
T_QUESTION = 303,
T_COLON = 304,
T_SCOPE = 305,
T_RANGE = 306,
T_COMMA = 307,
T_OPEN = 308,
T_CLOSE = 309,
T_OBJECT_OPEN = 310,
T_OBJECT_CLOSE = 311,
T_ARRAY_OPEN = 312,
T_ARRAY_CLOSE = 313,
T_OUTBOUND = 314,
T_INBOUND = 315,
T_ANY = 316,
T_ALL = 317,
T_NONE = 318,
UMINUS = 319,
UPLUS = 320,
FUNCCALL = 321,
REFERENCE = 322,
INDEXED = 323,
EXPANSION = 324
T_REGEX_MATCH = 291,
T_REGEX_NON_MATCH = 292,
T_EQ = 293,
T_NE = 294,
T_LT = 295,
T_GT = 296,
T_LE = 297,
T_GE = 298,
T_LIKE = 299,
T_PLUS = 300,
T_MINUS = 301,
T_TIMES = 302,
T_DIV = 303,
T_MOD = 304,
T_QUESTION = 305,
T_COLON = 306,
T_SCOPE = 307,
T_RANGE = 308,
T_COMMA = 309,
T_OPEN = 310,
T_CLOSE = 311,
T_OBJECT_OPEN = 312,
T_OBJECT_CLOSE = 313,
T_ARRAY_OPEN = 314,
T_ARRAY_CLOSE = 315,
T_OUTBOUND = 316,
T_INBOUND = 317,
T_ANY = 318,
T_ALL = 319,
T_NONE = 320,
UMINUS = 321,
UPLUS = 322,
FUNCCALL = 323,
REFERENCE = 324,
INDEXED = 325,
EXPANSION = 326
};
#endif
/* Value type. */
#if ! defined YYSTYPE && ! defined YYSTYPE_IS_DECLARED
typedef union YYSTYPE YYSTYPE;
union YYSTYPE
{
#line 19 "Aql/grammar.y" /* yacc.c:1909 */
@ -131,8 +133,10 @@ union YYSTYPE
bool boolval;
int64_t intval;
#line 135 "Aql/grammar.hpp" /* yacc.c:1909 */
#line 137 "Aql/grammar.hpp" /* yacc.c:1909 */
};
typedef union YYSTYPE YYSTYPE;
# define YYSTYPE_IS_TRIVIAL 1
# define YYSTYPE_IS_DECLARED 1
#endif

View File

@ -238,6 +238,9 @@ static AstNode const* GetIntoExpression(AstNode const* node) {
%token T_OR "or operator"
%token T_NIN "not in operator"
%token T_REGEX_MATCH "~= operator"
%token T_REGEX_NON_MATCH "~! operator"
%token T_EQ "== operator"
%token T_NE "!= operator"
%token T_LT "< operator"
@ -285,7 +288,7 @@ static AstNode const* GetIntoExpression(AstNode const* node) {
%left T_OR
%left T_AND
%nonassoc T_OUTBOUND T_INBOUND T_ANY T_ALL T_NONE
%left T_EQ T_NE T_LIKE
%left T_EQ T_NE T_LIKE T_REGEX_MATCH T_REGEX_NON_MATCH
%left T_IN T_NIN
%left T_LT T_GT T_LE T_GE
%left T_RANGE
@ -1147,6 +1150,19 @@ operator_binary:
arguments->addMember($3);
$$ = parser->ast()->createNodeFunctionCall("LIKE", arguments);
}
| expression T_REGEX_MATCH expression {
AstNode* arguments = parser->ast()->createNodeArray(2);
arguments->addMember($1);
arguments->addMember($3);
$$ = parser->ast()->createNodeFunctionCall("REGEX_TEST", arguments);
}
| expression T_REGEX_NON_MATCH expression {
AstNode* arguments = parser->ast()->createNodeArray(2);
arguments->addMember($1);
arguments->addMember($3);
AstNode* node = parser->ast()->createNodeFunctionCall("REGEX_TEST", arguments);
$$ = parser->ast()->createNodeUnaryOperator(NODE_TYPE_OPERATOR_UNARY_NOT, node);
}
| expression quantifier T_EQ expression {
$$ = parser->ast()->createNodeBinaryArrayOperator(NODE_TYPE_OPERATOR_BINARY_ARRAY_EQ, $1, $4, $2);
}

File diff suppressed because it is too large Load Diff

View File

@ -199,6 +199,14 @@ namespace arangodb {
* operators
* --------------------------------------------------------------------------- */
"=~" {
return T_REGEX_MATCH;
}
"!~" {
return T_REGEX_NON_MATCH;
}
"==" {
return T_EQ;
}

View File

@ -1153,11 +1153,15 @@ size_t ClusterComm::performRequests(std::vector<ClusterCommRequest>& requests,
} else if (res.status == CL_COMM_BACKEND_UNAVAILABLE ||
(res.status == CL_COMM_TIMEOUT && !res.sendWasComplete)) {
requests[index].result = res;
// In this case we will retry at the dueTime, if it is before endTime:
if (dueTime[index] >= endTime) {
requests[index].done = true;
nrDone++;
}
LOG_TOPIC(TRACE, logTopic) << "ClusterComm::performRequests: "
<< "got BACKEND_UNAVAILABLE or TIMEOUT from "
<< requests[index].destination << ":"
<< requests[index].path;
// In this case we will retry at the dueTime
} else { // a "proper error"
requests[index].result = res;
requests[index].done = true;

View File

@ -461,7 +461,7 @@ void ClusterFeature::start() {
dispatcher->buildAqlQueue();
}
void ClusterFeature::stop() {
void ClusterFeature::unprepare() {
if (_enableCluster) {
if (_heartbeatThread != nullptr) {
_heartbeatThread->beginShutdown();

View File

@ -42,7 +42,7 @@ class ClusterFeature : public application_features::ApplicationFeature {
void validateOptions(std::shared_ptr<options::ProgramOptions>) override final;
void prepare() override final;
void start() override final;
void stop() override final;
void unprepare() override final;
private:
std::vector<std::string> _agencyEndpoints;

View File

@ -649,21 +649,19 @@ int countOnCoordinator(std::string const& dbname, std::string const& collname,
}
auto shards = collinfo->shardIds();
CoordTransactionID coordTransactionID = TRI_NewTickServer();
std::vector<ClusterCommRequest> requests;
auto body = std::make_shared<std::string>();
for (auto const& p : *shards) {
auto headers = std::make_unique<std::unordered_map<std::string, std::string>>();
cc->asyncRequest(
"", coordTransactionID, "shard:" + p.first,
arangodb::GeneralRequest::RequestType::GET,
"/_db/" + StringUtils::urlEncode(dbname) + "/_api/collection/" +
StringUtils::urlEncode(p.first) + "/count",
std::shared_ptr<std::string>(), headers, nullptr, 300.0);
requests.emplace_back("shard:" + p.first,
arangodb::GeneralRequest::RequestType::GET,
"/_db/" + StringUtils::urlEncode(dbname) +
"/_api/collection/" +
StringUtils::urlEncode(p.first) + "/count", body);
}
// Now listen to the results:
int count;
int nrok = 0;
for (count = (int)shards->size(); count > 0; count--) {
auto res = cc->wait("", coordTransactionID, 0, "", 0.0);
size_t nrDone = 0;
cc->performRequests(requests, CL_DEFAULT_TIMEOUT, nrDone, Logger::QUERIES);
for (auto& req : requests) {
auto& res = req.result;
if (res.status == CL_COMM_RECEIVED) {
if (res.answer_code == arangodb::GeneralResponse::ResponseCode::OK) {
std::shared_ptr<VPackBuilder> answerBuilder = ExtractAnswer(res);
@ -674,18 +672,18 @@ int countOnCoordinator(std::string const& dbname, std::string const& collname,
result +=
arangodb::basics::VelocyPackHelper::getNumericValue<uint64_t>(
answer, "count", 0);
nrok++;
} else {
return TRI_ERROR_INTERNAL;
}
} else {
return static_cast<int>(res.answer_code);
}
} else {
return TRI_ERROR_CLUSTER_BACKEND_UNAVAILABLE;
}
}
if (nrok != (int)shards->size()) {
return TRI_ERROR_INTERNAL;
}
return TRI_ERROR_NO_ERROR; // the cluster operation was OK, however,
// the DBserver could have reported an error.
return TRI_ERROR_NO_ERROR;
}
////////////////////////////////////////////////////////////////////////////////

View File

@ -24,10 +24,10 @@
#include "Dispatcher.h"
#include "Logger/Logger.h"
#include "Dispatcher/DispatcherQueue.h"
#include "Dispatcher/DispatcherThread.h"
#include "Dispatcher/Job.h"
#include "Logger/Logger.h"
using namespace arangodb::basics;
using namespace arangodb::rest;
@ -59,21 +59,22 @@ void Dispatcher::addStandardQueue(size_t nrThreads, size_t nrExtraThreads,
size_t maxSize) {
TRI_ASSERT(_queues[STANDARD_QUEUE] == nullptr);
_queues[STANDARD_QUEUE] =
new DispatcherQueue(_scheduler, this, STANDARD_QUEUE,
CreateDispatcherThread, nrThreads, nrExtraThreads, maxSize);
_queues[STANDARD_QUEUE] = new DispatcherQueue(
_scheduler, this, STANDARD_QUEUE, CreateDispatcherThread, nrThreads,
nrExtraThreads, maxSize);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief adds the AQL queue (used for the cluster)
////////////////////////////////////////////////////////////////////////////////
void Dispatcher::addAQLQueue(size_t nrThreads, size_t nrExtraThreads,
void Dispatcher::addAQLQueue(size_t nrThreads, size_t nrExtraThreads,
size_t maxSize) {
TRI_ASSERT(_queues[AQL_QUEUE] == nullptr);
_queues[AQL_QUEUE] = new DispatcherQueue(
_scheduler, this, AQL_QUEUE, CreateDispatcherThread, nrThreads, nrExtraThreads, maxSize);
_queues[AQL_QUEUE] =
new DispatcherQueue(_scheduler, this, AQL_QUEUE, CreateDispatcherThread,
nrThreads, nrExtraThreads, maxSize);
}
////////////////////////////////////////////////////////////////////////////////
@ -100,7 +101,8 @@ int Dispatcher::addJob(std::unique_ptr<Job>& job, bool startThread) {
// log success, but do this BEFORE the real add, because the addJob might
// execute
// and delete the job before we have a chance to log something
LOG(TRACE) << "added job " << (void*)(job.get()) << " to queue '" << qnr << "'";
LOG(TRACE) << "added job " << (void*)(job.get()) << " to queue '" << qnr
<< "'";
// add the job to the list of ready jobs
return queue->addJob(job, startThread);
@ -152,8 +154,6 @@ void Dispatcher::shutdown() {
LOG(DEBUG) << "shutting down the dispatcher";
for (auto queue : _queues) {
if (queue != nullptr) {
queue->shutdown();
}
@ -169,7 +169,11 @@ void Dispatcher::reportStatus() {
DispatcherQueue* queue = _queues[i];
if (queue != nullptr) {
LOG(INFO) << "dispatcher queue '" << i << "': initial = " << queue->_nrThreads << ", running = " << queue->_nrRunning.load() << ", waiting = " << queue->_nrWaiting.load() << ", blocked = " << queue->_nrBlocked.load();
LOG(INFO) << "dispatcher queue '" << i
<< "': initial = " << queue->_nrThreads
<< ", running = " << queue->_nrRunning.load()
<< ", waiting = " << queue->_nrWaiting.load()
<< ", blocked = " << queue->_nrBlocked.load();
}
}
}

View File

@ -57,7 +57,9 @@ DispatcherFeature::DispatcherFeature(
}
DispatcherFeature::~DispatcherFeature() {
delete _dispatcher;
if (_dispatcher != nullptr) {
delete _dispatcher;
}
}
void DispatcherFeature::collectOptions(
@ -157,7 +159,9 @@ void DispatcherFeature::stop() {
}
_dispatcher->shutdown();
}
void DispatcherFeature::unprepare() {
DISPATCHER = nullptr;
}

View File

@ -49,6 +49,7 @@ class DispatcherFeature final
void start() override final;
void beginShutdown() override final;
void stop() override final;
void unprepare() override final;
private:
uint64_t _nrStandardThreads;

View File

@ -55,7 +55,7 @@ class EdgeIndexIterator final : public IndexIterator {
_index(index),
_searchValues(searchValues),
_keys(_searchValues.slice()),
_iterator(_keys, true),
_iterator(_keys),
_posInBuffer(0),
_batchSize(1000) {}
@ -66,7 +66,7 @@ class EdgeIndexIterator final : public IndexIterator {
_index(index),
_searchValues(arangodb::velocypack::Builder::clone(searchValues)),
_keys(_searchValues.slice()),
_iterator(_keys, true),
_iterator(_keys),
_posInBuffer(0),
_batchSize(1000) {}

View File

@ -599,7 +599,7 @@ void HashIndex::transformSearchValues(VPackSlice const values,
}
VPackArrayBuilder guard(&result);
for (auto const& v : VPackArrayIterator(values, true)) {
for (auto const& v : VPackArrayIterator(values)) {
if (!v.isObject() || !v.hasKey(TRI_SLICE_KEY_EQUAL)) {
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_BAD_PARAMETER, "Hash index only allows == comparison.");
}

View File

@ -123,7 +123,7 @@ class HashIndexIteratorVPack final : public IndexIterator {
: _trx(trx),
_index(index),
_searchValues(searchValues.get()),
_iterator(_searchValues->slice(), true),
_iterator(_searchValues->slice()),
_buffer(),
_posInBuffer(0) {
searchValues.release(); // now we have ownership for searchValues

View File

@ -47,7 +47,7 @@ class PrimaryIndexIterator final : public IndexIterator {
: _trx(trx),
_index(index),
_keys(keys.get()),
_iterator(_keys->slice(), true) {
_iterator(_keys->slice()) {
keys.release(); // now we have ownership for _keys
TRI_ASSERT(_keys->slice().isArray());

View File

@ -257,7 +257,7 @@ void RocksDBFeature::start() {
}
}
void RocksDBFeature::stop() {
void RocksDBFeature::unprepare() {
if (!isEnabled()) {
return;
}

View File

@ -45,7 +45,7 @@ class RocksDBFeature final : public application_features::ApplicationFeature {
void collectOptions(std::shared_ptr<options::ProgramOptions>) override final;
void validateOptions(std::shared_ptr<options::ProgramOptions>) override final;
void start() override final;
void stop() override final;
void unprepare() override final;
inline rocksdb::OptimisticTransactionDB* db() const { return _db; }
inline RocksDBKeyComparator* comparator() const { return _comparator; }

View File

@ -108,7 +108,7 @@ bool RestEdgesHandler::getEdgesForVertexList(
// generate result
scannedIndex += static_cast<size_t>(edges.length());
for (auto const& edge : VPackArrayIterator(edges, true)) {
for (auto const& edge : VPackArrayIterator(edges)) {
bool add = true;
if (!expressions.empty()) {
for (auto& exp : expressions) {
@ -164,7 +164,7 @@ bool RestEdgesHandler::getEdgesForVertex(
// generate result
scannedIndex += static_cast<size_t>(edges.length());
for (auto const& edge : VPackArrayIterator(edges, true)) {
for (auto const& edge : VPackArrayIterator(edges)) {
bool add = true;
if (!expressions.empty()) {
for (auto& exp : expressions) {

View File

@ -637,13 +637,15 @@ bool RestVocbaseBaseHandler::extractBooleanParameter(char const* name,
std::shared_ptr<VPackBuilder> RestVocbaseBaseHandler::parseVelocyPackBody(
VPackOptions const* options, bool& success) {
bool found;
std::string const& contentType =
_request->header(StaticStrings::ContentTypeHeader, found);
try {
success = true;
#if 0
// currently deactivated...
bool found;
std::string const& contentType =
_request->header(StaticStrings::ContentTypeHeader, found);
if (found && contentType.size() == StaticStrings::MimeTypeVPack.size() &&
contentType == StaticStrings::MimeTypeVPack) {
VPackSlice slice{_request->body().c_str()};
@ -653,6 +655,9 @@ std::shared_ptr<VPackBuilder> RestVocbaseBaseHandler::parseVelocyPackBody(
} else {
return _request->toVelocyPack(options);
}
#else
return _request->toVelocyPack(options);
#endif
} catch (std::bad_alloc const&) {
generateOOMError();
} catch (VPackException const& e) {

View File

@ -162,7 +162,7 @@ void BootstrapFeature::start() {
_isReady = true;
}
void BootstrapFeature::stop() {
void BootstrapFeature::unprepare() {
auto server = ApplicationServer::getFeature<DatabaseServerFeature>("DatabaseServer");
TRI_server_t* s = server->SERVER;

View File

@ -33,7 +33,8 @@ class BootstrapFeature final : public application_features::ApplicationFeature {
public:
void collectOptions(std::shared_ptr<options::ProgramOptions>) override final;
void start() override final;
void stop() override final;
void unprepare() override final;
bool isReady() const {
return _isReady;
}

View File

@ -60,7 +60,7 @@ void ConsoleFeature::start() {
_consoleThread->start();
}
void ConsoleFeature::stop() {
void ConsoleFeature::unprepare() {
if (_operationMode != OperationMode::MODE_CONSOLE) {
return;
}

View File

@ -36,7 +36,7 @@ class ConsoleFeature final : public application_features::ApplicationFeature {
public:
void start() override final;
void stop() override final;
void unprepare() override final;
private:
OperationMode _operationMode;

View File

@ -171,7 +171,7 @@ void DatabaseFeature::start() {
}
}
void DatabaseFeature::stop() {
void DatabaseFeature::unprepare() {
// close all databases
closeDatabases();

View File

@ -39,7 +39,7 @@ class DatabaseFeature final : public application_features::ApplicationFeature {
void collectOptions(std::shared_ptr<options::ProgramOptions>) override final;
void validateOptions(std::shared_ptr<options::ProgramOptions>) override final;
void start() override final;
void stop() override final;
void unprepare() override final;
public:
TRI_vocbase_t* vocbase() const { return _vocbase; }

View File

@ -82,7 +82,7 @@ void DatabaseServerFeature::start() {
}
}
void DatabaseServerFeature::stop() {
void DatabaseServerFeature::unprepare() {
// turn off index threads
INDEX_POOL = nullptr;
_indexPool.reset();

View File

@ -47,7 +47,7 @@ class DatabaseServerFeature final
void validateOptions(std::shared_ptr<options::ProgramOptions>) override final;
void prepare() override final;
void start() override final;
void stop() override final;
void unprepare() override final;
private:
uint64_t _indexThreads = 2;

View File

@ -85,7 +85,7 @@ void QueryRegistryFeature::start() {
DatabaseServerFeature::SERVER->_queryRegistry = _queryRegistry.get();
}
void QueryRegistryFeature::stop() {
void QueryRegistryFeature::unprepare() {
// clear the query registery
DatabaseServerFeature::SERVER->_queryRegistry = nullptr;
// TODO: reset QUERY_REGISTRY as well?

View File

@ -42,7 +42,7 @@ class QueryRegistryFeature final : public application_features::ApplicationFeatu
void validateOptions(std::shared_ptr<options::ProgramOptions>) override final;
void prepare() override final;
void start() override final;
void stop() override final;
void unprepare() override final;
private:
bool _queryTracking = true;

View File

@ -315,13 +315,14 @@ void RestServerFeature::stop() {
for (auto& server : _servers) {
server->stop();
}
}
void RestServerFeature::unprepare() {
for (auto& server : _servers) {
delete server;
}
_httpOptions._vocbase = nullptr;
RESTSERVER = nullptr;
}

View File

@ -78,6 +78,7 @@ class RestServerFeature final
void prepare() override final;
void start() override final;
void stop() override final;
void unprepare() override final;
private:
double _keepAliveTimeout;

View File

@ -58,6 +58,12 @@ SchedulerFeature::SchedulerFeature(
startsAfter("WorkMonitor");
}
SchedulerFeature::~SchedulerFeature() {
if (_scheduler != nullptr) {
delete _scheduler;
}
}
void SchedulerFeature::collectOptions(
std::shared_ptr<options::ProgramOptions> options) {
options->addSection("scheduler", "Configure the I/O scheduler");
@ -141,14 +147,13 @@ void SchedulerFeature::stop() {
}
_scheduler->shutdown();
// delete the scheduler
delete _scheduler;
_scheduler = nullptr;
SCHEDULER = nullptr;
}
}
void SchedulerFeature::unprepare() {
SCHEDULER = nullptr;
}
#ifdef _WIN32
bool CtrlHandler(DWORD eventType) {
bool shutdown = false;

View File

@ -40,12 +40,14 @@ class SchedulerFeature final : public application_features::ApplicationFeature {
public:
explicit SchedulerFeature(application_features::ApplicationServer* server);
~SchedulerFeature();
public:
void collectOptions(std::shared_ptr<options::ProgramOptions>) override final;
void validateOptions(std::shared_ptr<options::ProgramOptions>) override final;
void start() override final;
void stop() override final;
void unprepare() override final;
private:
uint64_t _nrSchedulerThreads;

View File

@ -54,7 +54,7 @@ void StatisticsFeature::start() {
TRI_InitializeStatistics();
}
void StatisticsFeature::stop() {
void StatisticsFeature::unprepare() {
TRI_ShutdownStatistics();
STATISTICS = nullptr;
}

View File

@ -42,7 +42,7 @@ class StatisticsFeature final
public:
void collectOptions(std::shared_ptr<options::ProgramOptions>) override final;
void start() override final;
void stop() override final;
void unprepare() override final;
public:
void disableStatistics() { _statistics = false; }

View File

@ -70,7 +70,7 @@ VelocyPackCursor::VelocyPackCursor(TRI_vocbase_t* vocbase, CursorId id,
: Cursor(id, batchSize, extra, ttl, hasCount),
_vocbase(vocbase),
_result(std::move(result)),
_iterator(_result.result->slice(), true),
_iterator(_result.result->slice()),
_cached(_result.cached) {
TRI_ASSERT(_result.result->slice().isArray());
TRI_UseVocBase(vocbase);

View File

@ -1146,7 +1146,7 @@ OperationResult Transaction::anyLocal(std::string const& collectionName,
}
VPackSlice docs = result->slice();
VPackArrayIterator it(docs, true);
VPackArrayIterator it(docs);
while (it.valid()) {
resultBuilder.add(it.value());
it.next();

View File

@ -224,7 +224,7 @@ void V8DealerFeature::start() {
startGarbageCollection();
}
void V8DealerFeature::stop() {
void V8DealerFeature::unprepare() {
shutdownContexts();
// delete GC thread after all action threads have been stopped

View File

@ -45,7 +45,7 @@ class V8DealerFeature final : public application_features::ApplicationFeature {
void collectOptions(std::shared_ptr<options::ProgramOptions>) override final;
void validateOptions(std::shared_ptr<options::ProgramOptions>) override final;
void start() override final;
void stop() override final;
void unprepare() override final;
private:
double _gcFrequency;

View File

@ -84,6 +84,7 @@ class AuthEntry {
class AuthResult {
public:
AuthResult() : _authorized(false), _mustChange(false) {}
std::string _username;
bool _authorized;
bool _mustChange;

View File

@ -454,7 +454,7 @@ bool LogfileManager::open() {
return true;
}
void LogfileManager::stop() {
void LogfileManager::unprepare() {
_shutdown = 1;
LOG(TRACE) << "shutting down WAL";

View File

@ -116,7 +116,7 @@ class LogfileManager final : public application_features::ApplicationFeature {
void validateOptions(std::shared_ptr<options::ProgramOptions>) override final;
void prepare() override final;
void start() override final;
void stop() override final;
void unprepare() override final;
public:
// run the recovery procedure

View File

@ -314,6 +314,6 @@ void BenchFeature::start() {
*_result = ret;
}
void BenchFeature::stop() {
void BenchFeature::unprepare() {
ARANGOBENCH = nullptr;
}

View File

@ -32,8 +32,8 @@ class BenchFeature final : public application_features::ApplicationFeature {
public:
void collectOptions(std::shared_ptr<options::ProgramOptions>) override;
void start() override;
void stop() override;
void start() override final;
void unprepare() override final;
public:
bool async() const { return _async; }

View File

@ -136,7 +136,7 @@ void ConsoleFeature::start() {
#endif
}
void ConsoleFeature::stop() {
void ConsoleFeature::unprepare() {
closeLog();
}

View File

@ -36,7 +36,7 @@ class ConsoleFeature final : public application_features::ApplicationFeature {
void collectOptions(std::shared_ptr<options::ProgramOptions>) override final;
void prepare() override final;
void start() override final;
void stop() override final;
void unprepare() override final;
public:
bool quiet() const { return _quiet; }

View File

@ -130,7 +130,7 @@ void V8ShellFeature::start() {
initGlobals();
}
void V8ShellFeature::stop() {
void V8ShellFeature::unprepare() {
{
v8::Locker locker{_isolate};

View File

@ -44,8 +44,8 @@ class V8ShellFeature final : public application_features::ApplicationFeature {
void collectOptions(std::shared_ptr<options::ProgramOptions>) override;
void validateOptions(
std::shared_ptr<options::ProgramOptions> options) override;
void start() override;
void stop() override;
void start() override final;
void unprepare() override final;
private:
std::string _startupDirectory;

View File

@ -88,14 +88,14 @@ var TextHighlightRules = require("./text_highlight_rules").TextHighlightRules;
var AqlHighlightRules = function() {
var keywords = (
"for|return|filter|sort|limit|let|collect|asc|desc|in|into|insert|update|remove|replace|upsert|options|with|and|or|not|distinct|graph|outbound|inbound|any|all|none|aggregate|like|count"
"for|return|filter|sort|limit|let|collect|asc|desc|in|into|insert|update|remove|replace|upsert|options|with|and|or|not|distinct|graph|shortest_path|outbound|inbound|any|all|none|aggregate|like"
);
var builtinFunctions = (
"(to_bool|to_number|to_string|to_list|is_null|is_bool|is_number|is_string|is_list|is_document|typename|" +
"concat|concat_separator|char_length|lower|upper|substring|left|right|trim|reverse|contains|" +
"log|log2|log10|exp|exp2|sin|cos|tan|asin|acos|atan|atan2|radians|degrees|pi|regex|" +
"like|floor|ceil|round|abs|rand|sqrt|pow|length|min|max|average|sum|median|variance_population|" +
"log|log2|log10|exp|exp2|sin|cos|tan|asin|acos|atan|atan2|radians|degrees|pi|regex_test|" +
"like|floor|ceil|round|abs|rand|sqrt|pow|length|count|min|max|average|sum|median|variance_population|" +
"variance_sample|first|last|unique|matches|merge|merge_recursive|has|attributes|values|unset|unset_recursive|keep|" +
"near|within|within_rectangle|is_in_polygon|fulltext|paths|traversal|traversal_tree|edges|stddev_sample|stddev_population|" +
"slice|nth|position|translate|zip|call|apply|push|append|pop|shift|unshift|remove_value|remove_values|" +

View File

@ -54,7 +54,7 @@ var getStorage = function() {
var c = db._collection("_apps");
if (c === null) {
c = db._create("_apps", {isSystem: true, replicationFactor: 1,
distributeShardsLike: "_graphs"});
distributeShardsLike: "_graphs", journalSize: 4 * 1024 * 1024});
c.ensureIndex({ type: "hash", fields: [ "mount" ], unique: true });
}
return c;

View File

@ -2301,7 +2301,7 @@ function AQL_LIKE (value, regex, caseInsensitive) {
/// @brief searches a substring in a string, using a regex
////////////////////////////////////////////////////////////////////////////////
function AQL_REGEX (value, regex, caseInsensitive) {
function AQL_REGEX_TEST (value, regex, caseInsensitive) {
'use strict';
var modifiers = '';
@ -8358,7 +8358,7 @@ exports.AQL_UPPER = AQL_UPPER;
exports.AQL_SUBSTRING = AQL_SUBSTRING;
exports.AQL_CONTAINS = AQL_CONTAINS;
exports.AQL_LIKE = AQL_LIKE;
exports.AQL_REGEX = AQL_REGEX;
exports.AQL_REGEX_TEST = AQL_REGEX_TEST;
exports.AQL_LEFT = AQL_LEFT;
exports.AQL_RIGHT = AQL_RIGHT;
exports.AQL_TRIM = AQL_TRIM;

View File

@ -49,10 +49,11 @@ function createStatisticsCollection (name) {
if (collection === null) {
var r = null;
try {
r = db._create(name, { isSystem: true, waitForSync: false,
replicationFactor: 1,
journalSize: 8 * 1024 * 1024,
distributeShardsLike: "_graphs" });
}
catch (err) {
@ -424,7 +425,6 @@ exports.STATISTICS_INTERVAL = 10;
exports.STATISTICS_HISTORY_INTERVAL = 15 * 60;
////////////////////////////////////////////////////////////////////////////////
/// @brief createCollections
///

View File

@ -62,18 +62,18 @@ function ahuacatlStringFunctionsTestSuite () {
////////////////////////////////////////////////////////////////////////////////
testRegexInvalid : function () {
assertQueryError(errors.ERROR_QUERY_FUNCTION_ARGUMENT_NUMBER_MISMATCH.code, "RETURN REGEX()");
assertQueryError(errors.ERROR_QUERY_FUNCTION_ARGUMENT_NUMBER_MISMATCH.code, "RETURN REGEX(\"test\")");
assertQueryError(errors.ERROR_QUERY_FUNCTION_ARGUMENT_NUMBER_MISMATCH.code, "RETURN REGEX(\"test\", \"meow\", \"foo\", \"bar\")");
assertQueryError(errors.ERROR_QUERY_FUNCTION_ARGUMENT_NUMBER_MISMATCH.code, "RETURN REGEX_TEST()");
assertQueryError(errors.ERROR_QUERY_FUNCTION_ARGUMENT_NUMBER_MISMATCH.code, "RETURN REGEX_TEST(\"test\")");
assertQueryError(errors.ERROR_QUERY_FUNCTION_ARGUMENT_NUMBER_MISMATCH.code, "RETURN REGEX_TEST(\"test\", \"meow\", \"foo\", \"bar\")");
assertQueryWarningAndFalse(errors.ERROR_QUERY_INVALID_REGEX.code, "RETURN REGEX(\"test\", \"[\")");
assertQueryWarningAndFalse(errors.ERROR_QUERY_INVALID_REGEX.code, "RETURN REGEX(\"test\", \"[^\")");
assertQueryWarningAndFalse(errors.ERROR_QUERY_INVALID_REGEX.code, "RETURN REGEX(\"test\", \"a.(\")");
assertQueryWarningAndFalse(errors.ERROR_QUERY_INVALID_REGEX.code, "RETURN REGEX(\"test\", \"(a\")");
assertQueryWarningAndFalse(errors.ERROR_QUERY_INVALID_REGEX.code, "RETURN REGEX(\"test\", \"(a]\")");
assertQueryWarningAndFalse(errors.ERROR_QUERY_INVALID_REGEX.code, "RETURN REGEX(\"test\", \"**\")");
assertQueryWarningAndFalse(errors.ERROR_QUERY_INVALID_REGEX.code, "RETURN REGEX(\"test\", \"?\")");
assertQueryWarningAndFalse(errors.ERROR_QUERY_INVALID_REGEX.code, "RETURN REGEX(\"test\", \"*\")");
assertQueryWarningAndFalse(errors.ERROR_QUERY_INVALID_REGEX.code, "RETURN REGEX_TEST(\"test\", \"[\")");
assertQueryWarningAndFalse(errors.ERROR_QUERY_INVALID_REGEX.code, "RETURN REGEX_TEST(\"test\", \"[^\")");
assertQueryWarningAndFalse(errors.ERROR_QUERY_INVALID_REGEX.code, "RETURN REGEX_TEST(\"test\", \"a.(\")");
assertQueryWarningAndFalse(errors.ERROR_QUERY_INVALID_REGEX.code, "RETURN REGEX_TEST(\"test\", \"(a\")");
assertQueryWarningAndFalse(errors.ERROR_QUERY_INVALID_REGEX.code, "RETURN REGEX_TEST(\"test\", \"(a]\")");
assertQueryWarningAndFalse(errors.ERROR_QUERY_INVALID_REGEX.code, "RETURN REGEX_TEST(\"test\", \"**\")");
assertQueryWarningAndFalse(errors.ERROR_QUERY_INVALID_REGEX.code, "RETURN REGEX_TEST(\"test\", \"?\")");
assertQueryWarningAndFalse(errors.ERROR_QUERY_INVALID_REGEX.code, "RETURN REGEX_TEST(\"test\", \"*\")");
},
testRegex : function () {
@ -217,16 +217,21 @@ function ahuacatlStringFunctionsTestSuite () {
];
values.forEach(function(v) {
var query = "RETURN REGEX(@what, @re)";
var query = "RETURN REGEX_TEST(@what, @re)";
assertEqual(v[2], getQueryResults(query, { what: v[0], re: v[1] })[0], v);
query = "RETURN NOOPT(REGEX(@what, @re))";
query = "RETURN NOOPT(REGEX_TEST(@what, @re))";
assertEqual(v[2], getQueryResults(query, { what: v[0], re: v[1] })[0], v);
query = "RETURN NOOPT(V8(REGEX(@what, @re)))";
query = "RETURN NOOPT(V8(REGEX_TEST(@what, @re)))";
assertEqual(v[2], getQueryResults(query, { what: v[0], re: v[1] })[0], v);
query = "RETURN @what =~ @re";
assertEqual(v[2], getQueryResults(query, { what: v[0], re: v[1] })[0], v);
query = "RETURN @what !~ @re";
assertEqual(!v[2], getQueryResults(query, { what: v[0], re: v[1] })[0], v);
});
},
////////////////////////////////////////////////////////////////////////////////

View File

@ -0,0 +1,113 @@
/*jshint globalstrict:false, strict:false, maxlen:5000 */
/*global assertEqual, AQL_EXECUTE */
////////////////////////////////////////////////////////////////////////////////
/// @brief tests for query language, functions
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2010-2012 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
/// http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is triAGENS GmbH, Cologne, Germany
///
/// @author Jan Steemann
/// @author Copyright 2012, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////
var internal = require("internal");
var jsunity = require("jsunity");
var db = internal.db;
////////////////////////////////////////////////////////////////////////////////
/// @brief test suite
////////////////////////////////////////////////////////////////////////////////
function ahuacatlRegexTestSuite () {
var c;
return {
////////////////////////////////////////////////////////////////////////////////
/// @brief set up
////////////////////////////////////////////////////////////////////////////////
setUp : function () {
db._drop("UnitTestsAhuacatlRegex");
c = db._create("UnitTestsAhuacatlRegex");
for (var i = 0; i < 1000; ++i) {
c.insert({ _key: "test" + i });
}
},
////////////////////////////////////////////////////////////////////////////////
/// @brief tear down
////////////////////////////////////////////////////////////////////////////////
tearDown : function () {
db._drop("UnitTestsAhuacatlRegex");
c = null;
},
////////////////////////////////////////////////////////////////////////////////
/// @brief test regex matching
////////////////////////////////////////////////////////////////////////////////
testRegexMatch : function () {
var values = [
[ '^test$', 0 ],
[ '^test\\d+$', 1000 ],
[ '^test1$', 1 ],
[ '^test1', 111 ],
[ '^test1*', 1000 ],
[ '^test1+', 111 ],
[ '^test1..$', 100 ],
[ '^test1.$', 10 ],
[ '^test11.', 10 ],
[ 'test', 1000 ],
[ 'test123', 1 ],
[ 'test12', 11 ],
[ '111', 1 ],
[ '111$', 1 ],
[ '11$', 10 ],
[ '1$', 100 ]
];
values.forEach(function(v) {
// test match
var query = "FOR doc IN @@collection FILTER doc._key =~ @re RETURN doc._key";
var result = AQL_EXECUTE(query, { "@collection": c.name(), re: v[0] }).json;
assertEqual(v[1], result.length);
// test non-match
query = "FOR doc IN @@collection FILTER doc._key !~ @re RETURN doc._key";
result = AQL_EXECUTE(query, { "@collection": c.name(), re: v[0] }).json;
assertEqual(1000 - v[1], result.length);
});
}
};
}
////////////////////////////////////////////////////////////////////////////////
/// @brief executes the test suite
////////////////////////////////////////////////////////////////////////////////
jsunity.run(ahuacatlRegexTestSuite);
return jsunity.done();

View File

@ -283,22 +283,26 @@ function SynchronousReplicationSuite () {
catch (e1) {
assertEqual(ERRORS.ERROR_ARANGO_DOCUMENT_NOT_FOUND.code, e1.errorNum);
}
assertEqual(2, c.count());
if (healing.place === 15) { healFailure(healing); }
if (failure.place === 16) { makeFailure(failure); }
c.remove([ids[0]._key, ids[1]._key]);
assertEqual(2, c.count());
if (healing.place === 16) { healFailure(healing); }
if (failure.place === 17) { makeFailure(failure); }
c.remove([ids[0]._key, ids[1]._key]);
if (healing.place === 17) { healFailure(healing); }
if (failure.place === 18) { makeFailure(failure); }
docs = c.document([ids[0]._key, ids[1]._key]);
assertEqual(2, docs.length);
assertTrue(docs[0].error);
assertTrue(docs[1].error);
if (healing.place === 17) { healFailure(healing); }
if (healing.place === 18) { healFailure(healing); }
}
////////////////////////////////////////////////////////////////////////////////
@ -336,7 +340,6 @@ function SynchronousReplicationSuite () {
/// @brief check whether we have access to global.instanceInfo
////////////////////////////////////////////////////////////////////////////////
/*
testCheckInstanceInfo : function () {
assertTrue(global.instanceInfo !== undefined);
},
@ -376,7 +379,7 @@ function SynchronousReplicationSuite () {
testBasicOperationsFollowerFail1 : function () {
assertTrue(waitForSynchronousReplication("_system"));
runBasicOperations({place:1, follower:true}, {place:17, follower:true});
runBasicOperations({place:1, follower:true}, {place:18, follower:true});
assertTrue(waitForSynchronousReplication("_system"));
},
@ -386,7 +389,7 @@ function SynchronousReplicationSuite () {
testBasicOperationsFollowerFail2 : function () {
assertTrue(waitForSynchronousReplication("_system"));
runBasicOperations({place:2, follower:true}, {place:17, follower:true});
runBasicOperations({place:2, follower:true}, {place:18, follower:true});
assertTrue(waitForSynchronousReplication("_system"));
},
@ -396,7 +399,7 @@ function SynchronousReplicationSuite () {
testBasicOperationsFollowerFail3 : function () {
assertTrue(waitForSynchronousReplication("_system"));
runBasicOperations({place:3, follower:true}, {place:17, follower:true});
runBasicOperations({place:3, follower:true}, {place:18, follower:true});
assertTrue(waitForSynchronousReplication("_system"));
},
@ -406,7 +409,7 @@ function SynchronousReplicationSuite () {
testBasicOperationsFollowerFail4 : function () {
assertTrue(waitForSynchronousReplication("_system"));
runBasicOperations({place:4, follower:true}, {place:17, follower:true});
runBasicOperations({place:4, follower:true}, {place:18, follower:true});
assertTrue(waitForSynchronousReplication("_system"));
},
@ -416,7 +419,7 @@ function SynchronousReplicationSuite () {
testBasicOperationsFollowerFail5 : function () {
assertTrue(waitForSynchronousReplication("_system"));
runBasicOperations({place:5, follower:true}, {place:17, follower:true});
runBasicOperations({place:5, follower:true}, {place:18, follower:true});
assertTrue(waitForSynchronousReplication("_system"));
},
@ -426,7 +429,7 @@ function SynchronousReplicationSuite () {
testBasicOperationsFollowerFail6 : function () {
assertTrue(waitForSynchronousReplication("_system"));
runBasicOperations({place:6, follower:true}, {place:17, follower:true});
runBasicOperations({place:6, follower:true}, {place:18, follower:true});
assertTrue(waitForSynchronousReplication("_system"));
},
@ -436,7 +439,7 @@ function SynchronousReplicationSuite () {
testBasicOperationsFollowerFail7 : function () {
assertTrue(waitForSynchronousReplication("_system"));
runBasicOperations({place:7, follower:true}, {place:17, follower:true});
runBasicOperations({place:7, follower:true}, {place:18, follower:true});
assertTrue(waitForSynchronousReplication("_system"));
},
@ -446,7 +449,7 @@ function SynchronousReplicationSuite () {
testBasicOperationsFollowerFail8 : function () {
assertTrue(waitForSynchronousReplication("_system"));
runBasicOperations({place:8, follower:true}, {place:17, follower:true});
runBasicOperations({place:8, follower:true}, {place:18, follower:true});
assertTrue(waitForSynchronousReplication("_system"));
},
@ -456,7 +459,7 @@ function SynchronousReplicationSuite () {
testBasicOperationsFollowerFail9 : function () {
assertTrue(waitForSynchronousReplication("_system"));
runBasicOperations({place:9, follower:true}, {place:17, follower:true});
runBasicOperations({place:9, follower:true}, {place:18, follower:true});
assertTrue(waitForSynchronousReplication("_system"));
},
@ -466,7 +469,7 @@ function SynchronousReplicationSuite () {
testBasicOperationsFollowerFail10 : function () {
assertTrue(waitForSynchronousReplication("_system"));
runBasicOperations({place:10, follower:true}, {place:17, follower:true});
runBasicOperations({place:10, follower:true}, {place:18, follower:true});
assertTrue(waitForSynchronousReplication("_system"));
},
@ -476,7 +479,7 @@ function SynchronousReplicationSuite () {
testBasicOperationsFollowerFail11 : function () {
assertTrue(waitForSynchronousReplication("_system"));
runBasicOperations({place:11, follower:true}, {place:17, follower:true});
runBasicOperations({place:11, follower:true}, {place:18, follower:true});
assertTrue(waitForSynchronousReplication("_system"));
},
@ -486,7 +489,7 @@ function SynchronousReplicationSuite () {
testBasicOperationsFollowerFail12 : function () {
assertTrue(waitForSynchronousReplication("_system"));
runBasicOperations({place:12, follower:true}, {place:17, follower:true});
runBasicOperations({place:12, follower:true}, {place:18, follower:true});
assertTrue(waitForSynchronousReplication("_system"));
},
@ -496,7 +499,7 @@ function SynchronousReplicationSuite () {
testBasicOperationsFollowerFail13 : function () {
assertTrue(waitForSynchronousReplication("_system"));
runBasicOperations({place:13, follower:true}, {place:17, follower:true});
runBasicOperations({place:13, follower:true}, {place:18, follower:true});
assertTrue(waitForSynchronousReplication("_system"));
},
@ -506,7 +509,7 @@ function SynchronousReplicationSuite () {
testBasicOperationsFollowerFail14 : function () {
assertTrue(waitForSynchronousReplication("_system"));
runBasicOperations({place:14, follower:true}, {place:17, follower:true});
runBasicOperations({place:14, follower:true}, {place:18, follower:true});
assertTrue(waitForSynchronousReplication("_system"));
},
@ -516,7 +519,7 @@ function SynchronousReplicationSuite () {
testBasicOperationsFollowerFail15 : function () {
assertTrue(waitForSynchronousReplication("_system"));
runBasicOperations({place:15, follower:true}, {place:17, follower:true});
runBasicOperations({place:15, follower:true}, {place:18, follower:true});
assertTrue(waitForSynchronousReplication("_system"));
},
@ -526,7 +529,7 @@ function SynchronousReplicationSuite () {
testBasicOperationsFollowerFail16 : function () {
assertTrue(waitForSynchronousReplication("_system"));
runBasicOperations({place:16, follower:true}, {place:17, follower:true});
runBasicOperations({place:16, follower:true}, {place:18, follower:true});
assertTrue(waitForSynchronousReplication("_system"));
},
@ -536,10 +539,20 @@ function SynchronousReplicationSuite () {
testBasicOperationsFollowerFail17 : function () {
assertTrue(waitForSynchronousReplication("_system"));
runBasicOperations({place:17, follower:true}, {place:17, follower:true});
runBasicOperations({place:17, follower:true}, {place:18, follower:true});
assertTrue(waitForSynchronousReplication("_system"));
},
*/
////////////////////////////////////////////////////////////////////////////////
/// @brief fail in place 18
////////////////////////////////////////////////////////////////////////////////
testBasicOperationsFollowerFail18 : function () {
assertTrue(waitForSynchronousReplication("_system"));
runBasicOperations({place:18, follower:true}, {place:18, follower:true});
assertTrue(waitForSynchronousReplication("_system"));
},
////////////////////////////////////////////////////////////////////////////////
/// @brief run a standard check with failures:
////////////////////////////////////////////////////////////////////////////////
@ -559,7 +572,7 @@ function SynchronousReplicationSuite () {
testBasicOperationsLeaderFail1 : function () {
assertTrue(waitForSynchronousReplication("_system"));
runBasicOperations({place:1, follower: false},
{place:17, follower: false});
{place:18, follower: false});
assertTrue(waitForSynchronousReplication("_system"));
},
@ -570,7 +583,7 @@ function SynchronousReplicationSuite () {
testBasicOperationsLeaderFail2 : function () {
assertTrue(waitForSynchronousReplication("_system"));
runBasicOperations({place:2, follower: false},
{place:17, follower: false});
{place:18, follower: false});
assertTrue(waitForSynchronousReplication("_system"));
},
@ -581,7 +594,7 @@ function SynchronousReplicationSuite () {
testBasicOperationsLeaderFail3 : function () {
assertTrue(waitForSynchronousReplication("_system"));
runBasicOperations({place:3, follower: false},
{place:17, follower: false});
{place:18, follower: false});
assertTrue(waitForSynchronousReplication("_system"));
},
@ -592,7 +605,7 @@ function SynchronousReplicationSuite () {
testBasicOperationsLeaderFail4 : function () {
assertTrue(waitForSynchronousReplication("_system"));
runBasicOperations({place:4, follower: false},
{place:17, follower: false});
{place:18, follower: false});
assertTrue(waitForSynchronousReplication("_system"));
},
@ -603,7 +616,7 @@ function SynchronousReplicationSuite () {
testBasicOperationsLeaderFail5 : function () {
assertTrue(waitForSynchronousReplication("_system"));
runBasicOperations({place:5, follower: false},
{place:17, follower: false});
{place:18, follower: false});
assertTrue(waitForSynchronousReplication("_system"));
},
@ -614,7 +627,7 @@ function SynchronousReplicationSuite () {
testBasicOperationsLeaderFail6 : function () {
assertTrue(waitForSynchronousReplication("_system"));
runBasicOperations({place:6, follower: false},
{place:17, follower: false});
{place:18, follower: false});
assertTrue(waitForSynchronousReplication("_system"));
},
@ -625,7 +638,7 @@ function SynchronousReplicationSuite () {
testBasicOperationsLeaderFail7 : function () {
assertTrue(waitForSynchronousReplication("_system"));
runBasicOperations({place:7, follower: false},
{place:17, follower: false});
{place:18, follower: false});
assertTrue(waitForSynchronousReplication("_system"));
},
@ -636,7 +649,7 @@ function SynchronousReplicationSuite () {
testBasicOperationsLeaderFail8 : function () {
assertTrue(waitForSynchronousReplication("_system"));
runBasicOperations({place:8, follower: false},
{place:17, follower: false});
{place:18, follower: false});
assertTrue(waitForSynchronousReplication("_system"));
},
@ -647,7 +660,7 @@ function SynchronousReplicationSuite () {
testBasicOperationsLeaderFail9 : function () {
assertTrue(waitForSynchronousReplication("_system"));
runBasicOperations({place:9, follower: false},
{place:17, follower: false});
{place:18, follower: false});
assertTrue(waitForSynchronousReplication("_system"));
},
@ -658,7 +671,7 @@ function SynchronousReplicationSuite () {
testBasicOperationsLeaderFail10 : function () {
assertTrue(waitForSynchronousReplication("_system"));
runBasicOperations({place:10, follower: false},
{place:17, follower: false});
{place:18, follower: false});
assertTrue(waitForSynchronousReplication("_system"));
},
@ -669,7 +682,7 @@ function SynchronousReplicationSuite () {
testBasicOperationsLeaderFail11 : function () {
assertTrue(waitForSynchronousReplication("_system"));
runBasicOperations({place:11, follower: false},
{place:17, follower: false});
{place:18, follower: false});
assertTrue(waitForSynchronousReplication("_system"));
},
@ -680,7 +693,7 @@ function SynchronousReplicationSuite () {
testBasicOperationsLeaderFail12 : function () {
assertTrue(waitForSynchronousReplication("_system"));
runBasicOperations({place:12, follower: false},
{place:17, follower: false});
{place:18, follower: false});
assertTrue(waitForSynchronousReplication("_system"));
},
@ -691,7 +704,7 @@ function SynchronousReplicationSuite () {
testBasicOperationsLeaderFail13 : function () {
assertTrue(waitForSynchronousReplication("_system"));
runBasicOperations({place:13, follower: false},
{place:17, follower: false});
{place:18, follower: false});
assertTrue(waitForSynchronousReplication("_system"));
},
@ -702,7 +715,7 @@ function SynchronousReplicationSuite () {
testBasicOperationsLeaderFail14 : function () {
assertTrue(waitForSynchronousReplication("_system"));
runBasicOperations({place:14, follower: false},
{place:17, follower: false});
{place:18, follower: false});
assertTrue(waitForSynchronousReplication("_system"));
},
@ -713,7 +726,7 @@ function SynchronousReplicationSuite () {
testBasicOperationsLeaderFail15 : function () {
assertTrue(waitForSynchronousReplication("_system"));
runBasicOperations({place:15, follower: false},
{place:17, follower: false});
{place:18, follower: false});
assertTrue(waitForSynchronousReplication("_system"));
},
@ -724,7 +737,7 @@ function SynchronousReplicationSuite () {
testBasicOperationsLeaderFail16 : function () {
assertTrue(waitForSynchronousReplication("_system"));
runBasicOperations({place:16, follower: false},
{place:17, follower: false});
{place:18, follower: false});
assertTrue(waitForSynchronousReplication("_system"));
},
@ -735,7 +748,18 @@ function SynchronousReplicationSuite () {
testBasicOperationsLeaderFail17 : function () {
assertTrue(waitForSynchronousReplication("_system"));
runBasicOperations({place:17, follower: false},
{place:17, follower: false});
{place:18, follower: false});
assertTrue(waitForSynchronousReplication("_system"));
},
////////////////////////////////////////////////////////////////////////////////
/// @brief fail leader in place 18
////////////////////////////////////////////////////////////////////////////////
testBasicOperationsLeaderFail18 : function () {
assertTrue(waitForSynchronousReplication("_system"));
runBasicOperations({place:18, follower: false},
{place:18, follower: false});
assertTrue(waitForSynchronousReplication("_system"));
},

View File

@ -625,7 +625,7 @@
task: function() {
// needs to be big enough for assets
return createSystemCollection("_routing", {
journalSize: 8 * 1024 * 1024,
journalSize: 4 * 1024 * 1024,
replicationFactor: DEFAULT_REPLICATION_FACTOR_SYSTEM,
distributeShardsLike: "_graphs"
});
@ -692,7 +692,7 @@
task: function() {
return createSystemCollection("_aqlfunctions", {
journalSize: 2 * 1024 * 1024,
journalSize: 1 * 1024 * 1024,
replicationFactor: DEFAULT_REPLICATION_FACTOR_SYSTEM,
distributeShardsLike: "_graphs"
});
@ -763,7 +763,7 @@
task: function() {
return createSystemCollection("_jobs", {
journalSize: 4 * 1024 * 1024,
journalSize: 2 * 1024 * 1024,
replicationFactor: DEFAULT_REPLICATION_FACTOR_SYSTEM,
distributeShardsLike: "_graphs"
});

View File

@ -72,9 +72,12 @@ void ApplicationFeature::start() {}
// notify the feature about a shutdown request
void ApplicationFeature::beginShutdown() {}
// stop and shut down the feature
// stop the feature
void ApplicationFeature::stop() {}
// shut down the feature
void ApplicationFeature::unprepare() {}
// determine all direct and indirect ancestors of a feature
std::unordered_set<std::string> ApplicationFeature::ancestors() const {
TRI_ASSERT(_ancestorsDetermined);

View File

@ -126,9 +126,12 @@ class ApplicationFeature {
// notify the feature about a shutdown request
virtual void beginShutdown();
// stop and shut down the feature
// stop the feature
virtual void stop();
// shut down the feature
virtual void unprepare();
protected:
// return the ApplicationServer instance
ApplicationServer* server() const { return _server; }

View File

@ -205,6 +205,11 @@ void ApplicationServer::run(int argc, char* argv[]) {
reportServerProgress(_state);
stop();
// unprepare all features
_state = ServerState::IN_UNPREPARE;
reportServerProgress(_state);
unprepare();
// stopped
_state = ServerState::STOPPED;
reportServerProgress(_state);
@ -546,6 +551,19 @@ void ApplicationServer::stop() {
}
}
void ApplicationServer::unprepare() {
LOG_TOPIC(TRACE, Logger::STARTUP) << "ApplicationServer::unprepare";
for (auto it = _orderedFeatures.rbegin(); it != _orderedFeatures.rend(); ++it) {
auto feature = *it;
LOG_TOPIC(TRACE, Logger::STARTUP) << feature->name() << "::unprepare";
feature->unprepare();
feature->state(FeatureState::UNPREPARED);
reportFeatureProgress(_state, feature->name());
}
}
void ApplicationServer::wait() {
LOG_TOPIC(TRACE, Logger::STARTUP) << "ApplicationServer::wait";

View File

@ -44,6 +44,7 @@ enum class ServerState {
IN_START,
IN_WAIT,
IN_STOP,
IN_UNPREPARE,
STOPPED,
ABORT
};
@ -103,6 +104,11 @@ class ProgressHandler {
// `stop`
//
// Stops the features. The `stop` methods are called in reversed `start` order.
// This must stop all threads, but not destroy the features.
//
// `unprepare`
//
// This destroys the features.
class ApplicationServer {
ApplicationServer(ApplicationServer const&) = delete;
@ -115,7 +121,8 @@ class ApplicationServer {
VALIDATED,
PREPARED,
STARTED,
STOPPED
STOPPED,
UNPREPARED
};
static ApplicationServer* server;
@ -252,6 +259,9 @@ class ApplicationServer {
// stops features
void stop();
// destroys features
void unprepare();
// after start, the server will wait in this method until
// beginShutdown is called
void wait();

View File

@ -118,7 +118,7 @@ void DaemonFeature::daemonize() {
}
}
void DaemonFeature::stop() {
void DaemonFeature::unprepare() {
if (!_daemon) {
return;
}

View File

@ -36,7 +36,7 @@ class DaemonFeature final : public application_features::ApplicationFeature {
void collectOptions(std::shared_ptr<options::ProgramOptions>) override final;
void validateOptions(std::shared_ptr<options::ProgramOptions>) override final;
void daemonize() override final;
void stop() override final;
void unprepare() override final;
public:
void setDaemon(bool value) { _daemon = value; }

View File

@ -51,10 +51,6 @@ void NonceFeature::prepare() {
}
}
void NonceFeature::start() {
LOG(DEBUG) << "setting nonce hash size to " << _size;
}
void NonceFeature::stop() {
void NonceFeature::unprepare() {
Nonce::destroy();
}

View File

@ -36,8 +36,7 @@ class NonceFeature : public application_features::ApplicationFeature {
public:
void collectOptions(std::shared_ptr<options::ProgramOptions>) override final;
void prepare() override final;
void start() override final;
void stop() override final;
void unprepare() override final;
private:
uint64_t _size;

View File

@ -33,6 +33,15 @@ using namespace arangodb::application_features;
using namespace arangodb::basics;
using namespace arangodb::options;
static bool DONE = false;
static int CLIENT_PID = false;
static void StopHandler(int) {
LOG_TOPIC(INFO, Logger::STARTUP) << "received SIGINT for supervisor";
kill(CLIENT_PID, SIGTERM);
DONE = true;
}
SupervisorFeature::SupervisorFeature(
application_features::ApplicationServer* server)
: ApplicationFeature(server, "Supervisor"), _supervisor(false) {
@ -104,11 +113,8 @@ void SupervisorFeature::daemonize() {
// parent (supervisor)
if (0 < _clientPid) {
LOG_TOPIC(DEBUG, Logger::STARTUP) << "supervisor mode: within parent";
TRI_SetProcessTitle("arangodb [supervisor]");
ArangoGlobalContext::CONTEXT->unmaskStandardSignals();
std::for_each(supervisorFeatures.begin(), supervisorFeatures.end(),
[](ApplicationFeature* feature) {
LoggerFeature* logger =
@ -126,81 +132,98 @@ void SupervisorFeature::daemonize() {
std::for_each(supervisorFeatures.begin(), supervisorFeatures.end(),
[](ApplicationFeature* feature) { feature->start(); });
LOG_TOPIC(DEBUG, Logger::STARTUP) << "supervisor mode: within parent";
ArangoGlobalContext::CONTEXT->unmaskStandardSignals();
signal(SIGINT, StopHandler);
signal(SIGTERM, StopHandler);
CLIENT_PID = _clientPid;
DONE = false;
int status;
waitpid(_clientPid, &status, 0);
int res = waitpid(_clientPid, &status, 0);
bool horrible = true;
if (WIFEXITED(status)) {
// give information about cause of death
if (WEXITSTATUS(status) == 0) {
LOG_TOPIC(INFO, Logger::STARTUP) << "child " << _clientPid
<< " died of natural causes";
done = true;
horrible = false;
} else {
t = time(0) - startTime;
if (!DONE) {
done = true;
horrible = false;
}
else {
LOG_TOPIC(DEBUG, Logger::STARTUP) << "waitpid woke up with return value "
<< res << " and status " << status;
LOG_TOPIC(ERR, Logger::STARTUP)
<< "child " << _clientPid
<< " died a horrible death, exit status " << WEXITSTATUS(status);
if (WIFEXITED(status)) {
// give information about cause of death
if (WEXITSTATUS(status) == 0) {
LOG_TOPIC(INFO, Logger::STARTUP) << "child " << _clientPid
<< " died of natural causes";
done = true;
horrible = false;
} else {
t = time(0) - startTime;
if (t < MIN_TIME_ALIVE_IN_SEC) {
LOG_TOPIC(ERR, Logger::STARTUP)
<< "child only survived for " << t
<< " seconds, this will not work - please fix the error "
"first";
done = true;
} else {
done = false;
}
}
} else if (WIFSIGNALED(status)) {
switch (WTERMSIG(status)) {
case 2:
case 9:
case 15:
LOG_TOPIC(INFO, Logger::STARTUP)
<< "child " << _clientPid
<< " died of natural causes, exit status " << WTERMSIG(status);
done = true;
horrible = false;
break;
LOG_TOPIC(ERR, Logger::STARTUP)
<< "child " << _clientPid
<< " died a horrible death, exit status " << WEXITSTATUS(status);
default:
t = time(0) - startTime;
if (t < MIN_TIME_ALIVE_IN_SEC) {
LOG_TOPIC(ERR, Logger::STARTUP)
<< "child only survived for " << t
<< " seconds, this will not work - please fix the error "
"first";
done = true;
} else {
done = false;
}
}
} else if (WIFSIGNALED(status)) {
switch (WTERMSIG(status)) {
case 2:
case 9:
case 15:
LOG_TOPIC(INFO, Logger::STARTUP)
<< "child " << _clientPid
<< " died of natural causes, exit status " << WTERMSIG(status);
done = true;
horrible = false;
break;
LOG_TOPIC(ERR, Logger::STARTUP) << "child " << _clientPid
<< " died a horrible death, signal "
<< WTERMSIG(status);
default:
t = time(0) - startTime;
if (t < MIN_TIME_ALIVE_IN_SEC) {
LOG_TOPIC(ERR, Logger::STARTUP)
<< "child only survived for " << t
<< " seconds, this will not work - please fix the "
"error first";
done = true;
LOG_TOPIC(ERR, Logger::STARTUP) << "child " << _clientPid
<< " died a horrible death, signal "
<< WTERMSIG(status);
if (t < MIN_TIME_ALIVE_IN_SEC) {
LOG_TOPIC(ERR, Logger::STARTUP)
<< "child only survived for " << t
<< " seconds, this will not work - please fix the "
"error first";
done = true;
#ifdef WCOREDUMP
if (WCOREDUMP(status)) {
LOG_TOPIC(WARN, Logger::STARTUP) << "child process "
<< _clientPid
<< " produced a core dump";
}
if (WCOREDUMP(status)) {
LOG_TOPIC(WARN, Logger::STARTUP) << "child process "
<< _clientPid
<< " produced a core dump";
}
#endif
} else {
done = false;
}
} else {
done = false;
}
break;
}
} else {
LOG_TOPIC(ERR, Logger::STARTUP)
<< "child " << _clientPid
<< " died a horrible death, unknown cause";
done = false;
break;
}
} else {
LOG_TOPIC(ERR, Logger::STARTUP)
<< "child " << _clientPid
<< " died a horrible death, unknown cause";
done = false;
}
}
// remove pid file
if (horrible) {
result = EXIT_FAILURE;
}
@ -223,5 +246,8 @@ void SupervisorFeature::daemonize() {
std::for_each(supervisorFeatures.rbegin(), supervisorFeatures.rend(),
[](ApplicationFeature* feature) { feature->stop(); });
std::for_each(supervisorFeatures.rbegin(), supervisorFeatures.rend(),
[](ApplicationFeature* feature) { feature->unprepare(); });
exit(result);
}

View File

@ -80,7 +80,7 @@ void V8PlatformFeature::start() {
_allocator.reset(new ArrayBufferAllocator);
}
void V8PlatformFeature::stop() {
void V8PlatformFeature::unprepare() {
v8::V8::Dispose();
v8::V8::ShutdownPlatform();
_platform.reset();

Some files were not shown because too many files have changed in this diff Show More