1
0
Fork 0

Feature/efficiency (#3736)

This commit is contained in:
Jan 2018-01-05 16:51:31 +01:00 committed by GitHub
parent e6572e6804
commit b2b6c06cbf
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
143 changed files with 2279 additions and 2273 deletions

View File

@ -56,7 +56,7 @@ class AttributeTranslator {
void seal();
Builder* builder() { return _builder; }
Builder* builder() const { return _builder; }
// translate from string to id
uint8_t const* translate(std::string const& key) const;

View File

@ -39,11 +39,8 @@ namespace velocypack {
template <typename T>
class Buffer {
public:
Buffer() : _buffer(_local), _alloc(sizeof(_local)), _pos(0) {
#ifdef VELOCYPACK_DEBUG
// poison memory
memset(_buffer, 0xa5, _alloc);
#endif
Buffer() : _buffer(_local), _capacity(sizeof(_local)), _size(0) {
poison(_buffer, _capacity);
initWithNone();
}
@ -53,72 +50,72 @@ class Buffer {
}
Buffer(Buffer const& that) : Buffer() {
if (that._pos > 0) {
if (that._pos > sizeof(_local)) {
_buffer = new T[checkOverflow(that._pos)];
_alloc = that._pos;
if (that._size > 0) {
if (that._size > sizeof(_local)) {
_buffer = new T[checkOverflow(that._size)];
_capacity = that._size;
}
else {
_alloc = sizeof(_local);
_capacity = sizeof(_local);
}
memcpy(_buffer, that._buffer, checkOverflow(that._pos));
_pos = that._pos;
memcpy(_buffer, that._buffer, checkOverflow(that._size));
_size = that._size;
}
}
Buffer& operator=(Buffer const& that) {
if (this != &that) {
if (that._pos <= _alloc) {
if (that._size <= _capacity) {
// our own buffer is big enough to hold the data
initWithNone();
memcpy(_buffer, that._buffer, checkOverflow(that._pos));
memcpy(_buffer, that._buffer, checkOverflow(that._size));
}
else {
// our own buffer is not big enough to hold the data
auto buffer = new T[checkOverflow(that._pos)];
auto buffer = new T[checkOverflow(that._size)];
initWithNone();
memcpy(buffer, that._buffer, checkOverflow(that._pos));
memcpy(buffer, that._buffer, checkOverflow(that._size));
if (_buffer != _local) {
delete[] _buffer;
}
_buffer = buffer;
_alloc = that._pos;
_capacity = that._size;
}
_pos = that._pos;
_size = that._size;
}
return *this;
}
Buffer(Buffer&& that) : Buffer() {
Buffer(Buffer&& that) noexcept : _buffer(_local), _capacity(sizeof(_local)) {
if (that._buffer == that._local) {
memcpy(_buffer, that._buffer, checkOverflow(that._pos));
memcpy(_buffer, that._buffer, static_cast<size_t>(that._size));
} else {
_buffer = that._buffer;
_alloc = that._alloc;
_capacity = that._capacity;
that._buffer = that._local;
that._alloc = sizeof(that._local);
that._capacity = sizeof(that._local);
}
_pos = that._pos;
that._pos = 0;
_size = that._size;
that._size = 0;
}
Buffer& operator=(Buffer&& that) {
Buffer& operator=(Buffer&& that) noexcept {
if (this != &that) {
if (that._buffer == that._local) {
memcpy(_buffer, that._buffer, checkOverflow(that._pos));
memcpy(_buffer, that._buffer, static_cast<size_t>(that._size));
} else {
if (_buffer != _local) {
delete[] _buffer;
}
_buffer = that._buffer;
_alloc = that._alloc;
_capacity = that._capacity;
that._buffer = that._local;
that._alloc = sizeof(that._local);
that._capacity = sizeof(that._local);
}
_pos = that._pos;
that._pos = 0;
_size = that._size;
that._size = 0;
}
return *this;
}
@ -128,27 +125,46 @@ class Buffer {
inline T* data() { return _buffer; }
inline T const* data() const { return _buffer; }
inline bool empty() const { return _pos == 0; }
inline ValueLength size() const { return _pos; }
inline ValueLength length() const { return _pos; }
inline ValueLength byteSize() const { return _pos; }
inline bool empty() const { return _size == 0; }
inline ValueLength size() const { return _size; }
inline ValueLength length() const { return _size; }
inline ValueLength byteSize() const { return _size; }
inline ValueLength capacity() const noexcept { return _alloc; }
inline ValueLength capacity() const { return _capacity; }
std::string toString() const {
return std::string(reinterpret_cast<char const*>(_buffer), _pos);
return std::string(reinterpret_cast<char const*>(_buffer), _size);
}
void reset() noexcept {
_pos = 0;
_size = 0;
initWithNone();
}
void resetTo(ValueLength position) {
if (position > _alloc) {
if (position > _capacity) {
throw Exception(Exception::IndexOutOfBounds);
}
_pos = position;
_size = position;
}
// move internal buffer position one byte ahead
inline void advance() noexcept {
advance(1);
}
// move internal buffer position n bytes ahead
inline void advance(size_t value) noexcept {
VELOCYPACK_ASSERT(_size <= _capacity);
VELOCYPACK_ASSERT(_size + value <= _capacity);
_size += value;
}
// move internal buffer position n bytes backward
inline void rollback(size_t value) noexcept {
VELOCYPACK_ASSERT(_size <= _capacity);
VELOCYPACK_ASSERT(_size >= value);
_size -= value;
}
void clear() {
@ -156,15 +172,12 @@ class Buffer {
if (_buffer != _local) {
delete[] _buffer;
_buffer = _local;
_alloc = sizeof(_local);
#ifdef VELOCYPACK_DEBUG
// poison memory
memset(_buffer, 0xa5, _alloc);
#endif
_capacity = sizeof(_local);
poison(_buffer, _capacity);
initWithNone();
}
}
inline T& operator[](size_t position) noexcept {
return _buffer[position];
}
@ -174,14 +187,14 @@ class Buffer {
}
inline T& at(size_t position) {
if (position >= _pos) {
if (position >= _size) {
throw Exception(Exception::IndexOutOfBounds);
}
return operator[](position);
}
inline T const& at(size_t position) const {
if (position >= _pos) {
if (position >= _size) {
throw Exception(Exception::IndexOutOfBounds);
}
return operator[](position);
@ -189,78 +202,81 @@ class Buffer {
inline void push_back(char c) {
reserve(1);
_buffer[_pos++] = c;
_buffer[_size++] = c;
}
void append(uint8_t const* p, ValueLength len) {
reserve(len);
memcpy(_buffer + _pos, p, checkOverflow(len));
_pos += len;
memcpy(_buffer + _size, p, checkOverflow(len));
_size += len;
}
void append(char const* p, ValueLength len) {
reserve(len);
memcpy(_buffer + _pos, p, checkOverflow(len));
_pos += len;
memcpy(_buffer + _size, p, checkOverflow(len));
_size += len;
}
void append(std::string const& value) {
return append(value.c_str(), value.size());
return append(value.data(), value.size());
}
void append(Buffer<T> const& value) {
return append(value.data(), value.size());
}
void reserve(ValueLength len) {
if (_pos + len < _alloc) {
return;
}
inline void reserve(ValueLength len) {
VELOCYPACK_ASSERT(_size <= _capacity);
VELOCYPACK_ASSERT(_pos + len >= sizeof(_local));
if (_size + len >= _capacity) {
grow(len);
}
}
private:
// initialize Buffer with a None value
inline void initWithNone() noexcept { _buffer[0] = '\x00'; }
// poison buffer memory, used only for debugging
#ifdef VELOCYPACK_DEBUG
inline void poison(T* p, ValueLength length) noexcept {
memset(p, 0xa5, length);
}
#else
inline void poison(T*, ValueLength) noexcept {}
#endif
void grow(ValueLength len) {
VELOCYPACK_ASSERT(_size + len >= sizeof(_local));
// need reallocation
ValueLength newLen = _pos + len;
static double const GrowthFactor = 1.25;
if (_pos > 0 && newLen < GrowthFactor * _pos) {
ValueLength newLen = _size + len;
static constexpr double growthFactor = 1.25;
if (_size > 0 && newLen < growthFactor * _size) {
// ensure the buffer grows sensibly and not by 1 byte only
newLen = static_cast<ValueLength>(GrowthFactor * _pos);
newLen = static_cast<ValueLength>(growthFactor * _size);
}
VELOCYPACK_ASSERT(newLen > _pos);
VELOCYPACK_ASSERT(newLen > _size);
// try not to initialize memory here
T* p = new T[checkOverflow(newLen)];
#ifdef VELOCYPACK_DEBUG
// poison memory
memset(p, 0xa5, newLen);
#endif
poison(p, newLen);
// copy old data
memcpy(p, _buffer, checkOverflow(_pos));
memcpy(p, _buffer, checkOverflow(_size));
if (_buffer != _local) {
delete[] _buffer;
}
_buffer = p;
_alloc = newLen;
_capacity = newLen;
VELOCYPACK_ASSERT(_size <= _capacity);
}
// reserve and zero fill
void prealloc(ValueLength len) {
reserve(len);
#ifdef VELOCYPACK_DEBUG
// poison memory
memset(_buffer + _pos, 0xa5, len);
#endif
_pos += len;
}
private:
// initialize Buffer with a None value
inline void initWithNone() noexcept { _buffer[0] = '\x00'; }
T* _buffer;
ValueLength _alloc;
ValueLength _pos;
ValueLength _capacity;
ValueLength _size;
// an already initialized space for small values
// an already allocated space for small values
T _local[192];
};

View File

@ -59,23 +59,9 @@ class Builder {
uint64_t offset;
};
void reserve(ValueLength len) { reserveSpace(len); }
private:
std::shared_ptr<Buffer<uint8_t>> _buffer; // Here we collect the result
uint8_t* _start; // Always points to the start of _buffer
ValueLength _size; // Always contains the size of _buffer
ValueLength _pos; // the append position, always <= _size
std::vector<ValueLength> _stack; // Start positions of
// open objects/arrays
std::vector<std::vector<ValueLength>> _index; // Indices for starts
// of subindex
bool _keyWritten; // indicates that in the current object the key
// has been written but the value not yet
// Here are the mechanics of how this building process works:
// The whole VPack being built starts at where _start points to
// and uses at most _size bytes. The variable _pos keeps the
// The whole VPack being built starts at where _start points to.
// The variable _pos keeps the
// current write position. The method "set" simply writes a new
// VPack subobject at the current write position and advances
// it. Whenever one makes an array or object, a ValueLength for
@ -94,29 +80,37 @@ class Builder {
// buffer. Whenever the stack is empty, one can use the start,
// size and slice methods to get out the ready built VPack
// object(s).
void reserve(ValueLength len) {
VELOCYPACK_ASSERT(_start == _bufferPtr->data());
VELOCYPACK_ASSERT(_start + _pos >= _bufferPtr->data());
VELOCYPACK_ASSERT(_start + _pos <= _bufferPtr->data() + _bufferPtr->size());
void reserveSpace(ValueLength len) {
// Reserves len bytes at pos of the current state (top of stack)
// or throws an exception
if (_pos + len < _size) {
if (_pos + len < _bufferPtr->size()) {
return; // All OK, we can just increase tos->pos by len
}
#ifndef VELOCYPACK_64BIT
checkOverflow(_pos + len);
(void) checkOverflow(_pos + len);
#endif
// copy builder pointer into local variable
// this avoids accessing the shared pointer repeatedly, which has
// a small but non-negligible cost
Buffer<uint8_t>* buffer = _buffer.get();
VELOCYPACK_ASSERT(buffer != nullptr);
buffer->prealloc(len);
_start = buffer->data();
_size = buffer->size();
_bufferPtr->reserve(len);
_start = _bufferPtr->data();
}
private:
std::shared_ptr<Buffer<uint8_t>> _buffer; // Here we collect the result
Buffer<uint8_t>* _bufferPtr; // used for quicker access than shared_ptr
uint8_t* _start; // Always points to the start of _buffer
ValueLength _pos; // the append position
std::vector<ValueLength> _stack; // Start positions of
// open objects/arrays
std::vector<std::vector<ValueLength>> _index; // Indices for starts
// of subindex
bool _keyWritten; // indicates that in the current object the key
// has been written but the value not yet
// Sort the indices by attribute name:
static void doActualSort(std::vector<SortEntry>& entries);
@ -141,12 +135,11 @@ class Builder {
// Constructor and destructor:
explicit Builder(std::shared_ptr<Buffer<uint8_t>>& buffer,
Options const* options = &Options::Defaults)
: _buffer(buffer), _pos(0), _keyWritten(false), options(options) {
if (_buffer.get() == nullptr) {
: _buffer(buffer), _bufferPtr(_buffer.get()), _pos(0), _keyWritten(false), options(options) {
if (_bufferPtr == nullptr) {
throw Exception(Exception::InternalError, "Buffer cannot be a nullptr");
}
_start = _buffer->data();
_size = _buffer->size();
_start = _bufferPtr->data();
if (options == nullptr) {
throw Exception(Exception::InternalError, "Options cannot be a nullptr");
@ -160,11 +153,11 @@ class Builder {
explicit Builder(Options const* options = &Options::Defaults)
: _buffer(new Buffer<uint8_t>()),
_bufferPtr(_buffer.get()),
_pos(0),
_keyWritten(false),
options(options) {
_start = _buffer->data();
_size = _buffer->size();
_start = _bufferPtr->data();
if (options == nullptr) {
throw Exception(Exception::InternalError, "Options cannot be a nullptr");
@ -173,10 +166,10 @@ class Builder {
explicit Builder(Buffer<uint8_t>& buffer,
Options const* options = &Options::Defaults)
: _pos(buffer.size()), _keyWritten(false), options(options) {
: _bufferPtr(nullptr), _pos(buffer.size()), _keyWritten(false), options(options) {
_buffer.reset(&buffer, BufferNonDeleter<uint8_t>());
_start = _buffer->data();
_size = _buffer->size();
_bufferPtr = _buffer.get();
_start = _bufferPtr->data();
if (options == nullptr) {
throw Exception(Exception::InternalError, "Options cannot be a nullptr");
@ -189,8 +182,8 @@ class Builder {
Builder(Builder const& that)
: _buffer(new Buffer<uint8_t>(*that._buffer)),
_start(_buffer->data()),
_size(_buffer->size()),
_bufferPtr(_buffer.get()),
_start(_bufferPtr->data()),
_pos(that._pos),
_stack(that._stack),
_index(that._index),
@ -202,31 +195,29 @@ class Builder {
}
Builder& operator=(Builder const& that) {
if (that.options == nullptr) {
throw Exception(Exception::InternalError, "Options cannot be a nullptr");
if (this != &that) {
_buffer.reset(new Buffer<uint8_t>(*that._buffer));
_bufferPtr = _buffer.get();
_start = _bufferPtr->data();
_pos = that._pos;
_stack = that._stack;
_index = that._index;
_keyWritten = that._keyWritten;
options = that.options;
}
_buffer.reset(new Buffer<uint8_t>(*that._buffer));
_start = _buffer->data();
_size = _buffer->size();
_pos = that._pos;
_stack = that._stack;
_index = that._index;
_keyWritten = that._keyWritten;
options = that.options;
return *this;
}
Builder(Builder&& that) {
if (that.options == nullptr) {
throw Exception(Exception::InternalError, "Options cannot be a nullptr");
}
if (!that.isClosed()) {
throw Exception(Exception::InternalError, "Cannot move an open Builder");
}
_buffer = that._buffer;
that._buffer.reset(new Buffer<uint8_t>());
_start = _buffer->data();
_size = _buffer->size();
_bufferPtr = _buffer.get();
// seems unnecessary
// that._buffer.reset(new Buffer<uint8_t>());
// that._bufferPtr = that._buffer.get();
_start = _bufferPtr->data();
_pos = that._pos;
_stack.clear();
_stack.swap(that._stack);
@ -234,34 +225,33 @@ class Builder {
_index.swap(that._index);
_keyWritten = that._keyWritten;
options = that.options;
that._start = that._buffer->data();
that._size = 0;
//that._start = that._bufferPtr->data();
that._pos = 0;
that._keyWritten = false;
}
Builder& operator=(Builder&& that) {
if (that.options == nullptr) {
throw Exception(Exception::InternalError, "Options cannot be a nullptr");
}
if (!that.isClosed()) {
throw Exception(Exception::InternalError, "Cannot move an open Builder");
}
_buffer = that._buffer;
that._buffer.reset(new Buffer<uint8_t>());
_start = _buffer->data();
_size = _buffer->size();
_pos = that._pos;
_stack.clear();
_stack.swap(that._stack);
_index.clear();
_index.swap(that._index);
_keyWritten = that._keyWritten;
options = that.options;
that._start = that._buffer->data();
that._size = 0;
that._pos = 0;
that._keyWritten = false;
if (this != &that) {
_buffer = that._buffer;
_bufferPtr = _buffer.get();
// seems unnecessary
// that._buffer.reset(new Buffer<uint8_t>());
// that._bufferPtr = that._buffer.get();
_start = _bufferPtr->data();
_pos = that._pos;
_stack.clear();
_stack.swap(that._stack);
_index.clear();
_index.swap(that._index);
_keyWritten = that._keyWritten;
options = that.options;
// that._start = that._bufferPtr->data();
that._pos = 0;
that._keyWritten = false;
}
return *this;
}
@ -274,11 +264,12 @@ class Builder {
// After a steal the Builder is broken!
std::shared_ptr<Buffer<uint8_t>> res = _buffer;
_buffer.reset();
_bufferPtr = nullptr;
_pos = 0;
return res;
}
uint8_t const* data() const { return _buffer.get()->data(); }
uint8_t const* data() const noexcept { return _bufferPtr->data(); }
std::string toString() const;
@ -299,6 +290,8 @@ class Builder {
void clear() {
_pos = 0;
_stack.clear();
VELOCYPACK_ASSERT(_bufferPtr != nullptr);
_bufferPtr->resetTo(0);
_keyWritten = false;
}
@ -347,19 +340,37 @@ class Builder {
}
// Add a subvalue into an object from a Value:
uint8_t* add(std::string const& attrName, Value const& sub);
uint8_t* add(char const* attrName, Value const& sub);
uint8_t* add(char const* attrName, size_t attrLength, Value const& sub);
uint8_t* add(std::string const& attrName, Value const& sub) {
return addInternal<Value>(attrName, sub);
}
uint8_t* add(char const* attrName, Value const& sub) {
return addInternal<Value>(attrName, sub);
}
uint8_t* add(char const* attrName, size_t attrLength, Value const& sub) {
return addInternal<Value>(attrName, attrLength, sub);
}
// Add a subvalue into an object from a Slice:
uint8_t* add(std::string const& attrName, Slice const& sub);
uint8_t* add(char const* attrName, Slice const& sub);
uint8_t* add(char const* attrName, size_t attrLength, Slice const& sub);
uint8_t* add(std::string const& attrName, Slice const& sub) {
return addInternal<Slice>(attrName, sub);
}
uint8_t* add(char const* attrName, Slice const& sub) {
return addInternal<Slice>(attrName, sub);
}
uint8_t* add(char const* attrName, size_t attrLength, Slice const& sub) {
return addInternal<Slice>(attrName, attrLength, sub);
}
// Add a subvalue into an object from a ValuePair:
uint8_t* add(std::string const& attrName, ValuePair const& sub);
uint8_t* add(char const* attrName, ValuePair const& sub);
uint8_t* add(char const* attrName, size_t attrLength, ValuePair const& sub);
uint8_t* add(std::string const& attrName, ValuePair const& sub) {
return addInternal<ValuePair>(attrName, sub);
}
uint8_t* add(char const* attrName, ValuePair const& sub) {
return addInternal<ValuePair>(attrName, sub);
}
uint8_t* add(char const* attrName, size_t attrLength, ValuePair const& sub) {
return addInternal<ValuePair>(attrName, attrLength, sub);
}
// Add all subkeys and subvalues into an object from an ObjectIterator
// and leaves open the object intentionally
@ -387,11 +398,11 @@ class Builder {
try {
checkKeyIsString(Slice(sub).isString());
auto oldPos = _pos;
reserveSpace(1 + sizeof(void*));
reserve(1 + sizeof(void*));
// store pointer. this doesn't need to be portable
_start[_pos++] = 0x1d;
appendByteUnchecked(0x1d);
memcpy(_start + _pos, &sub, sizeof(void*));
_pos += sizeof(void*);
advance(sizeof(void*));
return _start + oldPos;
} catch (...) {
// clean up in case of an exception
@ -480,39 +491,34 @@ class Builder {
Builder& closeArray(ValueLength tos, std::vector<ValueLength>& index);
void addNull() {
reserveSpace(1);
_start[_pos++] = 0x18;
appendByte(0x18);
}
void addFalse() {
reserveSpace(1);
_start[_pos++] = 0x19;
appendByte(0x19);
}
void addTrue() {
reserveSpace(1);
_start[_pos++] = 0x1a;
appendByte(0x1a);
}
void addDouble(double v) {
uint64_t dv;
ValueLength vSize = sizeof(double);
memcpy(&dv, &v, vSize);
reserveSpace(1 + vSize);
_start[_pos++] = 0x1b;
reserve(1 + vSize);
appendByteUnchecked(0x1b);
for (uint64_t x = dv; vSize > 0; vSize--) {
_start[_pos++] = x & 0xff;
appendByteUnchecked(x & 0xff);
x >>= 8;
}
}
void addInt(int64_t v) {
if (v >= 0 && v <= 9) {
reserveSpace(1);
_start[_pos++] = static_cast<uint8_t>(0x30 + v);
appendByte(static_cast<uint8_t>(0x30 + v));
} else if (v < 0 && v >= -6) {
reserveSpace(1);
_start[_pos++] = static_cast<uint8_t>(0x40 + v);
appendByte(static_cast<uint8_t>(0x40 + v));
} else {
appendInt(v, 0x1f);
}
@ -520,8 +526,7 @@ class Builder {
void addUInt(uint64_t v) {
if (v <= 9) {
reserveSpace(1);
_start[_pos++] = static_cast<uint8_t>(0x30 + v);
appendByte(static_cast<uint8_t>(0x30 + v));
} else {
appendUInt(v, 0x27);
}
@ -529,24 +534,9 @@ class Builder {
void addUTCDate(int64_t v) {
constexpr uint8_t vSize = sizeof(int64_t); // is always 8
reserveSpace(1 + vSize);
_start[_pos++] = 0x1c;
appendLength<vSize>(toUInt64(v));
}
uint8_t* addString(uint64_t strLen) {
if (strLen > 126) {
// long string
_start[_pos++] = 0xbf;
// write string length
appendLength<8>(strLen);
} else {
// short string
_start[_pos++] = static_cast<uint8_t>(0x40 + strLen);
}
uint8_t* target = _start + _pos;
_pos += strLen;
return target;
reserve(1 + vSize);
appendByteUnchecked(0x1c);
appendLengthUnchecked<vSize>(toUInt64(v));
}
public:
@ -628,9 +618,9 @@ class Builder {
if (translated != nullptr) {
Slice item(translated);
ValueLength const l = item.byteSize();
reserveSpace(l);
reserve(l);
memcpy(_start + _pos, translated, checkOverflow(l));
_pos += l;
advance(l);
_keyWritten = true;
return set(sub);
}
@ -678,9 +668,9 @@ class Builder {
if (translated != nullptr) {
Slice item(translated);
ValueLength const l = item.byteSize();
reserveSpace(l);
reserve(l);
memcpy(_start + _pos, translated, checkOverflow(l));
_pos += l;
advance(l);
_keyWritten = true;
return set(sub);
}
@ -700,16 +690,16 @@ class Builder {
}
void addCompoundValue(uint8_t type) {
reserveSpace(9);
reserve(9);
// an Array or Object is started:
_stack.push_back(_pos);
while (_stack.size() > _index.size()) {
_index.emplace_back();
}
_index[_stack.size() - 1].clear();
_start[_pos++] = type;
appendByteUnchecked(type);
memset(_start + _pos, 0, 8);
_pos += 8; // Will be filled later with bytelength and nr subs
advance(8); // Will be filled later with bytelength and nr subs
}
void openCompoundValue(uint8_t type) {
@ -748,27 +738,33 @@ class Builder {
_index[depth].pop_back();
}
void reportAdd() {
inline void reportAdd() {
size_t depth = _stack.size() - 1;
_index[depth].push_back(_pos - _stack[depth]);
}
template <uint64_t n>
void appendLength(ValueLength v) {
reserveSpace(n);
reserve(n);
appendLengthUnchecked<n>(v);
}
template <uint64_t n>
void appendLengthUnchecked(ValueLength v) {
for (uint64_t i = 0; i < n; ++i) {
_start[_pos++] = v & 0xff;
appendByteUnchecked(v & 0xff);
v >>= 8;
}
}
void appendUInt(uint64_t v, uint8_t base) {
reserveSpace(9);
ValueLength save = _pos++;
reserve(9);
ValueLength save = _pos;
advance(1);
uint8_t vSize = 0;
do {
vSize++;
_start[_pos++] = static_cast<uint8_t>(v & 0xff);
appendByteUnchecked(static_cast<uint8_t>(v & 0xff));
v >>= 8;
} while (v != 0);
_start[save] = base + vSize;
@ -800,13 +796,44 @@ class Builder {
x = v >= 0 ? static_cast<uint64_t>(v)
: static_cast<uint64_t>(v + shift) + shift;
}
reserveSpace(1 + vSize);
_start[_pos++] = base + vSize;
reserve(1 + vSize);
appendByteUnchecked(base + vSize);
while (vSize-- > 0) {
_start[_pos++] = x & 0xff;
appendByteUnchecked(x & 0xff);
x >>= 8;
}
}
inline void appendByte(uint8_t value) {
reserve(1);
appendByteUnchecked(value);
}
inline void appendByteUnchecked(uint8_t value) {
_start[_pos++] = value;
VELOCYPACK_ASSERT(_bufferPtr != nullptr);
_bufferPtr->advance();
}
inline void resetTo(size_t value) {
_pos = value;
VELOCYPACK_ASSERT(_bufferPtr != nullptr);
_bufferPtr->resetTo(value);
}
// move byte position x bytes ahead
inline void advance(size_t value) {
_pos += value;
VELOCYPACK_ASSERT(_bufferPtr != nullptr);
_bufferPtr->advance(value);
}
// move byte position x bytes back
inline void rollback(size_t value) {
_pos -= value;
VELOCYPACK_ASSERT(_bufferPtr != nullptr);
_bufferPtr->rollback(value);
}
void checkAttributeUniqueness(Slice const& obj) const;
};

View File

@ -40,8 +40,8 @@ namespace arangodb {
namespace velocypack {
struct NormalizedCompare {
static bool equalsNumbers(ValueType lhsType,
Slice lhs, Slice rhs) {
static bool equalsNumbers(Slice lhs, Slice rhs) {
auto lhsType = lhs.type();
if (lhsType == rhs.type()) {
// both types are equal
if (lhsType == ValueType::Int || lhsType == ValueType::SmallInt) {
@ -96,7 +96,7 @@ static bool equals(Slice lhs, Slice rhs) {
case ValueType::Int:
case ValueType::UInt:
case ValueType::SmallInt: {
return equalsNumbers(lhsType, lhs, rhs);
return equalsNumbers(lhs, rhs);
}
case ValueType::String: {
return equalsStrings(lhs, rhs);

View File

@ -106,7 +106,7 @@ class Dumper {
void appendString(std::string const& str) {
_sink->reserve(2 + str.size());
_sink->push_back('"');
dumpString(str.c_str(), str.size());
dumpString(str.data(), str.size());
_sink->push_back('"');
}

View File

@ -79,7 +79,8 @@ class Parser {
bool isInteger;
};
std::shared_ptr<Builder> _b;
std::shared_ptr<Builder> _builder;
Builder* _builderPtr;
uint8_t const* _start;
size_t _size;
size_t _pos;
@ -99,13 +100,14 @@ class Parser {
if (options == nullptr) {
throw Exception(Exception::InternalError, "Options cannot be a nullptr");
}
_b.reset(new Builder());
_b->options = options;
_builder.reset(new Builder());
_builderPtr = _builder.get();
_builderPtr->options = options;
}
explicit Parser(std::shared_ptr<Builder>& builder,
Options const* options = &Options::Defaults)
: _b(builder), _start(nullptr), _size(0), _pos(0), _nesting(0),
: _builder(builder), _builderPtr(_builder.get()), _start(nullptr), _size(0), _pos(0), _nesting(0),
options(options) {
if (options == nullptr) {
throw Exception(Exception::InternalError, "Options cannot be a nullptr");
@ -120,10 +122,11 @@ class Parser {
if (options == nullptr) {
throw Exception(Exception::InternalError, "Options cannot be a nullptr");
}
_b.reset(&builder, BuilderNonDeleter());
_builder.reset(&builder, BuilderNonDeleter());
_builderPtr = _builder.get();
}
Builder const& builder() const { return *_b; }
Builder const& builder() const { return *_builderPtr; }
static std::shared_ptr<Builder> fromJson(
std::string const& json,
@ -150,7 +153,7 @@ class Parser {
}
ValueLength parse(std::string const& json, bool multi = false) {
return parse(reinterpret_cast<uint8_t const*>(json.c_str()), json.size(),
return parse(reinterpret_cast<uint8_t const*>(json.data()), json.size(),
multi);
}
@ -163,7 +166,7 @@ class Parser {
_size = size;
_pos = 0;
if (options->clearBuilderBeforeParse) {
_b->clear();
_builder->clear();
}
return parseInternal(multi);
}
@ -173,20 +176,21 @@ class Parser {
std::shared_ptr<Builder> steal() {
// Parser object is broken after a steal()
std::shared_ptr<Builder> res(_b);
_b.reset();
std::shared_ptr<Builder> res(_builder);
_builder.reset();
_builderPtr = nullptr;
return res;
}
// Beware, only valid as long as you do not parse more, use steal
// to move the data out!
uint8_t const* start() { return _b->start(); }
uint8_t const* start() { return _builderPtr->start(); }
// Returns the position at the time when the just reported error
// occurred, only use when handling an exception.
size_t errorPos() const { return _pos > 0 ? _pos - 1 : _pos; }
void clear() { _b->clear(); }
void clear() { _builderPtr->clear(); }
private:
inline int peek() const {
@ -222,7 +226,7 @@ class Parser {
if (consume() != 'r' || consume() != 'u' || consume() != 'e') {
throw Exception(Exception::ParseError, "Expecting 'true'");
}
_b->addTrue();
_builderPtr->addTrue();
}
void parseFalse() {
@ -231,7 +235,7 @@ class Parser {
consume() != 'e') {
throw Exception(Exception::ParseError, "Expecting 'false'");
}
_b->addFalse();
_builderPtr->addFalse();
}
void parseNull() {
@ -239,7 +243,7 @@ class Parser {
if (consume() != 'u' || consume() != 'l' || consume() != 'l') {
throw Exception(Exception::ParseError, "Expecting 'null'");
}
_b->addNull();
_builderPtr->addNull();
}
void scanDigits(ParsedNumber& value) {

View File

@ -69,7 +69,7 @@ class SliceScope;
class SliceStaticData {
friend class Slice;
static ValueLength const FixedTypeLengths[256];
static uint8_t const FixedTypeLengths[256];
static ValueType const TypeMap[256];
static unsigned int const WidthMap[32];
static unsigned int const FirstSubMap[32];
@ -425,13 +425,13 @@ class Slice {
throw Exception(Exception::InvalidValueType, "Expecting type Object");
}
Slice key = getNthKey(index, false);
Slice key = getNthKeyUntranslated(index);
return Slice(key.start() + key.byteSize());
}
// extract the nth value from an Object
Slice getNthValue(ValueLength index) const {
Slice key = getNthKey(index, false);
Slice key = getNthKeyUntranslated(index);
return Slice(key.start() + key.byteSize());
}
@ -731,7 +731,7 @@ class Slice {
ValueLength byteSize() const {
auto const h = head();
// check if the type has a fixed length first
ValueLength l = SliceStaticData::FixedTypeLengths[h];
ValueLength l = static_cast<ValueLength>(SliceStaticData::FixedTypeLengths[h]);
if (l != 0) {
// return fixed length
return l;
@ -746,15 +746,7 @@ class Slice {
return readVariableValueLength<false>(_start + 1);
}
if (h == 0x01 || h == 0x0a) {
// we cannot get here, because the FixedTypeLengths lookup
// above will have kicked in already. however, the compiler
// claims we'll be reading across the bounds of the input
// here...
return 1;
}
VELOCYPACK_ASSERT(h > 0x00 && h <= 0x0e);
VELOCYPACK_ASSERT(h > 0x01 && h <= 0x0e && h != 0x0a);
if (h >= sizeof(SliceStaticData::WidthMap) / sizeof(SliceStaticData::WidthMap[0])) {
throw Exception(Exception::InternalError, "invalid Array/Object type");
}
@ -762,20 +754,9 @@ class Slice {
SliceStaticData::WidthMap[h]);
}
case ValueType::External: {
return 1 + sizeof(char*);
}
case ValueType::UTCDate: {
return 1 + sizeof(int64_t);
}
case ValueType::Int: {
return static_cast<ValueLength>(1 + (h - 0x1f));
}
case ValueType::String: {
VELOCYPACK_ASSERT(h == 0xbf);
if (h < 0xbf) {
// we cannot get here, because the FixedTypeLengths lookup
// above will have kicked in already. however, the compiler
@ -783,6 +764,7 @@ class Slice {
// here...
return h - 0x40;
}
// long UTF-8 String
return static_cast<ValueLength>(
1 + 8 + readIntegerFixed<ValueLength, 8>(_start + 1));
@ -836,7 +818,7 @@ class Slice {
}
default: {
// fallthrough intentional
throw Exception(Exception::InternalError);
}
}
}
@ -877,7 +859,7 @@ class Slice {
int compareString(char const* value, size_t length) const;
inline int compareString(std::string const& attribute) const {
return compareString(attribute.c_str(), attribute.size());
return compareString(attribute.data(), attribute.size());
}
bool isEqualString(std::string const& attribute) const;
@ -942,6 +924,12 @@ class Slice {
// entry in the hash table for types 0x0b to 0x0e
Slice getNthKey(ValueLength index, bool translate) const;
// extract the nth member from an Object, no translation
inline Slice getNthKeyUntranslated(ValueLength index) const {
VELOCYPACK_ASSERT(type() == ValueType::Object);
return Slice(_start + getNthOffset(index));
}
// get the offset for the nth member from a compact Array or Object type
ValueLength getNthOffsetFromCompact(ValueLength index) const;

View File

@ -42,13 +42,13 @@ class StringRef {
constexpr StringRef() noexcept : _data(""), _length(0) {}
/// @brief create a StringRef from an std::string
explicit StringRef(std::string const& str) : StringRef(str.c_str(), str.size()) {}
explicit StringRef(std::string const& str) : StringRef(str.data(), str.size()) {}
/// @brief create a StringRef from a null-terminated C string
explicit StringRef(char const* data) : StringRef(data, strlen(data)) {}
explicit StringRef(char const* data) noexcept : StringRef(data, strlen(data)) {}
/// @brief create a StringRef from a VPack slice (must be of type String)
explicit StringRef(arangodb::velocypack::Slice const& slice) : StringRef() {
explicit StringRef(arangodb::velocypack::Slice const& slice) {
VELOCYPACK_ASSERT(slice.isString());
arangodb::velocypack::ValueLength l;
_data = slice.getString(l);
@ -56,28 +56,28 @@ class StringRef {
}
/// @brief create a StringRef from a C string plus length
StringRef(char const* data, size_t length) : _data(data), _length(length) {}
StringRef(char const* data, size_t length) noexcept : _data(data), _length(length) {}
/// @brief create a StringRef from another StringRef
StringRef(StringRef const& other) noexcept
: _data(other._data), _length(other._length) {}
/// @brief create a StringRef from another StringRef
StringRef& operator=(StringRef const& other) {
StringRef& operator=(StringRef const& other) noexcept {
_data = other._data;
_length = other._length;
return *this;
}
/// @brief create a StringRef from an std::string
StringRef& operator=(std::string const& other) {
_data = other.c_str();
StringRef& operator=(std::string const& other) noexcept {
_data = other.data();
_length = other.size();
return *this;
}
/// @brief create a StringRef from a null-terminated C string
StringRef& operator=(char const* other) {
StringRef& operator=(char const* other) noexcept {
_data = other;
_length = strlen(other);
return *this;
@ -91,15 +91,15 @@ class StringRef {
return *this;
}
int compare(std::string const& other) const {
int res = memcmp(_data, other.c_str(), (std::min)(_length, other.size()));
int compare(std::string const& other) const noexcept {
int res = memcmp(_data, other.data(), (std::min)(_length, other.size()));
if (res != 0) {
return res;
}
return static_cast<int>(_length) - static_cast<int>(other.size());
}
int compare(StringRef const& other) const {
int compare(StringRef const& other) const noexcept {
int res = memcmp(_data, other._data, (std::min)(_length, other._length));
if (res != 0) {
return res;
@ -111,7 +111,7 @@ class StringRef {
return std::string(_data, _length);
}
inline bool empty() const {
inline bool empty() const noexcept {
return (_length == 0);
}
@ -151,41 +151,6 @@ class StringRef {
}
}
/*
inline bool operator==(arangodb::velocypack::StringRef const& lhs, arangodb::velocypack::StringRef const& rhs) {
return (lhs.size() == rhs.size() && memcmp(lhs.data(), rhs.data(), lhs.size()) == 0);
}
inline bool operator!=(arangodb::velocypack::StringRef const& lhs, arangodb::velocypack::StringRef const& rhs) {
return !(lhs == rhs);
}
inline bool operator==(arangodb::velocypack::StringRef const& lhs, std::string const& rhs) {
return (lhs.size() == rhs.size() && memcmp(lhs.data(), rhs.c_str(), lhs.size()) == 0);
}
inline bool operator!=(arangodb::velocypack::StringRef const& lhs, std::string const& rhs) {
return !(lhs == rhs);
}
inline bool operator==(arangodb::velocypack::StringRef const& lhs, char const* rhs) {
size_t const len = strlen(rhs);
return (lhs.size() == len && memcmp(lhs.data(), rhs, lhs.size()) == 0);
}
inline bool operator!=(arangodb::velocypack::StringRef const& lhs, char const* rhs) {
return !(lhs == rhs);
}
inline bool operator<(arangodb::StringRef const& lhs, arangodb::StringRef const& rhs) {
return (lhs.compare(rhs) < 0);
}
inline bool operator>(arangodb::StringRef const& lhs, arangodb::StringRef const& rhs) {
return (lhs.compare(rhs) > 0);
}
*/
namespace std {
template <>

View File

@ -34,7 +34,7 @@
namespace arangodb {
namespace velocypack {
enum class ValueType {
enum class ValueType : uint8_t {
None, // not yet initialized
Illegal, // illegal value
Null, // JSON null

View File

@ -89,7 +89,7 @@ bool assemblerFunctionsDisabled();
std::size_t checkOverflow(ValueLength);
#else
// on a 64 bit platform, the following function is probably a no-op
static constexpr std::size_t checkOverflow(ValueLength length) {
static inline constexpr std::size_t checkOverflow(ValueLength length) {
return static_cast<std::size_t>(length);
}
#endif

View File

@ -161,7 +161,7 @@ void Builder::removeLast() {
if (index.empty()) {
throw Exception(Exception::BuilderNeedSubvalue);
}
_pos = tos + index.back();
resetTo(tos + index.back());
index.pop_back();
}
@ -169,7 +169,7 @@ Builder& Builder::closeEmptyArrayOrObject(ValueLength tos, bool isArray) {
// empty Array or Object
_start[tos] = (isArray ? 0x01 : 0x0a);
VELOCYPACK_ASSERT(_pos == tos + 9);
_pos -= 8; // no bytelength and number subvalues needed
rollback(8); // no bytelength and number subvalues needed
_stack.pop_back();
// Intentionally leave _index[depth] intact to avoid future allocs!
return *this;
@ -208,13 +208,13 @@ bool Builder::closeCompactArrayOrObject(ValueLength tos, bool isArray,
// need additional memory for storing the number of values
if (nLen > 8 - bLen) {
reserveSpace(nLen);
reserve(nLen);
}
storeVariableValueLength<true>(_start + tos + byteSize - 1,
static_cast<ValueLength>(index.size()));
_pos -= 8;
_pos += nLen + bLen;
rollback(8);
advance(nLen + bLen);
_stack.pop_back();
return true;
@ -301,7 +301,7 @@ Builder& Builder::closeArray(ValueLength tos, std::vector<ValueLength>& index) {
memmove(_start + tos + targetPos, _start + tos + 9, checkOverflow(len));
}
ValueLength const diff = 9 - targetPos;
_pos -= diff;
rollback(diff);
if (needIndexTable) {
size_t const n = index.size();
for (size_t i = 0; i < n; i++) {
@ -317,9 +317,9 @@ Builder& Builder::closeArray(ValueLength tos, std::vector<ValueLength>& index) {
// Now build the table:
if (needIndexTable) {
ValueLength tableBase;
reserveSpace(offsetSize * index.size() + (offsetSize == 8 ? 8 : 0));
reserve(offsetSize * index.size() + (offsetSize == 8 ? 8 : 0));
tableBase = _pos;
_pos += offsetSize * index.size();
advance(offsetSize * index.size());
for (size_t i = 0; i < index.size(); i++) {
uint64_t x = index[i];
for (size_t j = 0; j < offsetSize; j++) {
@ -423,7 +423,7 @@ Builder& Builder::close() {
memmove(_start + tos + targetPos, _start + tos + 9, checkOverflow(len));
}
ValueLength const diff = 9 - targetPos;
_pos -= diff;
rollback(diff);
size_t const n = index.size();
for (size_t i = 0; i < n; i++) {
index[i] -= diff;
@ -439,9 +439,9 @@ Builder& Builder::close() {
}
// Now build the table:
reserveSpace(offsetSize * index.size() + (offsetSize == 8 ? 8 : 0));
reserve(offsetSize * index.size() + (offsetSize == 8 ? 8 : 0));
ValueLength tableBase = _pos;
_pos += offsetSize * index.size();
advance(offsetSize * index.size());
// Object
if (index.size() >= 2) {
sortObjectIndex(_start + tos, index);
@ -552,8 +552,7 @@ uint8_t* Builder::set(Value const& item) {
"Cannot set a ValueType::None");
}
case ValueType::Null: {
reserveSpace(1);
_start[_pos++] = 0x18;
appendByte(0x18);
break;
}
case ValueType::Bool: {
@ -561,11 +560,10 @@ uint8_t* Builder::set(Value const& item) {
throw Exception(Exception::BuilderUnexpectedValue,
"Must give bool for ValueType::Bool");
}
reserveSpace(1);
if (item.getBool()) {
_start[_pos++] = 0x1a;
appendByte(0x1a);
} else {
_start[_pos++] = 0x19;
appendByte(0x19);
}
break;
}
@ -588,10 +586,10 @@ uint8_t* Builder::set(Value const& item) {
throw Exception(Exception::BuilderUnexpectedValue,
"Must give number for ValueType::Double");
}
reserveSpace(1 + sizeof(double));
_start[_pos++] = 0x1b;
reserve(1 + sizeof(double));
appendByteUnchecked(0x1b);
memcpy(&x, &v, sizeof(double));
appendLength<sizeof(double)>(x);
appendLengthUnchecked<sizeof(double)>(x);
break;
}
case ValueType::External: {
@ -604,12 +602,12 @@ uint8_t* Builder::set(Value const& item) {
throw Exception(Exception::BuilderUnexpectedValue,
"Must give void pointer for ValueType::External");
}
reserveSpace(1 + sizeof(void*));
reserve(1 + sizeof(void*));
// store pointer. this doesn't need to be portable
_start[_pos++] = 0x1d;
appendByteUnchecked(0x1d);
void const* value = item.getExternal();
memcpy(_start + _pos, &value, sizeof(void*));
_pos += sizeof(void*);
advance(sizeof(void*));
break;
}
case ValueType::SmallInt: {
@ -632,11 +630,10 @@ uint8_t* Builder::set(Value const& item) {
throw Exception(Exception::NumberOutOfRange,
"Number out of range of ValueType::SmallInt");
}
reserveSpace(1);
if (vv >= 0) {
_start[_pos++] = static_cast<uint8_t>(vv + 0x30);
appendByte(static_cast<uint8_t>(vv + 0x30));
} else {
_start[_pos++] = static_cast<uint8_t>(vv + 0x40);
appendByte(static_cast<uint8_t>(vv + 0x40));
}
break;
}
@ -713,33 +710,32 @@ uint8_t* Builder::set(Value const& item) {
size_t const size = s->size();
if (size <= 126) {
// short string
reserveSpace(1 + size);
_start[_pos++] = static_cast<uint8_t>(0x40 + size);
memcpy(_start + _pos, s->c_str(), size);
reserve(1 + size);
appendByteUnchecked(static_cast<uint8_t>(0x40 + size));
memcpy(_start + _pos, s->data(), size);
} else {
// long string
reserveSpace(1 + 8 + size);
_start[_pos++] = 0xbf;
appendLength<8>(size);
memcpy(_start + _pos, s->c_str(), size);
reserve(1 + 8 + size);
appendByteUnchecked(0xbf);
appendLengthUnchecked<8>(size);
memcpy(_start + _pos, s->data(), size);
}
_pos += size;
advance(size);
} else if (ctype == Value::CType::CharPtr) {
char const* p = item.getCharPtr();
size_t const size = strlen(p);
if (size <= 126) {
// short string
reserveSpace(1 + size);
_start[_pos++] = static_cast<uint8_t>(0x40 + size);
memcpy(_start + _pos, p, size);
reserve(1 + size);
appendByteUnchecked(static_cast<uint8_t>(0x40 + size));
} else {
// long string
reserveSpace(1 + 8 + size);
_start[_pos++] = 0xbf;
appendLength<8>(size);
memcpy(_start + _pos, p, size);
reserve(1 + 8 + size);
appendByteUnchecked(0xbf);
appendLengthUnchecked<8>(size);
}
_pos += size;
memcpy(_start + _pos, p, size);
advance(size);
} else {
throw Exception(
Exception::BuilderUnexpectedValue,
@ -761,33 +757,31 @@ uint8_t* Builder::set(Value const& item) {
Exception::BuilderUnexpectedValue,
"Must provide std::string or char const* for ValueType::Binary");
}
std::string const* s;
std::string value;
char const* p;
ValueLength size;
if (ctype == Value::CType::String) {
s = item.getString();
p = item.getString()->data();
size = item.getString()->size();
} else {
value = item.getCharPtr();
s = &value;
p = item.getCharPtr();
size = strlen(p);
}
ValueLength v = s->size();
appendUInt(v, 0xbf);
memcpy(_start + _pos, s->c_str(), checkOverflow(v));
_pos += v;
appendUInt(size, 0xbf);
reserve(size);
memcpy(_start + _pos, p, checkOverflow(size));
advance(size);
break;
}
case ValueType::Illegal: {
reserveSpace(1);
_start[_pos++] = 0x17;
appendByte(0x17);
break;
}
case ValueType::MinKey: {
reserveSpace(1);
_start[_pos++] = 0x1e;
appendByte(0x1e);
break;
}
case ValueType::MaxKey: {
reserveSpace(1);
_start[_pos++] = 0x1f;
appendByte(0x1f);
break;
}
case ValueType::BCD: {
@ -805,9 +799,9 @@ uint8_t* Builder::set(Slice const& item) {
checkKeyIsString(item.isString());
ValueLength const l = item.byteSize();
reserveSpace(l);
reserve(l);
memcpy(_start + _pos, item.start(), checkOverflow(l));
_pos += l;
advance(l);
return _start + _pos - l;
}
@ -823,37 +817,35 @@ uint8_t* Builder::set(ValuePair const& pair) {
if (pair.valueType() == ValueType::Binary) {
uint64_t v = pair.getSize();
reserveSpace(9 + v);
reserve(9 + v);
appendUInt(v, 0xbf);
memcpy(_start + _pos, pair.getStart(), checkOverflow(v));
_pos += v;
advance(v);
return _start + oldPos;
} else if (pair.valueType() == ValueType::String) {
uint64_t size = pair.getSize();
if (size > 126) {
// long string
reserveSpace(1 + 8 + size);
_start[_pos++] = 0xbf;
appendLength<8>(size);
memcpy(_start + _pos, pair.getStart(), checkOverflow(size));
_pos += size;
reserve(1 + 8 + size);
appendByteUnchecked(0xbf);
appendLengthUnchecked<8>(size);
} else {
// short string
reserveSpace(1 + size);
_start[_pos++] = static_cast<uint8_t>(0x40 + size);
memcpy(_start + _pos, pair.getStart(), checkOverflow(size));
_pos += size;
reserve(1 + size);
appendByteUnchecked(static_cast<uint8_t>(0x40 + size));
}
memcpy(_start + _pos, pair.getStart(), checkOverflow(size));
advance(size);
return _start + oldPos;
} else if (pair.valueType() == ValueType::Custom) {
// We only reserve space here, the caller has to fill in the custom type
uint64_t size = pair.getSize();
reserveSpace(size);
reserve(size);
uint8_t const* p = pair.getStart();
if (p != nullptr) {
memcpy(_start + _pos, p, checkOverflow(size));
}
_pos += size;
advance(size);
return _start + _pos - size;
}
throw Exception(Exception::BuilderUnexpectedType,
@ -905,42 +897,6 @@ void Builder::checkAttributeUniqueness(Slice const& obj) const {
}
}
// Add an attribute with a Value payload; forwards to addInternal<Value>().
// Returns a pointer to the start of the value written into the buffer.
uint8_t* Builder::add(std::string const& attrName, Value const& sub) {
  return addInternal<Value>(attrName, sub);
}

// Same as above, but the attribute name is a null-terminated C string.
uint8_t* Builder::add(char const* attrName, Value const& sub) {
  return addInternal<Value>(attrName, sub);
}

// Same as above, but the attribute name is given as pointer plus length.
uint8_t* Builder::add(char const* attrName, size_t attrLength, Value const& sub) {
  return addInternal<Value>(attrName, attrLength, sub);
}
// Add an attribute with a ValuePair payload; forwards to addInternal<ValuePair>().
// Returns a pointer to the start of the value written into the buffer.
uint8_t* Builder::add(std::string const& attrName, ValuePair const& sub) {
  return addInternal<ValuePair>(attrName, sub);
}

// Same as above, but the attribute name is a null-terminated C string.
uint8_t* Builder::add(char const* attrName, ValuePair const& sub) {
  return addInternal<ValuePair>(attrName, sub);
}

// Same as above, but the attribute name is given as pointer plus length.
uint8_t* Builder::add(char const* attrName, size_t attrLength, ValuePair const& sub) {
  return addInternal<ValuePair>(attrName, attrLength, sub);
}
// Add an attribute whose value is copied from an existing Slice; forwards to
// addInternal<Slice>(). Returns a pointer to the start of the copied value.
uint8_t* Builder::add(std::string const& attrName, Slice const& sub) {
  return addInternal<Slice>(attrName, sub);
}

// Same as above, but the attribute name is a null-terminated C string.
uint8_t* Builder::add(char const* attrName, Slice const& sub) {
  return addInternal<Slice>(attrName, sub);
}

// Same as above, but the attribute name is given as pointer plus length.
uint8_t* Builder::add(char const* attrName, size_t attrLength, Slice const& sub) {
  return addInternal<Slice>(attrName, attrLength, sub);
}
// Add all subkeys and subvalues into an object from an ObjectIterator
// and leaves open the object intentionally
uint8_t* Builder::add(ObjectIterator& sub) {

View File

@ -70,22 +70,7 @@ namespace arangodb {
namespace velocypack {
std::ostream& operator<<(std::ostream& stream, HexDump const& hexdump) {
int current = 0;
for (uint8_t it : hexdump.slice) {
if (current != 0) {
stream << hexdump.separator;
if (hexdump.valuesPerLine > 0 && current == hexdump.valuesPerLine) {
stream << std::endl;
current = 0;
}
}
stream << HexDump::toHex(it);
++current;
}
stream << hexdump.toString();
return stream;
}

View File

@ -34,7 +34,7 @@ using namespace arangodb::velocypack;
// The following function does the actual parse. It gets bytes
// via peek, consume and reset appends the result to the Builder
// in *_b. Errors are reported via an exception.
// in *_builderPtr. Errors are reported via an exception.
// Behind the scenes it runs two parses, one to collect sizes and
// check for parse errors (scan phase) and then one to actually
// build the result (build phase).
@ -50,18 +50,18 @@ ValueLength Parser::parseInternal(bool multi) {
ValueLength nr = 0;
do {
bool haveReported = false;
if (!_b->_stack.empty()) {
ValueLength const tos = _b->_stack.back();
if (_b->_start[tos] == 0x0b || _b->_start[tos] == 0x14) {
if (! _b->_keyWritten) {
if (!_builderPtr->_stack.empty()) {
ValueLength const tos = _builderPtr->_stack.back();
if (_builderPtr->_start[tos] == 0x0b || _builderPtr->_start[tos] == 0x14) {
if (!_builderPtr->_keyWritten) {
throw Exception(Exception::BuilderKeyMustBeString);
}
else {
_b->_keyWritten = false;
_builderPtr->_keyWritten = false;
}
}
else {
_b->reportAdd();
_builderPtr->reportAdd();
haveReported = true;
}
}
@ -70,7 +70,7 @@ ValueLength Parser::parseInternal(bool multi) {
}
catch (...) {
if (haveReported) {
_b->cleanupAdd();
_builderPtr->cleanupAdd();
}
throw;
}
@ -148,20 +148,20 @@ void Parser::parseNumber() {
}
if (!numberValue.isInteger) {
if (negative) {
_b->addDouble(-numberValue.doubleValue);
_builderPtr->addDouble(-numberValue.doubleValue);
} else {
_b->addDouble(numberValue.doubleValue);
_builderPtr->addDouble(numberValue.doubleValue);
}
} else if (negative) {
if (numberValue.intValue <= static_cast<uint64_t>(INT64_MAX)) {
_b->addInt(-static_cast<int64_t>(numberValue.intValue));
_builderPtr->addInt(-static_cast<int64_t>(numberValue.intValue));
} else if (numberValue.intValue == toUInt64(INT64_MIN)) {
_b->addInt(INT64_MIN);
_builderPtr->addInt(INT64_MIN);
} else {
_b->addDouble(-static_cast<double>(numberValue.intValue));
_builderPtr->addDouble(-static_cast<double>(numberValue.intValue));
}
} else {
_b->addUInt(numberValue.intValue);
_builderPtr->addUInt(numberValue.intValue);
}
return;
}
@ -182,7 +182,7 @@ void Parser::parseNumber() {
}
i = consume();
if (i < 0) {
_b->addDouble(fractionalPart);
_builderPtr->addDouble(fractionalPart);
return;
}
} else {
@ -196,8 +196,8 @@ void Parser::parseNumber() {
unconsume();
// use conventional atof() conversion here, to avoid precision loss
// when interpreting and multiplying the single digits of the input stream
// _b->addDouble(fractionalPart);
_b->addDouble(atof(reinterpret_cast<char const*>(_start) + startPos));
// _builderPtr->addDouble(fractionalPart);
_builderPtr->addDouble(atof(reinterpret_cast<char const*>(_start) + startPos));
return;
}
i = getOneOrThrow("Incomplete number");
@ -222,8 +222,8 @@ void Parser::parseNumber() {
}
// use conventional atof() conversion here, to avoid precision loss
// when interpreting and multiplying the single digits of the input stream
// _b->addDouble(fractionalPart);
_b->addDouble(atof(reinterpret_cast<char const*>(_start) + startPos));
// _builderPtr->addDouble(fractionalPart);
_builderPtr->addDouble(atof(reinterpret_cast<char const*>(_start) + startPos));
}
void Parser::parseString() {
@ -232,16 +232,8 @@ void Parser::parseString() {
// VPack representation. We assume that the string is short and
// insert 8 bytes for the length as soon as we reach 127 bytes
// in the VPack representation.
// copy builder pointer into local variable
// this avoids accessing the shared pointer repeatedly, which has
// a small but non-negligible cost
Builder* builder = _b.get();
VELOCYPACK_ASSERT(builder != nullptr);
ValueLength const base = builder->_pos;
builder->reserveSpace(1);
builder->_start[builder->_pos++] = 0x40; // correct this later
ValueLength const base = _builderPtr->_pos;
_builderPtr->appendByte(0x40); // correct this later
bool large = false; // set to true when we reach 128 bytes
uint32_t highSurrogate = 0; // non-zero if high-surrogate was seen
@ -249,42 +241,42 @@ void Parser::parseString() {
while (true) {
size_t remainder = _size - _pos;
if (remainder >= 16) {
builder->reserveSpace(remainder);
_builderPtr->reserve(remainder);
size_t count;
// Note that the SSE4.2 accelerated string copying functions might
// peek up to 15 bytes over the given end, because they use 128bit
// registers. Therefore, we have to subtract 15 from remainder
// to be on the safe side. Further bytes will be processed below.
if (options->validateUtf8Strings) {
count = JSONStringCopyCheckUtf8(builder->_start + builder->_pos, _start + _pos,
count = JSONStringCopyCheckUtf8(_builderPtr->_start + _builderPtr->_pos, _start + _pos,
remainder - 15);
} else {
count = JSONStringCopy(builder->_start + builder->_pos, _start + _pos,
count = JSONStringCopy(_builderPtr->_start + _builderPtr->_pos, _start + _pos,
remainder - 15);
}
_pos += count;
builder->_pos += count;
_builderPtr->advance(count);
}
int i = getOneOrThrow("Unfinished string");
if (!large && builder->_pos - (base + 1) > 126) {
if (!large && _builderPtr->_pos - (base + 1) > 126) {
large = true;
builder->reserveSpace(8);
ValueLength len = builder->_pos - (base + 1);
memmove(builder->_start + base + 9, builder->_start + base + 1, checkOverflow(len));
builder->_pos += 8;
_builderPtr->reserve(8);
ValueLength len = _builderPtr->_pos - (base + 1);
memmove(_builderPtr->_start + base + 9, _builderPtr->_start + base + 1, checkOverflow(len));
_builderPtr->advance(8);
}
switch (i) {
case '"':
ValueLength len;
if (!large) {
len = builder->_pos - (base + 1);
builder->_start[base] = 0x40 + static_cast<uint8_t>(len);
len = _builderPtr->_pos - (base + 1);
_builderPtr->_start[base] = 0x40 + static_cast<uint8_t>(len);
// String is ready
} else {
len = builder->_pos - (base + 9);
builder->_start[base] = 0xbf;
len = _builderPtr->_pos - (base + 9);
_builderPtr->_start[base] = 0xbf;
for (ValueLength i = 1; i <= 8; i++) {
builder->_start[base + i] = len & 0xff;
_builderPtr->_start[base + i] = len & 0xff;
len >>= 8;
}
}
@ -299,33 +291,27 @@ void Parser::parseString() {
case '"':
case '/':
case '\\':
builder->reserveSpace(1);
builder->_start[builder->_pos++] = static_cast<uint8_t>(i);
_builderPtr->appendByte(static_cast<uint8_t>(i));
highSurrogate = 0;
break;
case 'b':
builder->reserveSpace(1);
builder->_start[builder->_pos++] = '\b';
_builderPtr->appendByte('\b');
highSurrogate = 0;
break;
case 'f':
builder->reserveSpace(1);
builder->_start[builder->_pos++] = '\f';
_builderPtr->appendByte('\f');
highSurrogate = 0;
break;
case 'n':
builder->reserveSpace(1);
builder->_start[builder->_pos++] = '\n';
_builderPtr->appendByte('\n');
highSurrogate = 0;
break;
case 'r':
builder->reserveSpace(1);
builder->_start[builder->_pos++] = '\r';
_builderPtr->appendByte('\r');
highSurrogate = 0;
break;
case 't':
builder->reserveSpace(1);
builder->_start[builder->_pos++] = '\t';
_builderPtr->appendByte('\t');
highSurrogate = 0;
break;
case 'u': {
@ -348,23 +334,22 @@ void Parser::parseString() {
}
}
if (v < 0x80) {
builder->reserveSpace(1);
builder->_start[builder->_pos++] = static_cast<uint8_t>(v);
_builderPtr->appendByte(static_cast<uint8_t>(v));
highSurrogate = 0;
} else if (v < 0x800) {
builder->reserveSpace(2);
builder->_start[builder->_pos++] = 0xc0 + (v >> 6);
builder->_start[builder->_pos++] = 0x80 + (v & 0x3f);
_builderPtr->reserve(2);
_builderPtr->appendByteUnchecked(0xc0 + (v >> 6));
_builderPtr->appendByteUnchecked(0x80 + (v & 0x3f));
highSurrogate = 0;
} else if (v >= 0xdc00 && v < 0xe000 && highSurrogate != 0) {
// Low surrogate, put the two together:
v = 0x10000 + ((highSurrogate - 0xd800) << 10) + v - 0xdc00;
builder->_pos -= 3;
builder->reserveSpace(4);
builder->_start[builder->_pos++] = 0xf0 + (v >> 18);
builder->_start[builder->_pos++] = 0x80 + ((v >> 12) & 0x3f);
builder->_start[builder->_pos++] = 0x80 + ((v >> 6) & 0x3f);
builder->_start[builder->_pos++] = 0x80 + (v & 0x3f);
_builderPtr->rollback(3);
_builderPtr->reserve(4);
_builderPtr->appendByteUnchecked(0xf0 + (v >> 18));
_builderPtr->appendByteUnchecked(0x80 + ((v >> 12) & 0x3f));
_builderPtr->appendByteUnchecked(0x80 + ((v >> 6) & 0x3f));
_builderPtr->appendByteUnchecked(0x80 + (v & 0x3f));
highSurrogate = 0;
} else {
if (v >= 0xd800 && v < 0xdc00) {
@ -373,10 +358,10 @@ void Parser::parseString() {
} else {
highSurrogate = 0;
}
builder->reserveSpace(3);
builder->_start[builder->_pos++] = 0xe0 + (v >> 12);
builder->_start[builder->_pos++] = 0x80 + ((v >> 6) & 0x3f);
builder->_start[builder->_pos++] = 0x80 + (v & 0x3f);
_builderPtr->reserve(3);
_builderPtr->appendByteUnchecked(0xe0 + (v >> 12));
_builderPtr->appendByteUnchecked(0x80 + ((v >> 6) & 0x3f));
_builderPtr->appendByteUnchecked(0x80 + (v & 0x3f));
}
break;
}
@ -392,13 +377,11 @@ void Parser::parseString() {
throw Exception(Exception::UnexpectedControlCharacter);
}
highSurrogate = 0;
builder->reserveSpace(1);
builder->_start[builder->_pos++] = static_cast<uint8_t>(i);
_builderPtr->appendByte(static_cast<uint8_t>(i));
} else {
if (!options->validateUtf8Strings) {
highSurrogate = 0;
builder->reserveSpace(1);
builder->_start[builder->_pos++] = static_cast<uint8_t>(i);
_builderPtr->appendByte(static_cast<uint8_t>(i));
} else {
// multi-byte UTF-8 sequence!
int follow = 0;
@ -418,14 +401,14 @@ void Parser::parseString() {
}
// validate follow up characters
builder->reserveSpace(1 + follow);
builder->_start[builder->_pos++] = static_cast<uint8_t>(i);
_builderPtr->reserve(1 + follow);
_builderPtr->appendByteUnchecked(static_cast<uint8_t>(i));
for (int j = 0; j < follow; ++j) {
i = getOneOrThrow("scanString: truncated UTF-8 sequence");
if ((i & 0xc0) != 0x80) {
throw Exception(Exception::InvalidUtf8Sequence);
}
builder->_start[builder->_pos++] = static_cast<uint8_t>(i);
_builderPtr->appendByteUnchecked(static_cast<uint8_t>(i));
}
highSurrogate = 0;
}
@ -436,19 +419,13 @@ void Parser::parseString() {
}
void Parser::parseArray() {
// copy builder pointer into local variable
// this avoids accessing the shared pointer repeatedly, which has
// a small but non-negligible cost
Builder* builder = _b.get();
VELOCYPACK_ASSERT(builder != nullptr);
builder->addArray();
_builderPtr->addArray();
int i = skipWhiteSpace("Expecting item or ']'");
if (i == ']') {
// empty array
++_pos; // the closing ']'
builder->close();
_builderPtr->close();
return;
}
@ -456,13 +433,13 @@ void Parser::parseArray() {
while (true) {
// parse array element itself
builder->reportAdd();
_builderPtr->reportAdd();
parseJson();
i = skipWhiteSpace("Expecting ',' or ']'");
if (i == ']') {
// end of array
++_pos; // the closing ']'
builder->close();
_builderPtr->close();
decreaseNesting();
return;
}
@ -478,13 +455,7 @@ void Parser::parseArray() {
}
void Parser::parseObject() {
// copy builder pointer into local variable
// this avoids accessing the shared pointer repeatedly, which has
// a small but non-negligible cost
Builder* builder = _b.get();
VELOCYPACK_ASSERT(builder != nullptr);
builder->addObject();
_builderPtr->addObject();
int i = skipWhiteSpace("Expecting item or '}'");
if (i == '}') {
@ -493,7 +464,7 @@ void Parser::parseObject() {
if (_nesting != 0 || !options->keepTopLevelOpen) {
// only close if we've not been asked to keep top level open
builder->close();
_builderPtr->close();
}
return;
}
@ -508,22 +479,22 @@ void Parser::parseObject() {
// get past the initial '"'
++_pos;
builder->reportAdd();
_builderPtr->reportAdd();
bool excludeAttribute = false;
auto const lastPos = builder->_pos;
auto const lastPos = _builderPtr->_pos;
if (options->attributeExcludeHandler == nullptr) {
parseString();
} else {
parseString();
if (options->attributeExcludeHandler->shouldExclude(
Slice(builder->_start + lastPos), _nesting)) {
Slice(_builderPtr->_start + lastPos), _nesting)) {
excludeAttribute = true;
}
}
if (!excludeAttribute && options->attributeTranslator != nullptr) {
// check if a translation for the attribute name exists
Slice key(builder->_start + lastPos);
Slice key(_builderPtr->_start + lastPos);
if (key.isString()) {
ValueLength keyLength;
@ -535,8 +506,8 @@ void Parser::parseObject() {
// found translation... now reset position to old key position
// and simply overwrite the existing key with the numeric translation
// id
builder->_pos = lastPos;
builder->addUInt(Slice(translated).getUInt());
_builderPtr->resetTo(lastPos);
_builderPtr->addUInt(Slice(translated).getUInt());
}
}
}
@ -551,7 +522,7 @@ void Parser::parseObject() {
parseJson();
if (excludeAttribute) {
builder->removeLast();
_builderPtr->removeLast();
}
i = skipWhiteSpace("Expecting ',' or '}'");
@ -560,7 +531,7 @@ void Parser::parseObject() {
++_pos; // the closing '}'
if (_nesting != 1 || !options->keepTopLevelOpen) {
// only close if we've not been asked to keep top level open
builder->close();
_builderPtr->close();
}
decreaseNesting();
return;

View File

@ -39,7 +39,7 @@
using namespace arangodb::velocypack;
using VT = arangodb::velocypack::ValueType;
ValueLength const SliceStaticData::FixedTypeLengths[256] = {
uint8_t const SliceStaticData::FixedTypeLengths[256] = {
/* 0x00 */ 1, /* 0x01 */ 1,
/* 0x02 */ 0, /* 0x03 */ 0,
/* 0x04 */ 0, /* 0x05 */ 0,
@ -512,10 +512,7 @@ Slice Slice::get(std::string const& attribute) const {
// otherwise we'll always use the linear search
constexpr ValueLength SortedSearchEntriesThreshold = 4;
// bool const isSorted = (h >= 0x0b && h <= 0x0e);
if (n >= SortedSearchEntriesThreshold && (h >= 0x0b && h <= 0x0e)) {
// This means, we have to handle the special case n == 1 only
// in the linear search!
switch (offsetSize) {
case 1:
return searchObjectKeyBinary<1>(attribute, ieBase, n);
@ -666,7 +663,7 @@ bool Slice::isEqualString(std::string const& attribute) const {
if (static_cast<size_t>(keyLength) != attribute.size()) {
return false;
}
return (memcmp(k, attribute.c_str(), attribute.size()) == 0);
return (memcmp(k, attribute.data(), attribute.size()) == 0);
}
Slice Slice::getFromCompactObject(std::string const& attribute) const {

View File

@ -55,14 +55,13 @@ static const uint8_t states[] = {
bool Utf8Helper::isValidUtf8(uint8_t const* p, ValueLength len) {
uint8_t const* end = p + len;
uint8_t state = ValidChar;
while (p < end) {
state = states[256 + state * 16 + states[*p]];
state = states[256 + state * 16 + states[*p++]];
if (state == InvalidChar) {
return false;
}
++p;
}
return (state == ValidChar);

View File

@ -36,12 +36,11 @@ static inline size_t JSONStringCopyInline(uint8_t* dst, uint8_t const* src,
// Stop at the first control character or backslash or double quote.
// Report the number of bytes copied. May copy less bytes, for example
// for alignment reasons.
size_t count = limit;
while (count > 0 && *src >= 32 && *src != '\\' && *src != '"') {
uint8_t const* end = src + limit;
while (src < end && *src >= 32 && *src != '\\' && *src != '"') {
*dst++ = *src++;
count--;
}
return limit - count;
return limit - (end - src);
}
size_t JSONStringCopyC(uint8_t* dst, uint8_t const* src, size_t limit);

View File

@ -12,6 +12,21 @@ devel
* UI: updated dygraph js library to version 2.1.0
* honor specified COLLECT method in AQL COLLECT options
for example, when the user explicitly asks for the COLLECT method
to be `sorted`, the optimizer will now not produce an alternative
version of the plan using the hash method.
additionally, if the user explicitly asks for the COLLECT method to
be `hash`, the optimizer will now change the existing plan to use
the hash method if possible instead of just creating an alternative
plan.
`COLLECT ... OPTIONS { method: 'sorted' }` => always use sorted method
`COLLECT ... OPTIONS { method: 'hash' }` => use hash if this is technically possible
`COLLECT ...` (no options) => create a plan using sorted, and another plan using hash method
* added C++ implementation for AQL function `SHA512()`
* added AQL function `IS_KEY`

View File

@ -259,7 +259,7 @@ To ensure correctness of the result, the AQL optimizer will automatically insert
statement into the query in front of the *COLLECT* statement. The optimizer may be able to
optimize away that *SORT* statement later if a sorted index is present on the group criteria.
In case a *COLLECT* qualifies for using the *hash* variant, the optimizer will create an extra
In case a *COLLECT* statement qualifies for using the *hash* variant, the optimizer will create an extra
plan for it at the beginning of the planning phase. In this plan, no extra *SORT* statement will be
added in front of the *COLLECT*. This is because the *hash* variant of *COLLECT* does not require
sorted input. Instead, a *SORT* statement will be added after the *COLLECT* to sort its output.
@ -300,9 +300,13 @@ the *sorted* variant of *COLLECT* and not even create a plan using the *hash* va
OPTIONS { method: "sorted" }
```
Note that specifying *hash* as method will not make the optimizer use the *hash* variant. This is
because the *hash* variant is not eligible for all queries. Instead, if no options or any other method
than *sorted* are specified in *OPTIONS*, the optimizer will use its regular cost estimations.
It is also possible to specify *hash* as the preferred method. In this case the optimizer will create
a plan using the *hash* method only if the COLLECT statement qualifies (not all COLLECT statements
can use the *hash* method). In case the COLLECT statement qualifies, there will be only one plan
that uses the *hash* method. If it does not qualify, the optimizer will use the *sorted* method.
If no method is specified, then the optimizer will create a plan that uses the *sorted* method, and
an additional plan using the *hash* method if the COLLECT statement qualifies for it.
### COLLECT vs. RETURN DISTINCT

View File

@ -250,7 +250,7 @@ class AgencyCommResult {
void clear();
VPackSlice slice() const;
void setVPack(std::shared_ptr<velocypack::Builder> vpack) { _vpack = vpack; }
void setVPack(std::shared_ptr<velocypack::Builder> const& vpack) { _vpack = vpack; }
public:
std::string _location;

View File

@ -27,7 +27,7 @@ using namespace arangodb::consensus;
using namespace arangodb::velocypack;
StoreCallback::StoreCallback(std::string const& path, std::string const& body)
: _path(path) , _body(body){}
: _path(path) , _body(body) {}
bool StoreCallback::operator()(arangodb::ClusterCommResult* res) {
if (res->status != CL_COMM_SENT) {

View File

@ -33,9 +33,7 @@ class StoreCallback : public arangodb::ClusterCommCallback {
public:
StoreCallback(std::string const&, std::string const&);
virtual bool operator()(arangodb::ClusterCommResult*) override final;
void shutdown();
bool operator()(arangodb::ClusterCommResult*) override final;
private:

View File

@ -33,7 +33,6 @@ namespace arangodb {
namespace transaction {
class Methods;
}
;
namespace aql {

View File

@ -129,7 +129,6 @@ class AqlItemBlock {
TRI_ASSERT(_data[index * _nrRegs + varNr].isEmpty());
void* p = &_data[index * _nrRegs + varNr];
size_t mem = 0;
// construct the AqlValue in place
AqlValue* value;
try {
@ -144,8 +143,7 @@ class AqlItemBlock {
// Now update the reference count, if this fails, we'll roll it back
if (value->requiresDestruction()) {
if (++_valueCount[*value] == 1) {
mem = value->memoryUsage();
increaseMemoryUsage(mem);
increaseMemoryUsage(value->memoryUsage());
}
}
} catch (...) {

View File

@ -32,7 +32,6 @@ namespace arangodb {
namespace transaction {
class Methods;
}
;
namespace aql {

View File

@ -34,6 +34,7 @@ namespace arangodb {
namespace transaction {
class Methods;
}
struct ClusterCommResult;
namespace aql {

View File

@ -58,7 +58,6 @@ void RemoteNode::toVelocyPackHelper(VPackBuilder& nodes, bool verbose) const {
nodes.close();
}
/// @brief estimateCost
double RemoteNode::estimateCost(size_t& nrItems) const {
if (_dependencies.size() == 1) {

View File

@ -30,7 +30,7 @@ using namespace arangodb::aql;
/// @brief constructor
CollectOptions::CollectOptions(VPackSlice const& slice)
: method(COLLECT_METHOD_UNDEFINED) {
: method(CollectMethod::UNDEFINED) {
VPackSlice v = slice.get("collectOptions");
if (v.isObject()) {
v = v.get("method");
@ -40,16 +40,15 @@ CollectOptions::CollectOptions(VPackSlice const& slice)
}
}
/// @brief whether or not the hash method can be used
bool CollectOptions::canUseHashMethod() const {
if (method == CollectMethod::COLLECT_METHOD_SORTED) {
return false;
}
if (method == CollectMethod::COLLECT_METHOD_DISTINCT) {
return false;
}
/// @brief whether or not the method can be used
bool CollectOptions::canUseMethod(CollectMethod method) const {
return (this->method == method ||
this->method == CollectMethod::UNDEFINED);
}
return true;
/// @brief whether or not the method should be used (i.e. is preferred)
bool CollectOptions::shouldUseMethod(CollectMethod method) const {
return (this->method == method);
}
/// @brief convert the options to VelocyPack
@ -62,28 +61,28 @@ void CollectOptions::toVelocyPack(VPackBuilder& builder) const {
CollectOptions::CollectMethod CollectOptions::methodFromString(
std::string const& method) {
if (method == "hash") {
return CollectMethod::COLLECT_METHOD_HASH;
return CollectMethod::HASH;
}
if (method == "sorted") {
return CollectMethod::COLLECT_METHOD_SORTED;
return CollectMethod::SORTED;
}
if (method == "distinct") {
return CollectMethod::COLLECT_METHOD_DISTINCT;
return CollectMethod::DISTINCT;
}
return CollectMethod::COLLECT_METHOD_UNDEFINED;
return CollectMethod::UNDEFINED;
}
/// @brief stringify the aggregation method
std::string CollectOptions::methodToString(
CollectOptions::CollectMethod method) {
if (method == CollectMethod::COLLECT_METHOD_HASH) {
if (method == CollectMethod::HASH) {
return std::string("hash");
}
if (method == CollectMethod::COLLECT_METHOD_SORTED) {
if (method == CollectMethod::SORTED) {
return std::string("sorted");
}
if (method == CollectMethod::COLLECT_METHOD_DISTINCT) {
if (method == CollectMethod::DISTINCT) {
return std::string("distinct");
}

View File

@ -35,21 +35,24 @@ namespace aql {
/// @brief CollectOptions
struct CollectOptions {
/// @brief selected aggregation method
enum CollectMethod {
COLLECT_METHOD_UNDEFINED,
COLLECT_METHOD_HASH,
COLLECT_METHOD_SORTED,
COLLECT_METHOD_DISTINCT
enum class CollectMethod {
UNDEFINED,
HASH,
SORTED,
DISTINCT
};
/// @brief constructor, using default values
CollectOptions() : method(COLLECT_METHOD_UNDEFINED) {}
CollectOptions() : method(CollectMethod::UNDEFINED) {}
/// @brief constructor
explicit CollectOptions(arangodb::velocypack::Slice const&);
/// @brief whether or not the hash method can be used
bool canUseHashMethod() const;
/// @brief whether or not the method can be used
bool canUseMethod(CollectMethod method) const;
/// @brief whether or not the method should be used
bool shouldUseMethod(CollectMethod method) const;
/// @brief convert the options to VelocyPack
void toVelocyPack(arangodb::velocypack::Builder&) const;

View File

@ -34,7 +34,6 @@
#include "Transaction/Methods.h"
#include "Utils/OperationCursor.h"
#include "VocBase/LogicalCollection.h"
#include "VocBase/ManagedDocumentResult.h"
#include "VocBase/vocbase.h"
using namespace arangodb::aql;
@ -44,12 +43,11 @@ EnumerateCollectionBlock::EnumerateCollectionBlock(
: ExecutionBlock(engine, ep),
DocumentProducingBlock(ep, _trx),
_collection(ep->_collection),
_mmdr(new ManagedDocumentResult),
_cursor(
_trx->indexScan(_collection->getName(),
(ep->_random ? transaction::Methods::CursorType::ANY
: transaction::Methods::CursorType::ALL),
_mmdr.get(), false)) {
false)) {
TRI_ASSERT(_cursor->ok());
}

View File

@ -34,7 +34,6 @@
namespace arangodb {
class LocalDocumentId;
class ManagedDocumentResult;
struct OperationCursor;
namespace aql {
@ -67,8 +66,6 @@ class EnumerateCollectionBlock final : public ExecutionBlock, public DocumentPro
/// @brief collection
Collection* _collection;
std::unique_ptr<ManagedDocumentResult> _mmdr;
/// @brief cursor
std::unique_ptr<OperationCursor> _cursor;
};

View File

@ -140,16 +140,13 @@ static ExecutionBlock* CreateBlock(
auto aggregationMethod =
static_cast<CollectNode const*>(en)->aggregationMethod();
if (aggregationMethod ==
CollectOptions::CollectMethod::COLLECT_METHOD_HASH) {
if (aggregationMethod == CollectOptions::CollectMethod::HASH) {
return new HashedCollectBlock(engine,
static_cast<CollectNode const*>(en));
} else if (aggregationMethod ==
CollectOptions::CollectMethod::COLLECT_METHOD_SORTED) {
} else if (aggregationMethod == CollectOptions::CollectMethod::SORTED) {
return new SortedCollectBlock(engine,
static_cast<CollectNode const*>(en));
} else if (aggregationMethod ==
CollectOptions::CollectMethod::COLLECT_METHOD_DISTINCT) {
} else if (aggregationMethod == CollectOptions::CollectMethod::DISTINCT) {
return new DistinctCollectBlock(engine,
static_cast<CollectNode const*>(en));
}

View File

@ -390,6 +390,10 @@ class ExecutionNode {
/// @brief invalidate the cost estimation for the node and its dependencies
void invalidateCost();
/// @brief this actually estimates the costs as well as the number of items
/// coming out of the node
virtual double estimateCost(size_t& nrItems) const = 0;
/// @brief estimate the cost of the node . . .
double getCost(size_t& nrItems) const {
if (!_estimatedCostSet) {
@ -402,11 +406,7 @@ class ExecutionNode {
}
return _estimatedCost;
}
/// @brief this actually estimates the costs as well as the number of items
/// coming out of the node
virtual double estimateCost(size_t& nrItems) const = 0;
/// @brief walk a complete execution plan recursively
bool walk(WalkerWorker<ExecutionNode>* worker);
@ -438,8 +438,6 @@ class ExecutionNode {
auto v(getVariablesUsedHere());
std::unordered_set<VariableId> ids;
ids.reserve(v.size());
for (auto& it : v) {
ids.emplace(it->id);
}

View File

@ -576,7 +576,7 @@ CollectNode* ExecutionPlan::createAnonymousCollect(
_ast->variables()->variables(false), false, true);
registerNode(reinterpret_cast<ExecutionNode*>(en));
en->aggregationMethod(CollectOptions::COLLECT_METHOD_DISTINCT);
en->aggregationMethod(CollectOptions::CollectMethod::DISTINCT);
en->specialized();
return en;

View File

@ -38,6 +38,7 @@
#include "Aql/V8Expression.h"
#include "Aql/Variable.h"
#include "Basics/Exceptions.h"
#include "Basics/NumberUtils.h"
#include "Basics/StringBuffer.h"
#include "Basics/VelocyPackHelper.h"
#include "Basics/VPackStringBufferAdapter.h"
@ -631,15 +632,17 @@ AqlValue Expression::executeSimpleExpressionIndexedAccess(
}
if (indexResult.isString()) {
std::string const value = indexResult.slice().copyString();
VPackSlice s = indexResult.slice();
TRI_ASSERT(s.isString());
VPackValueLength l;
char const* p = s.getString(l);
try {
// stoll() might throw an exception if the string is not a number
int64_t position = static_cast<int64_t>(std::stoll(value));
bool valid;
int64_t position = NumberUtils::atoi<int64_t>(p, p + l, valid);
if (valid) {
return result.at(trx, position, mustDestroy, true);
} catch (...) {
// no number found.
}
}
// no number found.
}
// fall-through to returning null

File diff suppressed because it is too large Load Diff

View File

@ -69,8 +69,7 @@ struct Functions {
/// @brief extract a function parameter from the arguments
static AqlValue ExtractFunctionParameterValue(
transaction::Methods*, VPackFunctionParameters const& parameters,
size_t position);
VPackFunctionParameters const& parameters, size_t position);
/// @brief extra a collection name from an AqlValue
static std::string ExtractCollectionName(

View File

@ -75,7 +75,8 @@ void Optimizer::addPlan(std::unique_ptr<ExecutionPlan> plan, OptimizerRule const
// @brief the actual optimization
int Optimizer::createPlans(ExecutionPlan* plan,
std::vector<std::string> const& rulesSpecification,
bool inspectSimplePlans) {
bool inspectSimplePlans,
bool estimateAllPlans) {
_runOnlyRequiredRules = false;
// _plans contains the previous optimization result
_plans.clear();
@ -92,10 +93,15 @@ int Optimizer::createPlans(ExecutionPlan* plan,
plan->isDeadSimple()) {
// the plan is so simple that any further optimizations would probably cost
// more than simply executing the plan
estimatePlans();
if (!plan->varUsageComputed()) {
plan->findVarUsage();
}
if (estimateAllPlans) {
plan->getCost();
}
return TRI_ERROR_NO_ERROR;
}
int leastDoneLevel = 0;
TRI_ASSERT(!OptimizerRulesFeature::_rules.empty());
@ -199,31 +205,30 @@ int Optimizer::createPlans(ExecutionPlan* plan,
TRI_ASSERT(_plans.size() >= 1);
estimatePlans();
if (_plans.size() > 1) {
sortPlans();
// finalize plans
for (auto& p : _plans.list) {
if (!p->varUsageComputed()) {
p->findVarUsage();
}
}
// do cost estimation
if (estimateAllPlans || _plans.size() > 1) {
// only do estimations if necessary
for (auto& p : _plans.list) {
p->getCost();
// this value is cached in the plan, so formally this step is
// unnecessary, but for the sake of cleanliness...
}
}
if (_plans.size() > 1) {
// only sort plans when necessary
std::sort(_plans.list.begin(), _plans.list.end(),
[](ExecutionPlan* const& a, ExecutionPlan* const& b)
-> bool { return a->getCost() < b->getCost(); });
}
LOG_TOPIC(TRACE, Logger::FIXME) << "optimization ends with " << _plans.size() << " plans";
return TRI_ERROR_NO_ERROR;
}
/// @brief estimatePlans
void Optimizer::estimatePlans() {
for (auto& p : _plans.list) {
if (!p->varUsageComputed()) {
p->findVarUsage();
}
p->getCost();
// this value is cached in the plan, so formally this step is
// unnecessary, but for the sake of cleanliness...
}
}
/// @brief sortPlans
void Optimizer::sortPlans() {
std::sort(_plans.list.begin(), _plans.list.end(),
[](ExecutionPlan* const& a, ExecutionPlan* const& b)
-> bool { return a->getCost() < b->getCost(); });
}

View File

@ -165,7 +165,8 @@ class Optimizer {
/// newly created plans it recalls and will automatically delete them.
/// If you need to extract the plans from the optimizer use stealBest or
/// stealPlans.
int createPlans(ExecutionPlan* p, std::vector<std::string> const&, bool);
int createPlans(ExecutionPlan* p, std::vector<std::string> const& rulesSpecification,
bool inspectSimplePlans, bool estimateAllPlans);
size_t hasEnoughPlans(size_t extraPlans) const;
@ -216,13 +217,6 @@ class Optimizer {
return res;
}
private:
/// @brief estimatePlans
void estimatePlans();
/// @brief sortPlans
void sortPlans();
public:
/// @brief optimizer statistics
Stats _stats;

View File

@ -930,9 +930,41 @@ void arangodb::aql::specializeCollectRule(Optimizer* opt,
bool const canUseHashAggregation =
(!groupVariables.empty() &&
(!collectNode->hasOutVariable() || collectNode->count()) &&
collectNode->getOptions().canUseHashMethod());
collectNode->getOptions().canUseMethod(CollectOptions::CollectMethod::HASH));
if (canUseHashAggregation && !opt->runOnlyRequiredRules()) {
if (collectNode->getOptions().shouldUseMethod(CollectOptions::CollectMethod::HASH)) {
// user has explicitly asked for hash method
// specialize existing the CollectNode so it will become a HashedCollectBlock
// later. additionally, add a SortNode BEHIND the CollectNode (to sort the
// final result)
collectNode->aggregationMethod(
CollectOptions::CollectMethod::HASH);
collectNode->specialized();
if (!collectNode->isDistinctCommand()) {
// add the post-SORT
SortElementVector sortElements;
for (auto const& v : collectNode->groupVariables()) {
sortElements.emplace_back(v.first, true);
}
auto sortNode =
new SortNode(plan.get(), plan->nextId(), sortElements, false);
plan->registerNode(sortNode);
TRI_ASSERT(collectNode->hasParent());
auto parent = collectNode->getFirstParent();
TRI_ASSERT(parent != nullptr);
sortNode->addDependency(collectNode);
parent->replaceDependency(collectNode, sortNode);
}
modified = true;
continue;
}
// create a new plan with the adjusted COLLECT node
std::unique_ptr<ExecutionPlan> newPlan(plan->clone());
@ -946,7 +978,7 @@ void arangodb::aql::specializeCollectRule(Optimizer* opt,
// additionally, add a SortNode BEHIND the CollectNode (to sort the
// final result)
newCollectNode->aggregationMethod(
CollectOptions::CollectMethod::COLLECT_METHOD_HASH);
CollectOptions::CollectMethod::HASH);
newCollectNode->specialized();
if (!collectNode->isDistinctCommand()) {
@ -973,7 +1005,7 @@ void arangodb::aql::specializeCollectRule(Optimizer* opt,
// this will tell the optimizer to optimize the cloned plan with this
// specific rule again
opt->addPlan(std::move(newPlan), rule, true,
static_cast<int>(rule->level - 1));
static_cast<int>(rule->level - 1));
} else {
// no need to run this specific rule again on the cloned plan
opt->addPlan(std::move(newPlan), rule, true);
@ -988,7 +1020,7 @@ void arangodb::aql::specializeCollectRule(Optimizer* opt,
// specialize the CollectNode so it will become a SortedCollectBlock
// later
collectNode->aggregationMethod(
CollectOptions::CollectMethod::COLLECT_METHOD_SORTED);
CollectOptions::CollectMethod::SORTED);
// insert a SortNode IN FRONT OF the CollectNode
if (!groupVariables.empty()) {

View File

@ -30,28 +30,9 @@
namespace arangodb {
namespace aql {
struct AstNode;
class Query;
struct QueryResult;
class Parser;
}
}
/// @brief forwards for the parse function provided by the parser (.y)
int Aqlparse(arangodb::aql::Parser*);
/// @brief forward for the init function provided by the lexer (.l)
int Aqllex_init(void**);
/// @brief forward for the shutdown function provided by the lexer (.l)
int Aqllex_destroy(void*);
/// @brief forward for the context function provided by the lexer (.l)
void Aqlset_extra(arangodb::aql::Parser*, void*);
namespace arangodb {
namespace aql {
/// @brief the parser
class Parser {
@ -185,4 +166,16 @@ class Parser {
}
}
/// @brief forward for the parse function provided by the parser (.y)
int Aqlparse(arangodb::aql::Parser*);
/// @brief forward for the init function provided by the lexer (.l)
int Aqllex_init(void**);
/// @brief forward for the shutdown function provided by the lexer (.l)
int Aqllex_destroy(void*);
/// @brief forward for the context function provided by the lexer (.l)
void Aqlset_extra(arangodb::aql::Parser*, void*);
#endif

View File

@ -480,7 +480,7 @@ ExecutionPlan* Query::prepare() {
arangodb::aql::Optimizer opt(_queryOptions.maxNumberOfPlans);
// get enabled/disabled rules
opt.createPlans(plan.release(), _queryOptions.optimizerRules,
_queryOptions.inspectSimplePlans);
_queryOptions.inspectSimplePlans, false);
// Now plan and all derived plans belong to the optimizer
plan.reset(opt.stealBest()); // Now we own the best one again
} else { // no queryString, we are instantiating from _queryBuilder
@ -590,7 +590,7 @@ QueryResult Query::execute(QueryRegistry* registry) {
options.buildUnindexedObjects = true;
auto resultBuilder = std::make_shared<VPackBuilder>(&options);
resultBuilder->buffer()->reserve(
resultBuilder->reserve(
16 * 1024); // reserve some space in Builder to avoid frequent reallocs
TRI_ASSERT(_engine != nullptr);
@ -990,7 +990,7 @@ QueryResult Query::explain() {
enterState(QueryExecutionState::ValueType::PLAN_OPTIMIZATION);
arangodb::aql::Optimizer opt(_queryOptions.maxNumberOfPlans);
// get enabled/disabled rules
opt.createPlans(plan, _queryOptions.optimizerRules, _queryOptions.inspectSimplePlans);
opt.createPlans(plan, _queryOptions.optimizerRules, _queryOptions.inspectSimplePlans, true);
enterState(QueryExecutionState::ValueType::FINALIZATION);

File diff suppressed because it is too large Load Diff

View File

@ -31,39 +31,25 @@
using namespace arangodb::aql;
////////////////////////////////////////////////////////////////////////////////
/// @brief shortcut macro for signaling out of memory
////////////////////////////////////////////////////////////////////////////////
#define ABORT_OOM \
parser->registerError(TRI_ERROR_OUT_OF_MEMORY); \
YYABORT;
#define scanner parser->scanner()
////////////////////////////////////////////////////////////////////////////////
/// @brief forward for lexer function defined in Aql/tokens.ll
////////////////////////////////////////////////////////////////////////////////
int Aqllex (YYSTYPE*,
YYLTYPE*,
void*);
////////////////////////////////////////////////////////////////////////////////
/// @brief register parse error
////////////////////////////////////////////////////////////////////////////////
void Aqlerror (YYLTYPE* locp,
arangodb::aql::Parser* parser,
char const* message) {
int Aqllex(YYSTYPE*, YYLTYPE*, void*);
/// @brief register parse error (this will also abort the currently running query)
void Aqlerror(YYLTYPE* locp,
arangodb::aql::Parser* parser,
char const* message) {
parser->registerParseError(TRI_ERROR_QUERY_PARSE, message, locp->first_line, locp->first_column);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief check if any of the variables used in the INTO expression were
/// introduced by the COLLECT itself, in which case it would fail
////////////////////////////////////////////////////////////////////////////////
static Variable const* CheckIntoVariables(AstNode const* collectVars,
std::unordered_set<Variable const*> const& vars) {
if (collectVars == nullptr || collectVars->type != NODE_TYPE_ARRAY) {
@ -86,11 +72,8 @@ static Variable const* CheckIntoVariables(AstNode const* collectVars,
return nullptr;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief register variables in the scope
////////////////////////////////////////////////////////////////////////////////
static void RegisterAssignVariables(arangodb::aql::Scopes* scopes, AstNode const* vars) {
static void RegisterAssignVariables(arangodb::aql::Scopes* scopes, AstNode const* vars) {
size_t const n = vars->numMembers();
for (size_t i = 0; i < n; ++i) {
auto member = vars->getMember(i);
@ -103,10 +86,7 @@ static void RegisterAssignVariables(arangodb::aql::Scopes* scopes, AstNode const
}
}
////////////////////////////////////////////////////////////////////////////////
/// @brief validate the aggregate variables expressions
////////////////////////////////////////////////////////////////////////////////
static bool ValidateAggregates(Parser* parser, AstNode const* aggregates) {
size_t const n = aggregates->numMembers();
@ -140,11 +120,8 @@ static bool ValidateAggregates(Parser* parser, AstNode const* aggregates) {
return true;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief start a new scope for the collect
////////////////////////////////////////////////////////////////////////////////
static bool StartCollectScope(arangodb::aql::Scopes* scopes) {
static bool StartCollectScope(arangodb::aql::Scopes* scopes) {
// check if we are in the main scope
if (scopes->type() == arangodb::aql::AQL_SCOPE_MAIN) {
return false;
@ -157,10 +134,7 @@ static bool StartCollectScope(arangodb::aql::Scopes* scopes) {
return true;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief get the INTO variable stored in a node (may not exist)
////////////////////////////////////////////////////////////////////////////////
static AstNode const* GetIntoVariable(Parser* parser, AstNode const* node) {
if (node == nullptr) {
return nullptr;
@ -180,10 +154,7 @@ static AstNode const* GetIntoVariable(Parser* parser, AstNode const* node) {
return parser->ast()->createNodeVariable(v->getStringValue(), v->getStringLength(), true);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief get the INTO variable = expression stored in a node (may not exist)
////////////////////////////////////////////////////////////////////////////////
static AstNode const* GetIntoExpression(AstNode const* node) {
if (node == nullptr || node->type == NODE_TYPE_VALUE) {
return nullptr;

View File

@ -892,16 +892,17 @@ static yyconst flex_int32_t yy_rule_can_match_eol[102] =
#include "Basics/Common.h"
#include "Basics/conversions.h"
#include "Basics/NumberUtils.h"
#include "Basics/StringUtils.h"
// introduce the namespace here, otherwise following references to
// the namespace in auto-generated headers might fail
namespace arangodb {
namespace aql {
class Query;
class Parser;
}
namespace aql {
class Query;
class Parser;
}
}
@ -911,7 +912,12 @@ namespace arangodb {
#define YY_EXTRA_TYPE arangodb::aql::Parser*
#define YY_USER_ACTION yylloc->first_line = (int) yylineno; yylloc->first_column = (int) yycolumn; yylloc->last_column = (int) (yycolumn + yyleng - 1); yycolumn += (int) yyleng; yyextra->increaseOffset(yyleng);
#define YY_USER_ACTION \
yylloc->first_line = static_cast<int>(yylineno); \
yylloc->first_column = static_cast<int>(yycolumn); \
yylloc->last_column = static_cast<int>(yycolumn + yyleng - 1); \
yycolumn += static_cast<int>(yyleng); \
yyextra->increaseOffset(yyleng);
#define YY_NO_INPUT 1
@ -923,8 +929,7 @@ namespace arangodb {
if (length > 0) { \
yyextra->fillBuffer(resultBuffer, length); \
resultState = length; \
} \
else { \
} else { \
resultState = YY_NULL; \
} \
}
@ -1973,10 +1978,12 @@ YY_RULE_SETUP
arangodb::aql::AstNode* node = nullptr;
auto parser = yyextra;
try {
int64_t value1 = arangodb::basics::StringUtils::int64_check(std::string(yytext, yyleng));
bool valid;
int64_t value1 = arangodb::NumberUtils::atoi<int64_t>(yytext, yytext + yyleng, valid);
if (valid) {
node = parser->ast()->createNodeValueInt(value1);
} catch (...) {
} else {
try {
double value2 = TRI_DoubleString(yytext);
node = parser->ast()->createNodeValueDouble(value2);

View File

@ -20,16 +20,17 @@
%{
#include "Basics/Common.h"
#include "Basics/conversions.h"
#include "Basics/NumberUtils.h"
#include "Basics/StringUtils.h"
// introduce the namespace here, otherwise following references to
// the namespace in auto-generated headers might fail
namespace arangodb {
namespace aql {
class Query;
class Parser;
}
namespace aql {
class Query;
class Parser;
}
}
@ -39,7 +40,12 @@ namespace arangodb {
#define YY_EXTRA_TYPE arangodb::aql::Parser*
#define YY_USER_ACTION yylloc->first_line = (int) yylineno; yylloc->first_column = (int) yycolumn; yylloc->last_column = (int) (yycolumn + yyleng - 1); yycolumn += (int) yyleng; yyextra->increaseOffset(yyleng);
#define YY_USER_ACTION \
yylloc->first_line = static_cast<int>(yylineno); \
yylloc->first_column = static_cast<int>(yycolumn); \
yylloc->last_column = static_cast<int>(yycolumn + yyleng - 1); \
yycolumn += static_cast<int>(yyleng); \
yyextra->increaseOffset(yyleng);
#define YY_NO_INPUT 1
@ -51,8 +57,7 @@ namespace arangodb {
if (length > 0) { \
yyextra->fillBuffer(resultBuffer, length); \
resultState = length; \
} \
else { \
} else { \
resultState = YY_NULL; \
} \
}
@ -455,10 +460,12 @@ namespace arangodb {
arangodb::aql::AstNode* node = nullptr;
auto parser = yyextra;
try {
int64_t value1 = arangodb::basics::StringUtils::int64_check(std::string(yytext, yyleng));
bool valid;
int64_t value1 = arangodb::NumberUtils::atoi<int64_t>(yytext, yytext + yyleng, valid);
if (valid) {
node = parser->ast()->createNodeValueInt(value1);
} catch (...) {
} else {
try {
double value2 = TRI_DoubleString(yytext);
node = parser->ast()->createNodeValueDouble(value2);

View File

@ -69,7 +69,7 @@ void AgencyCallback::refetchAndUpdate(bool needToAcquireMutex) {
basics::StringUtils::split(AgencyCommManager::path(key), '/');
kv.erase(std::remove(kv.begin(), kv.end(), ""), kv.end());
std::shared_ptr<VPackBuilder> newData = std::make_shared<VPackBuilder>();
auto newData = std::make_shared<VPackBuilder>();
newData->add(result.slice()[0].get(kv));
if (needToAcquireMutex) {

View File

@ -21,8 +21,8 @@
/// @author Andreas Streichardt
////////////////////////////////////////////////////////////////////////////////
#ifndef ARANGODB_CLUSTER_AGENCYCALLBACK_H
#define ARANGODB_CLUSTER_AGENCYCALLBACK_H
#ifndef ARANGODB_CLUSTER_AGENCY_CALLBACK_H
#define ARANGODB_CLUSTER_AGENCY_CALLBACK_H
#include "Basics/Common.h"

View File

@ -21,8 +21,8 @@
/// @author Andreas Streichardt
////////////////////////////////////////////////////////////////////////////////
#ifndef CLUSTER_AGENCYCALLACKREGISTRY_H
#define CLUSTER_AGENCYCALLACKREGISTRY_H 1
#ifndef CLUSTER_AGENCY_CALLBACK_REGISTRY_H
#define CLUSTER_AGENCY_CALLBACK_REGISTRY_H 1
#include "Cluster/AgencyCallback.h"
#include "Basics/ReadWriteLock.h"

View File

@ -304,7 +304,7 @@ struct ClusterCommResult {
struct ClusterCommCallback {
ClusterCommCallback() {}
virtual ~ClusterCommCallback(){};
virtual ~ClusterCommCallback() {}
//////////////////////////////////////////////////////////////////////////////
/// @brief the actual callback function

View File

@ -23,6 +23,7 @@
#include "ClusterMethods.h"
#include "Basics/conversions.h"
#include "Basics/NumberUtils.h"
#include "Basics/StaticStrings.h"
#include "Basics/StringRef.h"
#include "Basics/StringUtils.h"
@ -293,8 +294,7 @@ static void extractErrorCodes(ClusterCommResult const& res,
for (auto const& code : VPackObjectIterator(codesSlice)) {
VPackValueLength codeLength;
char const* codeString = code.key.getString(codeLength);
int codeNr = static_cast<int>(arangodb::basics::StringUtils::int64(
codeString, static_cast<size_t>(codeLength)));
int codeNr = NumberUtils::atoi_zero<int>(codeString, codeString + codeLength);
if (includeNotFound || codeNr != TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND) {
errorCounter[codeNr] += code.value.getNumericValue<size_t>();
}
@ -458,7 +458,7 @@ static void collectResultsFromAllShards(
} else {
TRI_ASSERT(res.answer != nullptr);
resultMap.emplace(res.shardID,
res.answer->toVelocyPackBuilderPtr());
res.answer->toVelocyPackBuilderPtrNoUniquenessChecks());
extractErrorCodes(res, errorCounter, true);
responseCode = res.answer_code;
}
@ -1001,7 +1001,7 @@ int createDocumentOnCoordinator(
// Now prepare the requests:
std::vector<ClusterCommRequest> requests;
auto body = std::make_shared<std::string>();
std::shared_ptr<std::string> body;
for (auto const& it : shardMap) {
if (!useMultiple) {
@ -1056,7 +1056,7 @@ int createDocumentOnCoordinator(
responseCode = res.answer_code;
TRI_ASSERT(res.answer != nullptr);
auto parsedResult = res.answer->toVelocyPackBuilderPtr();
auto parsedResult = res.answer->toVelocyPackBuilderPtrNoUniquenessChecks();
resultBody.swap(parsedResult);
return TRI_ERROR_NO_ERROR;
}
@ -1220,7 +1220,7 @@ int deleteDocumentOnCoordinator(
responseCode = res.answer_code;
TRI_ASSERT(res.answer != nullptr);
auto parsedResult = res.answer->toVelocyPackBuilderPtr();
auto parsedResult = res.answer->toVelocyPackBuilderPtrNoUniquenessChecks();
resultBody.swap(parsedResult);
return TRI_ERROR_NO_ERROR;
}
@ -1272,7 +1272,7 @@ int deleteDocumentOnCoordinator(
responseCode = res.answer_code;
TRI_ASSERT(res.answer != nullptr);
auto parsedResult = res.answer->toVelocyPackBuilderPtr();
auto parsedResult = res.answer->toVelocyPackBuilderPtrNoUniquenessChecks();
resultBody.swap(parsedResult);
}
}
@ -1303,7 +1303,7 @@ int deleteDocumentOnCoordinator(
responseCode = res.answer_code;
}
TRI_ASSERT(res.answer != nullptr);
allResults.emplace_back(res.answer->toVelocyPackBuilderPtr());
allResults.emplace_back(res.answer->toVelocyPackBuilderPtrNoUniquenessChecks());
extractErrorCodes(res, errorCounter, false);
}
// If we get here we get exactly one result for every shard.
@ -1580,7 +1580,7 @@ int getDocumentOnCoordinator(
responseCode = res.answer_code;
TRI_ASSERT(res.answer != nullptr);
auto parsedResult = res.answer->toVelocyPackBuilderPtr();
auto parsedResult = res.answer->toVelocyPackBuilderPtrNoUniquenessChecks();
resultBody.swap(parsedResult);
return TRI_ERROR_NO_ERROR;
}
@ -1652,7 +1652,7 @@ int getDocumentOnCoordinator(
nrok++;
responseCode = res.answer_code;
TRI_ASSERT(res.answer != nullptr);
auto parsedResult = res.answer->toVelocyPackBuilderPtr();
auto parsedResult = res.answer->toVelocyPackBuilderPtrNoUniquenessChecks();
resultBody.swap(parsedResult);
}
} else {
@ -1687,7 +1687,7 @@ int getDocumentOnCoordinator(
responseCode = res.answer_code;
}
TRI_ASSERT(res.answer != nullptr);
allResults.emplace_back(res.answer->toVelocyPackBuilderPtr());
allResults.emplace_back(res.answer->toVelocyPackBuilderPtrNoUniquenessChecks());
extractErrorCodes(res, errorCounter, false);
}
// If we get here we get exactly one result for every shard.
@ -1761,7 +1761,7 @@ int fetchEdgesFromEngines(
return commError;
}
TRI_ASSERT(res.answer != nullptr);
auto resBody = res.answer->toVelocyPackBuilderPtr();
auto resBody = res.answer->toVelocyPackBuilderPtrNoUniquenessChecks();
VPackSlice resSlice = resBody->slice();
if (!resSlice.isObject()) {
// Response has invalid format
@ -1856,7 +1856,7 @@ void fetchVerticesFromEngines(
THROW_ARANGO_EXCEPTION(commError);
}
TRI_ASSERT(res.answer != nullptr);
auto resBody = res.answer->toVelocyPackBuilderPtr();
auto resBody = res.answer->toVelocyPackBuilderPtrNoUniquenessChecks();
VPackSlice resSlice = resBody->slice();
if (!resSlice.isObject()) {
// Response has invalid format
@ -1961,7 +1961,7 @@ void fetchVerticesFromEngines(
THROW_ARANGO_EXCEPTION(commError);
}
TRI_ASSERT(res.answer != nullptr);
auto resBody = res.answer->toVelocyPackBuilderPtr();
auto resBody = res.answer->toVelocyPackBuilderPtrNoUniquenessChecks();
VPackSlice resSlice = resBody->slice();
if (!resSlice.isObject()) {
// Response has invalid format
@ -2068,7 +2068,7 @@ int getFilteredEdgesOnCoordinator(
return error;
}
TRI_ASSERT(res.answer != nullptr);
std::shared_ptr<VPackBuilder> shardResult = res.answer->toVelocyPackBuilderPtr();
std::shared_ptr<VPackBuilder> shardResult = res.answer->toVelocyPackBuilderPtrNoUniquenessChecks();
if (shardResult == nullptr) {
return TRI_ERROR_INTERNAL;
@ -2285,7 +2285,7 @@ int modifyDocumentOnCoordinator(
responseCode = res.answer_code;
TRI_ASSERT(res.answer != nullptr);
auto parsedResult = res.answer->toVelocyPackBuilderPtr();
auto parsedResult = res.answer->toVelocyPackBuilderPtrNoUniquenessChecks();
resultBody.swap(parsedResult);
return TRI_ERROR_NO_ERROR;
}
@ -2342,7 +2342,7 @@ int modifyDocumentOnCoordinator(
nrok++;
responseCode = res.answer_code;
TRI_ASSERT(res.answer != nullptr);
auto parsedResult = res.answer->toVelocyPackBuilderPtr();
auto parsedResult = res.answer->toVelocyPackBuilderPtrNoUniquenessChecks();
resultBody.swap(parsedResult);
}
} else {
@ -2377,7 +2377,7 @@ int modifyDocumentOnCoordinator(
responseCode = res.answer_code;
}
TRI_ASSERT(res.answer != nullptr);
allResults.emplace_back(res.answer->toVelocyPackBuilderPtr());
allResults.emplace_back(res.answer->toVelocyPackBuilderPtrNoUniquenessChecks());
extractErrorCodes(res, errorCounter, false);
}
// If we get here we get exactly one result for every shard.
@ -2771,7 +2771,7 @@ int fetchEdgesFromEngines(
return commError;
}
TRI_ASSERT(res.answer != nullptr);
auto resBody = res.answer->toVelocyPackBuilderPtr();
auto resBody = res.answer->toVelocyPackBuilderPtrNoUniquenessChecks();
VPackSlice resSlice = resBody->slice();
if (!resSlice.isObject()) {
// Response has invalid format

View File

@ -35,7 +35,6 @@ class ManagedDocumentResult;
namespace transaction {
class Methods;
}
;
namespace traverser {
class ClusterEdgeCursor;

View File

@ -31,7 +31,6 @@
#include "Aql/RestAqlHandler.h"
#include "Basics/StringUtils.h"
#include "Cluster/AgencyCallbackRegistry.h"
#include "Cluster/ClusterComm.h"
#include "Cluster/ClusterFeature.h"
#include "Cluster/RestAgencyCallbacksHandler.h"
#include "Cluster/RestClusterHandler.h"

View File

@ -79,11 +79,25 @@ RestHandler::~RestHandler() {
// --SECTION-- public methods
// -----------------------------------------------------------------------------
int RestHandler::prepareEngine() {
RequestStatistics::SET_REQUEST_START(_statistics);
uint64_t RestHandler::messageId() const {
uint64_t messageId = 0UL;
auto req = _request.get();
auto res = _response.get();
if (req) {
messageId = req->messageId();
} else if (res) {
messageId = res->messageId();
} else {
LOG_TOPIC(WARN, Logger::COMMUNICATION)
<< "could not find corresponding request/response";
}
return messageId;
}
int RestHandler::prepareEngine() {
// set end immediately so we do not get netative statistics
RequestStatistics::SET_REQUEST_END(_statistics);
RequestStatistics::SET_REQUEST_START_END(_statistics);
if (_canceled) {
_engine.setState(RestEngine::State::DONE);

View File

@ -121,7 +121,7 @@ class RestHandler : public std::enable_shared_from_this<RestHandler> {
public:
void initEngine(EventLoop loop,
std::function<void(RestHandler*)> storeResult) {
std::function<void(RestHandler*)> const& storeResult) {
_storeResult = storeResult;
_engine.init(loop);
}
@ -142,21 +142,6 @@ class RestHandler : public std::enable_shared_from_this<RestHandler> {
std::function<void(rest::RestHandler*)> _storeResult;
};
inline uint64_t RestHandler::messageId() const {
uint64_t messageId = 0UL;
auto req = _request.get();
auto res = _response.get();
if (req) {
messageId = req->messageId();
} else if (res) {
messageId = res->messageId();
} else {
LOG_TOPIC(WARN, Logger::COMMUNICATION)
<< "could not find corresponding request/response";
}
return messageId;
}
}
}

View File

@ -43,7 +43,6 @@ class ManagedDocumentResult;
namespace transaction {
class Methods;
}
;
namespace velocypack {
class Builder;

View File

@ -45,6 +45,18 @@ IndexIterator::IndexIterator(LogicalCollection* collection,
TRI_ASSERT(_mmdr != nullptr);
}
IndexIterator::IndexIterator(LogicalCollection* collection,
transaction::Methods* trx,
arangodb::Index const* index)
: _collection(collection),
_trx(trx),
_mmdr(nullptr),
_context(trx, collection, nullptr, index->fields().size()),
_responsible(false) {
TRI_ASSERT(_collection != nullptr);
TRI_ASSERT(_trx != nullptr);
}
/// @brief default destructor. Does not free anything
IndexIterator::~IndexIterator() {
if (_responsible) {

View File

@ -74,6 +74,7 @@ class IndexIterator {
IndexIterator() = delete;
IndexIterator(LogicalCollection*, transaction::Methods*, ManagedDocumentResult*, arangodb::Index const*);
IndexIterator(LogicalCollection*, transaction::Methods*, arangodb::Index const*);
virtual ~IndexIterator();
@ -106,8 +107,8 @@ class IndexIterator {
/// @brief Special iterator if the condition cannot have any result
class EmptyIndexIterator final : public IndexIterator {
public:
EmptyIndexIterator(LogicalCollection* collection, transaction::Methods* trx, ManagedDocumentResult* mmdr, arangodb::Index const* index)
: IndexIterator(collection, trx, mmdr, index) {}
EmptyIndexIterator(LogicalCollection* collection, transaction::Methods* trx, arangodb::Index const* index)
: IndexIterator(collection, trx, index) {}
~EmptyIndexIterator() {}

View File

@ -35,7 +35,7 @@ IndexLookupContext::IndexLookupContext(transaction::Methods* trx,
: _trx(trx), _collection(collection), _result(result), _numFields(numFields) {
TRI_ASSERT(_trx != nullptr);
TRI_ASSERT(_collection != nullptr);
TRI_ASSERT(_result != nullptr);
// note: _result can be a nullptr
}
uint8_t const* IndexLookupContext::lookup(LocalDocumentId token) {

View File

@ -176,21 +176,21 @@ AqlValue MMFilesAqlFunctions::Fulltext(
VPackFunctionParameters const& parameters) {
ValidateParameters(parameters, "FULLTEXT", 3, 4);
AqlValue collectionValue = ExtractFunctionParameterValue(trx, parameters, 0);
AqlValue collectionValue = ExtractFunctionParameterValue(parameters, 0);
if (!collectionValue.isString()) {
RegisterWarning(query, "FULLTEXT", TRI_ERROR_QUERY_FUNCTION_ARGUMENT_TYPE_MISMATCH);
return AqlValue(AqlValueHintNull());
}
std::string const cname(collectionValue.slice().copyString());
AqlValue attribute = ExtractFunctionParameterValue(trx, parameters, 1);
AqlValue attribute = ExtractFunctionParameterValue(parameters, 1);
if (!attribute.isString()) {
RegisterWarning(query, "FULLTEXT", TRI_ERROR_QUERY_FUNCTION_ARGUMENT_TYPE_MISMATCH);
return AqlValue(AqlValueHintNull());
}
std::string const attributeName(attribute.slice().copyString());
AqlValue queryValue = ExtractFunctionParameterValue(trx, parameters, 2);
AqlValue queryValue = ExtractFunctionParameterValue(parameters, 2);
if (!queryValue.isString()) {
RegisterWarning(query, "FULLTEXT", TRI_ERROR_QUERY_FUNCTION_ARGUMENT_TYPE_MISMATCH);
return AqlValue(AqlValueHintNull());
@ -199,7 +199,7 @@ AqlValue MMFilesAqlFunctions::Fulltext(
size_t maxResults = 0; // 0 means "all results"
if (parameters.size() >= 4) {
AqlValue limit = ExtractFunctionParameterValue(trx, parameters, 3);
AqlValue limit = ExtractFunctionParameterValue(parameters, 3);
if (!limit.isNull(true) && !limit.isNumber()) {
RegisterWarning(query, "FULLTEXT", TRI_ERROR_QUERY_FUNCTION_ARGUMENT_TYPE_MISMATCH);
return AqlValue(AqlValueHintNull());
@ -299,7 +299,7 @@ AqlValue MMFilesAqlFunctions::Near(arangodb::aql::Query* query,
VPackFunctionParameters const& parameters) {
ValidateParameters(parameters, "NEAR", 3, 5);
AqlValue collectionValue = ExtractFunctionParameterValue(trx, parameters, 0);
AqlValue collectionValue = ExtractFunctionParameterValue(parameters, 0);
if (!collectionValue.isString()) {
THROW_ARANGO_EXCEPTION_PARAMS(
TRI_ERROR_QUERY_FUNCTION_ARGUMENT_TYPE_MISMATCH, "NEAR");
@ -307,8 +307,8 @@ AqlValue MMFilesAqlFunctions::Near(arangodb::aql::Query* query,
std::string const collectionName(collectionValue.slice().copyString());
AqlValue latitude = ExtractFunctionParameterValue(trx, parameters, 1);
AqlValue longitude = ExtractFunctionParameterValue(trx, parameters, 2);
AqlValue latitude = ExtractFunctionParameterValue(parameters, 1);
AqlValue longitude = ExtractFunctionParameterValue(parameters, 2);
if (!latitude.isNumber() || !longitude.isNumber()) {
THROW_ARANGO_EXCEPTION_PARAMS(
@ -319,7 +319,7 @@ AqlValue MMFilesAqlFunctions::Near(arangodb::aql::Query* query,
int64_t limitValue = 100;
if (parameters.size() > 3) {
AqlValue limit = ExtractFunctionParameterValue(trx, parameters, 3);
AqlValue limit = ExtractFunctionParameterValue(parameters, 3);
if (limit.isNumber()) {
limitValue = limit.toInt64(trx);
@ -332,7 +332,7 @@ AqlValue MMFilesAqlFunctions::Near(arangodb::aql::Query* query,
std::string attributeName;
if (parameters.size() > 4) {
// have a distance attribute
AqlValue distanceValue = ExtractFunctionParameterValue(trx, parameters, 4);
AqlValue distanceValue = ExtractFunctionParameterValue(parameters, 4);
if (!distanceValue.isNull(true) && !distanceValue.isString()) {
THROW_ARANGO_EXCEPTION_PARAMS(
@ -362,7 +362,7 @@ AqlValue MMFilesAqlFunctions::Within(
VPackFunctionParameters const& parameters) {
ValidateParameters(parameters, "WITHIN", 4, 5);
AqlValue collectionValue = ExtractFunctionParameterValue(trx, parameters, 0);
AqlValue collectionValue = ExtractFunctionParameterValue(parameters, 0);
if (!collectionValue.isString()) {
THROW_ARANGO_EXCEPTION_PARAMS(
@ -371,9 +371,9 @@ AqlValue MMFilesAqlFunctions::Within(
std::string const collectionName(collectionValue.slice().copyString());
AqlValue latitudeValue = ExtractFunctionParameterValue(trx, parameters, 1);
AqlValue longitudeValue = ExtractFunctionParameterValue(trx, parameters, 2);
AqlValue radiusValue = ExtractFunctionParameterValue(trx, parameters, 3);
AqlValue latitudeValue = ExtractFunctionParameterValue(parameters, 1);
AqlValue longitudeValue = ExtractFunctionParameterValue(parameters, 2);
AqlValue radiusValue = ExtractFunctionParameterValue(parameters, 3);
if (!latitudeValue.isNumber() || !longitudeValue.isNumber() || !radiusValue.isNumber()) {
THROW_ARANGO_EXCEPTION_PARAMS(
@ -383,7 +383,7 @@ AqlValue MMFilesAqlFunctions::Within(
std::string attributeName;
if (parameters.size() > 4) {
// have a distance attribute
AqlValue distanceValue = ExtractFunctionParameterValue(trx, parameters, 4);
AqlValue distanceValue = ExtractFunctionParameterValue(parameters, 4);
if (!distanceValue.isNull(true) && !distanceValue.isString()) {
THROW_ARANGO_EXCEPTION_PARAMS(

View File

@ -77,8 +77,8 @@ namespace {
class MMFilesIndexFillerTask : public basics::LocalTask {
public:
MMFilesIndexFillerTask(
std::shared_ptr<basics::LocalTaskQueue> queue, transaction::Methods* trx, Index* idx,
std::shared_ptr<std::vector<std::pair<LocalDocumentId, VPackSlice>>> documents)
std::shared_ptr<basics::LocalTaskQueue> const& queue, transaction::Methods* trx, Index* idx,
std::shared_ptr<std::vector<std::pair<LocalDocumentId, VPackSlice>>> const& documents)
: LocalTask(queue), _trx(trx), _idx(idx), _documents(documents) {}
void run() {
@ -2440,14 +2440,13 @@ bool MMFilesCollection::removeIndex(TRI_idx_iid_t iid) {
}
std::unique_ptr<IndexIterator> MMFilesCollection::getAllIterator(
transaction::Methods* trx, ManagedDocumentResult* mdr, bool reverse) const {
return std::unique_ptr<IndexIterator>(
primaryIndex()->allIterator(trx, mdr, reverse));
transaction::Methods* trx, bool reverse) const {
return std::unique_ptr<IndexIterator>(primaryIndex()->allIterator(trx, reverse));
}
std::unique_ptr<IndexIterator> MMFilesCollection::getAnyIterator(
transaction::Methods* trx, ManagedDocumentResult* mdr) const {
return std::unique_ptr<IndexIterator>(primaryIndex()->anyIterator(trx, mdr));
transaction::Methods* trx) const {
return std::unique_ptr<IndexIterator>(primaryIndex()->anyIterator(trx));
}
void MMFilesCollection::invokeOnAllElements(
@ -2910,7 +2909,7 @@ Result MMFilesCollection::insert(transaction::Methods* trx,
THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
}
VPackSlice doc(marker->vpack());
VPackSlice const doc(marker->vpack());
operation.setDocumentIds(MMFilesDocumentDescriptor(),
MMFilesDocumentDescriptor(documentId, doc.begin()));
@ -2966,11 +2965,7 @@ Result MMFilesCollection::insert(transaction::Methods* trx,
}
if (res.ok()) {
uint8_t const* vpack = lookupDocumentVPack(documentId);
if (vpack != nullptr) {
result.setUnmanaged(vpack, documentId);
}
result.setManaged(doc.begin(), documentId);
// store the tick that was used for writing the document
resultMarkerTick = operation.tick();
}
@ -3286,10 +3281,9 @@ Result MMFilesCollection::update(
return res;
}
uint8_t const* vpack = previous.vpack();
LocalDocumentId const oldDocumentId = previous.localDocumentId();
VPackSlice oldDoc(vpack);
TRI_voc_rid_t oldRevisionId =
VPackSlice const oldDoc(previous.vpack());
TRI_voc_rid_t const oldRevisionId =
transaction::helpers::extractRevFromDocument(oldDoc);
prevRev = oldRevisionId;
@ -3321,7 +3315,7 @@ Result MMFilesCollection::update(
if (_logicalCollection->waitForSync()) {
options.waitForSync = true;
}
return Result(TRI_ERROR_NO_ERROR);
return Result();
}
// merge old and new values
@ -3370,11 +3364,6 @@ Result MMFilesCollection::update(
operation.setDocumentIds(MMFilesDocumentDescriptor(oldDocumentId, oldDoc.begin()),
MMFilesDocumentDescriptor(documentId, newDoc.begin()));
if (oldRevisionId == revisionId) {
// update with same revision id => can happen if isRestore = true
result.reset();
}
res = updateDocument(trx, revisionId, oldDocumentId, oldDoc, documentId,
newDoc, operation, marker, options,
options.waitForSync);
@ -3391,10 +3380,8 @@ Result MMFilesCollection::update(
if (res.fail()) {
operation.revert(trx);
} else {
uint8_t const* vpack = lookupDocumentVPack(documentId);
if (vpack != nullptr) {
result.setUnmanaged(vpack, documentId);
}
result.setManaged(newDoc.begin(), documentId);
if (options.waitForSync) {
// store the tick that was used for writing the new document
resultMarkerTick = operation.tick();
@ -3507,11 +3494,6 @@ Result MMFilesCollection::replace(
operation.setDocumentIds(MMFilesDocumentDescriptor(oldDocumentId, oldDoc.begin()),
MMFilesDocumentDescriptor(documentId, newDoc.begin()));
if (oldDocumentId == documentId) {
// update with same revision id => can happen if isRestore = true
result.reset();
}
res = updateDocument(trx, revisionId, oldDocumentId, oldDoc, documentId,
newDoc, operation, marker, options,
options.waitForSync);
@ -3528,14 +3510,7 @@ Result MMFilesCollection::replace(
if (res.fail()) {
operation.revert(trx);
} else {
if (oldDocumentId == documentId) {
// update with same revision id => can happen if isRestore = true
result.reset();
}
uint8_t const* vpack = lookupDocumentVPack(documentId);
if (vpack != nullptr) {
result.setUnmanaged(vpack, documentId);
}
result.setManaged(newDoc.begin(), documentId);
if (options.waitForSync) {
// store the tick that was used for writing the new document
@ -3611,8 +3586,7 @@ Result MMFilesCollection::remove(arangodb::transaction::Methods* trx,
return res;
}
uint8_t const* vpack = previous.vpack();
VPackSlice oldDoc(vpack);
VPackSlice const oldDoc(previous.vpack());
LocalDocumentId const oldDocumentId = previous.localDocumentId();
TRI_voc_rid_t oldRevisionId = arangodb::transaction::helpers::extractRevFromDocument(oldDoc);
prevRev = oldRevisionId;
@ -3669,7 +3643,7 @@ Result MMFilesCollection::remove(arangodb::transaction::Methods* trx,
static_cast<MMFilesTransactionState*>(trx->state())
->addOperation(documentId, revisionId, operation, marker, options.waitForSync);
} catch (basics::Exception const& ex) {
res = Result(ex.code());
res = Result(ex.code(), ex.what());
} catch (std::bad_alloc const&) {
res = Result(TRI_ERROR_OUT_OF_MEMORY);
} catch (std::exception const& ex) {
@ -3896,7 +3870,7 @@ Result MMFilesCollection::updateDocument(
options.indexOperationMode);
if (res.fail()) {
// re-enter the document in case of failure, ignore errors during rollback
// re-insert the document in case of failure, ignore errors during rollback
insertSecondaryIndexes(trx, oldDocumentId, oldDoc,
Index::OperationMode::rollback);
return res;
@ -3915,8 +3889,9 @@ Result MMFilesCollection::updateDocument(
return res;
}
// update the index element (primary index only - other index have been
// update the index element (primary index only - other indexes have been
// adjusted)
// TODO: pass key into this function so it does not have to be looked up again
VPackSlice keySlice(transaction::helpers::extractKeyFromDocument(newDoc));
MMFilesSimpleIndexElement* element =
primaryIndex()->lookupKeyRef(trx, keySlice);
@ -3928,11 +3903,9 @@ Result MMFilesCollection::updateDocument(
operation.indexed();
if (oldDocumentId != newDocumentId) {
try {
removeLocalDocumentId(oldDocumentId, true);
} catch (...) {
}
try {
removeLocalDocumentId(oldDocumentId, true);
} catch (...) {
}
TRI_IF_FAILURE("UpdateDocumentNoOperation") {

View File

@ -292,10 +292,9 @@ class MMFilesCollection final : public PhysicalCollection {
std::shared_ptr<Index> lookupIndex(velocypack::Slice const&) const override;
std::unique_ptr<IndexIterator> getAllIterator(transaction::Methods* trx,
ManagedDocumentResult* mdr,
bool reverse) const override;
std::unique_ptr<IndexIterator> getAnyIterator(
transaction::Methods* trx, ManagedDocumentResult* mdr) const override;
transaction::Methods* trx) const override;
void invokeOnAllElements(
transaction::Methods* trx,
std::function<bool(LocalDocumentId const&)> callback) override;

View File

@ -386,14 +386,14 @@ IndexIterator* MMFilesEdgeIndex::iteratorForCondition(
// a.b IN values
if (!valNode->isArray()) {
// a.b IN non-array
return new EmptyIndexIterator(_collection, trx, mmdr, this);
return new EmptyIndexIterator(_collection, trx, this);
}
return createInIterator(trx, mmdr, attrNode, valNode);
}
// operator type unsupported
return new EmptyIndexIterator(_collection, trx, mmdr, this);
return new EmptyIndexIterator(_collection, trx, this);
}
/// @brief specializes the condition for use with the index

View File

@ -245,7 +245,6 @@ IndexIterator* MMFilesFulltextIndex::iteratorForCondition(
size_t numMembers = args->numMembers();
TRI_ASSERT(numMembers == 3 || numMembers == 4);
std::string attr = args->getMember(1)->getString();
std::string query = args->getMember(2)->getString();
size_t limit = 0;

View File

@ -128,7 +128,7 @@ class MMFilesFulltextIndexIterator : public IndexIterator {
TRI_ASSERT(limit > 0);
while (_pos != _docs.end() && limit > 0) {
cb(LocalDocumentId(*_pos));
_pos++;
++_pos;
limit--;
}
return _pos != _docs.end();
@ -138,7 +138,7 @@ class MMFilesFulltextIndexIterator : public IndexIterator {
void skip(uint64_t count, uint64_t& skipped) override {
while (_pos != _docs.end()) {
_pos++;
++_pos;
skipped++;
}
}

View File

@ -54,9 +54,9 @@ static std::vector<std::vector<arangodb::basics::AttributeName>> const
MMFilesPrimaryIndexIterator::MMFilesPrimaryIndexIterator(
LogicalCollection* collection, transaction::Methods* trx,
ManagedDocumentResult* mmdr, MMFilesPrimaryIndex const* index,
MMFilesPrimaryIndex const* index,
std::unique_ptr<VPackBuilder>& keys)
: IndexIterator(collection, trx, mmdr, index),
: IndexIterator(collection, trx, index),
_index(index),
_keys(keys.get()),
_iterator(_keys->slice()) {
@ -77,6 +77,7 @@ bool MMFilesPrimaryIndexIterator::next(LocalDocumentIdCallback const& cb, size_t
return false;
}
while (_iterator.valid() && limit > 0) {
// TODO: use version that hands in an existing mmdr
MMFilesSimpleIndexElement result =
_index->lookupKey(_trx, _iterator.value());
_iterator.next();
@ -92,9 +93,9 @@ void MMFilesPrimaryIndexIterator::reset() { _iterator.reset(); }
MMFilesAllIndexIterator::MMFilesAllIndexIterator(
LogicalCollection* collection, transaction::Methods* trx,
ManagedDocumentResult* mmdr, MMFilesPrimaryIndex const* index,
MMFilesPrimaryIndex const* index,
MMFilesPrimaryIndexImpl const* indexImpl, bool reverse)
: IndexIterator(collection, trx, mmdr, index),
: IndexIterator(collection, trx, index),
_index(indexImpl),
_reverse(reverse),
_total(0) {}
@ -103,9 +104,9 @@ bool MMFilesAllIndexIterator::next(LocalDocumentIdCallback const& cb, size_t lim
while (limit > 0) {
MMFilesSimpleIndexElement element;
if (_reverse) {
element = _index->findSequentialReverse(&_context, _position);
element = _index->findSequentialReverse(nullptr, _position);
} else {
element = _index->findSequential(&_context, _position, _total);
element = _index->findSequential(nullptr, _position, _total);
}
if (element) {
cb(LocalDocumentId{element.localDocumentId()});
@ -125,9 +126,9 @@ bool MMFilesAllIndexIterator::nextDocument(DocumentCallback const& cb, size_t li
while (limit > 0) {
MMFilesSimpleIndexElement element;
if (_reverse) {
element = _index->findSequentialReverse(&_context, _position);
element = _index->findSequentialReverse(nullptr, _position);
} else {
element = _index->findSequential(&_context, _position, _total);
element = _index->findSequential(nullptr, _position, _total);
}
if (element) {
_documentIds.emplace_back(std::make_pair(element.localDocumentId(), nullptr));
@ -148,9 +149,9 @@ void MMFilesAllIndexIterator::skip(uint64_t count, uint64_t& skipped) {
while (count > 0) {
MMFilesSimpleIndexElement element;
if (_reverse) {
element = _index->findSequentialReverse(&_context, _position);
element = _index->findSequentialReverse(nullptr, _position);
} else {
element = _index->findSequential(&_context, _position, _total);
element = _index->findSequential(nullptr, _position, _total);
}
if (element) {
++skipped;
@ -165,9 +166,9 @@ void MMFilesAllIndexIterator::reset() { _position.reset(); }
MMFilesAnyIndexIterator::MMFilesAnyIndexIterator(
LogicalCollection* collection, transaction::Methods* trx,
ManagedDocumentResult* mmdr, MMFilesPrimaryIndex const* index,
MMFilesPrimaryIndex const* index,
MMFilesPrimaryIndexImpl const* indexImpl)
: IndexIterator(collection, trx, mmdr, index),
: IndexIterator(collection, trx, index),
_index(indexImpl),
_step(0),
_total(0) {}
@ -175,7 +176,7 @@ MMFilesAnyIndexIterator::MMFilesAnyIndexIterator(
bool MMFilesAnyIndexIterator::next(LocalDocumentIdCallback const& cb, size_t limit) {
while (limit > 0) {
MMFilesSimpleIndexElement element =
_index->findRandom(&_context, _initial, _position, _step, _total);
_index->findRandom(nullptr, _initial, _position, _step, _total);
if (element) {
cb(LocalDocumentId{element.localDocumentId()});
--limit;
@ -337,19 +338,15 @@ MMFilesSimpleIndexElement MMFilesPrimaryIndex::lookupSequential(
/// @brief request an iterator over all elements in the index in
/// a sequential order.
IndexIterator* MMFilesPrimaryIndex::allIterator(transaction::Methods* trx,
ManagedDocumentResult* mmdr,
bool reverse) const {
return new MMFilesAllIndexIterator(_collection, trx, mmdr, this,
_primaryIndex.get(), reverse);
return new MMFilesAllIndexIterator(_collection, trx, this, _primaryIndex.get(), reverse);
}
/// @brief request an iterator over all elements in the index in
/// a random order. It is guaranteed that each element is found
/// exactly once unless the collection is modified.
IndexIterator* MMFilesPrimaryIndex::anyIterator(
transaction::Methods* trx, ManagedDocumentResult* mmdr) const {
return new MMFilesAnyIndexIterator(_collection, trx, mmdr, this,
_primaryIndex.get());
IndexIterator* MMFilesPrimaryIndex::anyIterator(transaction::Methods* trx) const {
return new MMFilesAnyIndexIterator(_collection, trx, this, _primaryIndex.get());
}
/// @brief a method to iterate over all elements in the index in
@ -365,26 +362,12 @@ MMFilesSimpleIndexElement MMFilesPrimaryIndex::lookupSequentialReverse(
}
/// @brief adds a key/element to the index
/// returns a status code, and *found will contain a found element (if any)
Result MMFilesPrimaryIndex::insertKey(transaction::Methods* trx,
LocalDocumentId const& documentId,
VPackSlice const& doc,
OperationMode mode) {
ManagedDocumentResult result;
IndexLookupContext context(trx, _collection, &result, 1);
MMFilesSimpleIndexElement element(buildKeyElement(documentId, doc));
int res = _primaryIndex->insert(&context, element);
if (res == TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED) {
std::string existingId(doc.get(StaticStrings::KeyString).copyString());
if (mode == OperationMode::internal) {
return IndexResult(res, std::move(existingId));
}
return IndexResult(res, this, existingId);
}
return IndexResult(res, this);
ManagedDocumentResult mmdr;
return insertKey(trx, documentId, doc, mmdr, mode);
}
Result MMFilesPrimaryIndex::insertKey(transaction::Methods* trx,
@ -395,6 +378,9 @@ Result MMFilesPrimaryIndex::insertKey(transaction::Methods* trx,
IndexLookupContext context(trx, _collection, &mmdr, 1);
MMFilesSimpleIndexElement element(buildKeyElement(documentId, doc));
// TODO: we can pass in a special IndexLookupContext which has some more on the information
// about the to-be-inserted document. this way we can spare one lookup in
// IsEqualElementElementByKey
int res = _primaryIndex->insert(&context, element);
if (res == TRI_ERROR_ARANGO_UNIQUE_CONSTRAINT_VIOLATED) {
@ -408,23 +394,13 @@ Result MMFilesPrimaryIndex::insertKey(transaction::Methods* trx,
return IndexResult(res, this);
}
/// @brief removes an key/element from the index
/// @brief removes a key/element from the index
Result MMFilesPrimaryIndex::removeKey(transaction::Methods* trx,
LocalDocumentId const&,
LocalDocumentId const& documentId,
VPackSlice const& doc,
OperationMode mode) {
ManagedDocumentResult result;
IndexLookupContext context(trx, _collection, &result, 1);
VPackSlice keySlice(transaction::helpers::extractKeyFromDocument(doc));
MMFilesSimpleIndexElement found =
_primaryIndex->removeByKey(&context, keySlice.begin());
if (!found) {
return IndexResult(TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND, this);
}
return Result(TRI_ERROR_NO_ERROR);
ManagedDocumentResult mmdr;
return removeKey(trx, documentId, doc, mmdr, mode);
}
Result MMFilesPrimaryIndex::removeKey(transaction::Methods* trx,
@ -442,7 +418,7 @@ Result MMFilesPrimaryIndex::removeKey(transaction::Methods* trx,
return IndexResult(TRI_ERROR_ARANGO_DOCUMENT_NOT_FOUND, this);
}
return Result(TRI_ERROR_NO_ERROR);
return Result();
}
/// @brief resizes the index
@ -504,14 +480,14 @@ IndexIterator* MMFilesPrimaryIndex::iteratorForCondition(
// a.b IN values
if (!valNode->isArray()) {
// a.b IN non-array
return new EmptyIndexIterator(_collection, trx, mmdr, this);
return new EmptyIndexIterator(_collection, trx, this);
}
return createInIterator(trx, mmdr, attrNode, valNode);
}
// operator type unsupported
return new EmptyIndexIterator(_collection, trx, mmdr, this);
return new EmptyIndexIterator(_collection, trx, this);
}
/// @brief specializes the condition for use with the index
@ -551,7 +527,7 @@ IndexIterator* MMFilesPrimaryIndex::createInIterator(
THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
}
keys->close();
return new MMFilesPrimaryIndexIterator(_collection, trx, mmdr, this, keys);
return new MMFilesPrimaryIndexIterator(_collection, trx, this, keys);
}
/// @brief create the iterator, for a single attribute, EQ operator
@ -574,7 +550,7 @@ IndexIterator* MMFilesPrimaryIndex::createEqIterator(
THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
}
keys->close();
return new MMFilesPrimaryIndexIterator(_collection, trx, mmdr, this, keys);
return new MMFilesPrimaryIndexIterator(_collection, trx, this, keys);
}
/// @brief add a single value node to the iterator's keys

View File

@ -73,6 +73,16 @@ struct MMFilesPrimaryIndexHelper {
inline bool IsEqualElementElement(void* userData,
MMFilesSimpleIndexElement const& left,
MMFilesSimpleIndexElement const& right) const {
return (left.localDocumentId() == right.localDocumentId());
}
inline bool IsEqualElementElementByKey(void* userData,
MMFilesSimpleIndexElement const& left,
MMFilesSimpleIndexElement const& right) const {
if (left.hash() != right.hash()) {
// TODO: check if we have many collisions here
return false;
}
IndexLookupContext* context = static_cast<IndexLookupContext*>(userData);
TRI_ASSERT(context != nullptr);
@ -82,12 +92,6 @@ struct MMFilesPrimaryIndexHelper {
TRI_ASSERT(r.isString());
return l.equals(r);
}
inline bool IsEqualElementElementByKey(void* userData,
MMFilesSimpleIndexElement const& left,
MMFilesSimpleIndexElement const& right) const {
return IsEqualElementElement(userData, left, right);
}
};
typedef arangodb::basics::AssocUnique<uint8_t, MMFilesSimpleIndexElement, MMFilesPrimaryIndexHelper>
@ -97,7 +101,6 @@ class MMFilesPrimaryIndexIterator final : public IndexIterator {
public:
MMFilesPrimaryIndexIterator(LogicalCollection* collection,
transaction::Methods* trx,
ManagedDocumentResult* mmdr,
MMFilesPrimaryIndex const* index,
std::unique_ptr<VPackBuilder>& keys);
@ -119,7 +122,6 @@ class MMFilesAllIndexIterator final : public IndexIterator {
public:
MMFilesAllIndexIterator(LogicalCollection* collection,
transaction::Methods* trx,
ManagedDocumentResult* mmdr,
MMFilesPrimaryIndex const* index,
MMFilesPrimaryIndexImpl const* indexImpl,
bool reverse);
@ -147,7 +149,6 @@ class MMFilesAnyIndexIterator final : public IndexIterator {
public:
MMFilesAnyIndexIterator(LogicalCollection* collection,
transaction::Methods* trx,
ManagedDocumentResult* mmdr,
MMFilesPrimaryIndex const* index,
MMFilesPrimaryIndexImpl const* indexImpl);
@ -232,14 +233,12 @@ class MMFilesPrimaryIndex final : public MMFilesIndex {
/// @brief request an iterator over all elements in the index in
/// a sequential order.
IndexIterator* allIterator(transaction::Methods*, ManagedDocumentResult*,
bool reverse) const;
IndexIterator* allIterator(transaction::Methods*, bool reverse) const;
/// @brief request an iterator over all elements in the index in
/// a random order. It is guaranteed that each element is found
/// exactly once unless the collection is modified.
IndexIterator* anyIterator(transaction::Methods*,
ManagedDocumentResult*) const;
IndexIterator* anyIterator(transaction::Methods*) const;
/// @brief a method to iterate over all elements in the index in
/// reversed sequential order.

View File

@ -1283,7 +1283,7 @@ IndexIterator* MMFilesSkiplistIndex::iteratorForCondition(
// will have _fields many entries.
TRI_ASSERT(mapping.size() == _fields.size());
if (!findMatchingConditions(node, reference, mapping, usesIn)) {
return new EmptyIndexIterator(_collection, trx, mmdr, this);
return new EmptyIndexIterator(_collection, trx, this);
}
} else {
TRI_IF_FAILURE("SkiplistIndex::noSortIterator") {

View File

@ -99,7 +99,10 @@ bool MMFilesTransactionCollection::isLocked(AccessMode::Type accessType, int nes
/// @brief check whether a collection is locked at all
bool MMFilesTransactionCollection::isLocked() const {
if (CollectionLockState::_noLockHeaders != nullptr && _collection != nullptr) {
if (_collection == nullptr) {
return false;
}
if (CollectionLockState::_noLockHeaders != nullptr) {
std::string collName(_collection->name());
auto it = CollectionLockState::_noLockHeaders->find(collName);
if (it != CollectionLockState::_noLockHeaders->end()) {

View File

@ -358,9 +358,8 @@ void GraphStore<V, E>::_loadVertices(size_t i,
trx->pinData(cid); // will throw when it fails
PregelShard sourceShard = (PregelShard)_config->shardId(vertexShard);
ManagedDocumentResult mmdr;
std::unique_ptr<OperationCursor> cursor =
trx->indexScan(vertexShard, transaction::Methods::CursorType::ALL, &mmdr, false);
trx->indexScan(vertexShard, transaction::Methods::CursorType::ALL, false);
if (cursor->fail()) {
THROW_ARANGO_EXCEPTION_FORMAT(cursor->code, "while looking up shard '%s'",

View File

@ -45,6 +45,7 @@ struct ReplicationApplierState {
ReplicationApplierState();
~ReplicationApplierState();
ReplicationApplierState(ReplicationApplierState const& other) = delete;
ReplicationApplierState& operator=(ReplicationApplierState const& other);
void reset(bool resetState);

View File

@ -24,6 +24,7 @@
#include "TailingSyncer.h"
#include "Basics/Exceptions.h"
#include "Basics/NumberUtils.h"
#include "Basics/ReadLocker.h"
#include "Basics/Result.h"
#include "Basics/StaticStrings.h"
@ -142,8 +143,7 @@ bool TailingSyncer::skipMarker(TRI_voc_tick_t firstRegularTick,
std::string const tick = VelocyPackHelper::getStringValue(slice, "tick", "");
if (!tick.empty()) {
tooOld = (static_cast<TRI_voc_tick_t>(StringUtils::uint64(
tick.c_str(), tick.size())) < firstRegularTick);
tooOld = (NumberUtils::atoi_zero<TRI_voc_tick_t>(tick.data(), tick.data() + tick.size()) < firstRegularTick);
if (tooOld) {
int typeValue = VelocyPackHelper::getNumericValue<int>(slice, "type", 0);
@ -161,8 +161,7 @@ bool TailingSyncer::skipMarker(TRI_voc_tick_t firstRegularTick,
VelocyPackHelper::getStringValue(slice, "tid", "");
if (!id.empty()) {
TRI_voc_tid_t tid = static_cast<TRI_voc_tid_t>(
StringUtils::uint64(id.c_str(), id.size()));
TRI_voc_tid_t tid = NumberUtils::atoi_zero<TRI_voc_tid_t>(id.data(), id.data() + id.size());
if (tid > 0 &&
_ongoingTransactions.find(tid) != _ongoingTransactions.end()) {
@ -333,8 +332,7 @@ Result TailingSyncer::processDocument(TRI_replication_operation_e type,
if (!transactionId.empty()) {
// operation is part of a transaction
tid = static_cast<TRI_voc_tid_t>(
StringUtils::uint64(transactionId.c_str(), transactionId.size()));
tid = NumberUtils::atoi_zero<TRI_voc_tid_t>(transactionId.data(), transactionId.data() + transactionId.size());
}
if (tid > 0) { // part of a transaction
@ -417,8 +415,7 @@ Result TailingSyncer::startTransaction(VPackSlice const& slice) {
// transaction id
// note: this is the remote transaction id!
TRI_voc_tid_t tid =
static_cast<TRI_voc_tid_t>(StringUtils::uint64(id.c_str(), id.size()));
TRI_voc_tid_t tid = NumberUtils::atoi_zero<TRI_voc_tid_t>(id.data(), id.data() + id.size());
auto it = _ongoingTransactions.find(tid);
@ -461,8 +458,7 @@ Result TailingSyncer::abortTransaction(VPackSlice const& slice) {
// transaction id
// note: this is the remote transaction id!
TRI_voc_tid_t const tid =
static_cast<TRI_voc_tid_t>(StringUtils::uint64(id.c_str(), id.size()));
TRI_voc_tid_t const tid = NumberUtils::atoi_zero<TRI_voc_tid_t>(id.data(), id.data() + id.size());
auto it = _ongoingTransactions.find(tid);
@ -499,9 +495,8 @@ Result TailingSyncer::commitTransaction(VPackSlice const& slice) {
}
// transaction id
// note: this is the remote trasnaction id!
TRI_voc_tid_t const tid =
static_cast<TRI_voc_tid_t>(StringUtils::uint64(id.c_str(), id.size()));
// note: this is the remote transaction id!
TRI_voc_tid_t const tid = NumberUtils::atoi_zero<TRI_voc_tid_t>(id.data(), id.data() + id.size());
auto it = _ongoingTransactions.find(tid);
@ -618,8 +613,7 @@ Result TailingSyncer::applyLogMarker(VPackSlice const& slice,
std::string const tick = VelocyPackHelper::getStringValue(slice, "tick", "");
if (!tick.empty()) {
TRI_voc_tick_t newTick = static_cast<TRI_voc_tick_t>(
StringUtils::uint64(tick.c_str(), tick.size()));
TRI_voc_tick_t newTick = NumberUtils::atoi_zero<TRI_voc_tick_t>(tick.data(), tick.data() + tick.size());
if (newTick >= firstRegularTick) {
WRITE_LOCKER_EVENTUAL(writeLocker, _applier->_statusLock);
if (newTick > _applier->_state._lastProcessedContinuousTick) {

View File

@ -22,6 +22,7 @@
////////////////////////////////////////////////////////////////////////////////
#include "RestImportHandler.h"
#include "Basics/NumberUtils.h"
#include "Basics/StaticStrings.h"
#include "Basics/StringUtils.h"
#include "Basics/VelocyPackHelper.h"
@ -677,7 +678,7 @@ bool RestImportHandler::createFromKeyValueList() {
std::string const& lineNumValue = _request->value("line", found);
if (found) {
lineNumber = StringUtils::int64(lineNumValue);
lineNumber = NumberUtils::atoi_zero<int64_t>(lineNumValue.data(), lineNumValue.data() + lineNumValue.size());
}
HttpRequest* httpRequest = dynamic_cast<HttpRequest*>(_request.get());

View File

@ -30,7 +30,6 @@
#include "Basics/StringUtils.h"
#include "Basics/VelocyPackHelper.h"
#include "Logger/Logger.h"
#include "Cluster/ClusterComm.h"
#include "Cluster/ClusterInfo.h"
#include "Cluster/ClusterMethods.h"
#include "Cluster/ServerState.h"

View File

@ -31,6 +31,7 @@
#include "Basics/ArangoGlobalContext.h"
#include "Basics/FileUtils.h"
#include "Basics/MutexLocker.h"
#include "Basics/NumberUtils.h"
#include "Basics/StringUtils.h"
#include "Basics/WriteLocker.h"
#include "Basics/files.h"
@ -326,9 +327,6 @@ void DatabaseFeature::start() {
// set singleton
DATABASE = this;
// init key generator
KeyGenerator::Initialize();
verifyAppPaths();
// scan all databases
@ -1079,7 +1077,7 @@ TRI_vocbase_t* DatabaseFeature::lookupDatabase(std::string const& name) {
// database names with a number in front are invalid names
if (name[0] >= '0' && name[0] <= '9') {
TRI_voc_tick_t id = StringUtils::uint64(name);
TRI_voc_tick_t id = NumberUtils::atoi_zero<TRI_voc_tick_t>(name.data(), name.data() + name.size());
for (auto& p : theLists->_databases) {
TRI_vocbase_t* vocbase = p.second;
if (vocbase->id() == id) {
@ -1113,7 +1111,7 @@ std::string DatabaseFeature::translateCollectionName(std::string const& dbName,
TRI_ASSERT(vocbase->type() == TRI_VOCBASE_TYPE_COORDINATOR);
CollectionNameResolver resolver(vocbase);
return resolver.getCollectionNameCluster(StringUtils::uint64(collectionName));
return resolver.getCollectionNameCluster(NumberUtils::atoi_zero<TRI_voc_cid_t>(collectionName.data(), collectionName.data() + collectionName.size()));
} else {
auto unuser(_databasesProtector.use());
auto theLists = _databasesLists.load();
@ -1126,7 +1124,7 @@ std::string DatabaseFeature::translateCollectionName(std::string const& dbName,
TRI_vocbase_t* vocbase = (*it).second;
TRI_ASSERT(vocbase != nullptr);
TRI_ASSERT(vocbase->type() == TRI_VOCBASE_TYPE_NORMAL);
return vocbase->collectionName(StringUtils::uint64(collectionName));
return vocbase->collectionName(NumberUtils::atoi_zero<uint64_t>(collectionName.data(), collectionName.data() + collectionName.size()));
}
}

View File

@ -53,21 +53,22 @@ AqlValue RocksDBAqlFunctions::Fulltext(
VPackFunctionParameters const& parameters) {
ValidateParameters(parameters, "FULLTEXT", 3, 4);
AqlValue collectionValue = ExtractFunctionParameterValue(trx, parameters, 0);
AqlValue collectionValue = ExtractFunctionParameterValue(parameters, 0);
if (!collectionValue.isString()) {
RegisterWarning(query, "FULLTEXT", TRI_ERROR_QUERY_FUNCTION_ARGUMENT_TYPE_MISMATCH);
return AqlValue(AqlValueHintNull());
}
std::string const cname(collectionValue.slice().copyString());
AqlValue attribute = ExtractFunctionParameterValue(trx, parameters, 1);
AqlValue attribute = ExtractFunctionParameterValue(parameters, 1);
if (!attribute.isString()) {
RegisterWarning(query, "FULLTEXT", TRI_ERROR_QUERY_FUNCTION_ARGUMENT_TYPE_MISMATCH);
return AqlValue(AqlValueHintNull());
}
std::string const attributeName(attribute.slice().copyString());
AqlValue queryValue = ExtractFunctionParameterValue(trx, parameters, 2);
AqlValue queryValue = ExtractFunctionParameterValue(parameters, 2);
if (!queryValue.isString()) {
RegisterWarning(query, "FULLTEXT", TRI_ERROR_QUERY_FUNCTION_ARGUMENT_TYPE_MISMATCH);
return AqlValue(AqlValueHintNull());
@ -76,7 +77,7 @@ AqlValue RocksDBAqlFunctions::Fulltext(
size_t maxResults = 0; // 0 means "all results"
if (parameters.size() >= 4) {
AqlValue limit = ExtractFunctionParameterValue(trx, parameters, 3);
AqlValue limit = ExtractFunctionParameterValue(parameters, 3);
if (!limit.isNull(true) && !limit.isNumber()) {
RegisterWarning(query, "FULLTEXT", TRI_ERROR_QUERY_FUNCTION_ARGUMENT_TYPE_MISMATCH);
return AqlValue(AqlValueHintNull());
@ -295,7 +296,7 @@ AqlValue RocksDBAqlFunctions::Near(arangodb::aql::Query* query,
VPackFunctionParameters const& parameters) {
ValidateParameters(parameters, "NEAR", 3, 5);
AqlValue collectionValue = ExtractFunctionParameterValue(trx, parameters, 0);
AqlValue collectionValue = ExtractFunctionParameterValue(parameters, 0);
if (!collectionValue.isString()) {
THROW_ARANGO_EXCEPTION_PARAMS(
TRI_ERROR_QUERY_FUNCTION_ARGUMENT_TYPE_MISMATCH, "NEAR");
@ -303,8 +304,8 @@ AqlValue RocksDBAqlFunctions::Near(arangodb::aql::Query* query,
std::string const collectionName(collectionValue.slice().copyString());
AqlValue latitude = ExtractFunctionParameterValue(trx, parameters, 1);
AqlValue longitude = ExtractFunctionParameterValue(trx, parameters, 2);
AqlValue latitude = ExtractFunctionParameterValue(parameters, 1);
AqlValue longitude = ExtractFunctionParameterValue(parameters, 2);
if (!latitude.isNumber() || !longitude.isNumber()) {
THROW_ARANGO_EXCEPTION_PARAMS(
@ -315,7 +316,7 @@ AqlValue RocksDBAqlFunctions::Near(arangodb::aql::Query* query,
int64_t limitValue = 100;
if (parameters.size() > 3) {
AqlValue limit = ExtractFunctionParameterValue(trx, parameters, 3);
AqlValue limit = ExtractFunctionParameterValue(parameters, 3);
if (limit.isNumber()) {
limitValue = limit.toInt64(trx);
@ -328,7 +329,7 @@ AqlValue RocksDBAqlFunctions::Near(arangodb::aql::Query* query,
std::string attributeName;
if (parameters.size() > 4) {
// have a distance attribute
AqlValue distanceValue = ExtractFunctionParameterValue(trx, parameters, 4);
AqlValue distanceValue = ExtractFunctionParameterValue(parameters, 4);
if (!distanceValue.isNull(true) && !distanceValue.isString()) {
THROW_ARANGO_EXCEPTION_PARAMS(
@ -360,7 +361,7 @@ AqlValue RocksDBAqlFunctions::Within(
VPackFunctionParameters const& parameters) {
ValidateParameters(parameters, "WITHIN", 4, 5);
AqlValue collectionValue = ExtractFunctionParameterValue(trx, parameters, 0);
AqlValue collectionValue = ExtractFunctionParameterValue(parameters, 0);
if (!collectionValue.isString()) {
THROW_ARANGO_EXCEPTION_PARAMS(
@ -369,9 +370,9 @@ AqlValue RocksDBAqlFunctions::Within(
std::string const collectionName(collectionValue.slice().copyString());
AqlValue latitudeValue = ExtractFunctionParameterValue(trx, parameters, 1);
AqlValue longitudeValue = ExtractFunctionParameterValue(trx, parameters, 2);
AqlValue radiusValue = ExtractFunctionParameterValue(trx, parameters, 3);
AqlValue latitudeValue = ExtractFunctionParameterValue(parameters, 1);
AqlValue longitudeValue = ExtractFunctionParameterValue(parameters, 2);
AqlValue radiusValue = ExtractFunctionParameterValue(parameters, 3);
if (!latitudeValue.isNumber() || !longitudeValue.isNumber() ||
!radiusValue.isNumber()) {
@ -382,7 +383,7 @@ AqlValue RocksDBAqlFunctions::Within(
std::string attributeName;
if (parameters.size() > 4) {
// have a distance attribute
AqlValue distanceValue = ExtractFunctionParameterValue(trx, parameters, 4);
AqlValue distanceValue = ExtractFunctionParameterValue(parameters, 4);
if (!distanceValue.isNull(true) && !distanceValue.isString()) {
THROW_ARANGO_EXCEPTION_PARAMS(

View File

@ -657,29 +657,26 @@ bool RocksDBCollection::dropIndex(TRI_idx_iid_t iid) {
}
std::unique_ptr<IndexIterator> RocksDBCollection::getAllIterator(
transaction::Methods* trx, ManagedDocumentResult* mdr, bool reverse) const {
transaction::Methods* trx, bool reverse) const {
return std::unique_ptr<IndexIterator>(new RocksDBAllIndexIterator(
_logicalCollection, trx, mdr, primaryIndex(), reverse));
_logicalCollection, trx, primaryIndex(), reverse));
}
std::unique_ptr<IndexIterator> RocksDBCollection::getAnyIterator(
transaction::Methods* trx, ManagedDocumentResult* mdr) const {
std::unique_ptr<IndexIterator> RocksDBCollection::getAnyIterator(transaction::Methods* trx) const {
return std::unique_ptr<IndexIterator>(new RocksDBAnyIndexIterator(
_logicalCollection, trx, mdr, primaryIndex()));
_logicalCollection, trx, primaryIndex()));
}
std::unique_ptr<IndexIterator> RocksDBCollection::getSortedAllIterator(
transaction::Methods* trx, ManagedDocumentResult* mdr) const {
std::unique_ptr<IndexIterator> RocksDBCollection::getSortedAllIterator(transaction::Methods* trx) const {
return std::unique_ptr<RocksDBSortedAllIterator>(new RocksDBSortedAllIterator(
_logicalCollection, trx, mdr, primaryIndex()));
_logicalCollection, trx, primaryIndex()));
}
void RocksDBCollection::invokeOnAllElements(
transaction::Methods* trx,
std::function<bool(LocalDocumentId const&)> callback) {
ManagedDocumentResult mmdr;
std::unique_ptr<IndexIterator> cursor(
this->getAllIterator(trx, &mmdr, false));
this->getAllIterator(trx, false));
bool cnt = true;
auto cb = [&](LocalDocumentId token) {
if (cnt) {
@ -869,13 +866,11 @@ Result RocksDBCollection::insert(arangodb::transaction::Methods* trx,
state->prepareOperation(_logicalCollection->cid(), revisionId, StringRef(),
TRI_VOC_DOCUMENT_OPERATION_INSERT);
res = insertDocument(trx, documentId, newSlice, options, options.waitForSync);
res = insertDocument(trx, documentId, newSlice, options);
if (res.ok()) {
Result lookupResult = lookupDocumentVPack(documentId, trx, mdr, false);
if (lookupResult.fail()) {
return lookupResult;
}
trackWaitForSync(trx, options);
mdr.setManaged(newSlice.begin(), documentId);
// report document and key size
RocksDBOperationResult result = state->addOperation(
@ -973,10 +968,11 @@ Result RocksDBCollection::update(arangodb::transaction::Methods* trx,
// add possible log statement under guard
state->prepareOperation(_logicalCollection->cid(), revisionId, StringRef(),
TRI_VOC_DOCUMENT_OPERATION_UPDATE);
res = updateDocument(trx, oldDocumentId, oldDoc, documentId, newDoc,
options, options.waitForSync);
res = updateDocument(trx, oldDocumentId, oldDoc, documentId, newDoc, options);
if (res.ok()) {
trackWaitForSync(trx, options);
mdr.setManaged(newDoc.begin(), documentId);
TRI_ASSERT(!mdr.empty());
@ -1071,9 +1067,11 @@ Result RocksDBCollection::replace(
TRI_VOC_DOCUMENT_OPERATION_REPLACE);
RocksDBOperationResult opResult = updateDocument(
trx, oldDocumentId, oldDoc, documentId, newDoc, options,
options.waitForSync);
trx, oldDocumentId, oldDoc, documentId, newDoc, options);
if (opResult.ok()) {
trackWaitForSync(trx, options);
mdr.setManaged(newDoc.begin(), documentId);
TRI_ASSERT(!mdr.empty());
@ -1108,8 +1106,7 @@ Result RocksDBCollection::remove(arangodb::transaction::Methods* trx,
LocalDocumentId const documentId = LocalDocumentId::create();
prevRev = 0;
transaction::BuilderLeaser builder(trx);
newObjectForRemove(trx, slice, documentId, *builder.get(), options.isRestore, revisionId);
revisionId = newRevisionId();
VPackSlice key;
if (slice.isString()) {
@ -1152,9 +1149,11 @@ Result RocksDBCollection::remove(arangodb::transaction::Methods* trx,
// add possible log statement under guard
state->prepareOperation(_logicalCollection->cid(), documentId.id(),
StringRef(key),TRI_VOC_DOCUMENT_OPERATION_REMOVE);
res = removeDocument(trx, oldDocumentId, oldDoc, options, false,
options.waitForSync);
res = removeDocument(trx, oldDocumentId, oldDoc, options);
if (res.ok()) {
trackWaitForSync(trx, options);
// report key size
res = state->addOperation(_logicalCollection->cid(), documentId.id(),
TRI_VOC_DOCUMENT_OPERATION_REMOVE, 0,
@ -1294,11 +1293,10 @@ arangodb::Result RocksDBCollection::fillIndexes(
TRI_ASSERT(trx->state()->collection(_logicalCollection->cid(),
AccessMode::Type::EXCLUSIVE) != nullptr);
ManagedDocumentResult mmdr;
RocksDBIndex* ridx = static_cast<RocksDBIndex*>(added.get());
auto state = RocksDBTransactionState::toState(trx);
std::unique_ptr<IndexIterator> it(new RocksDBAllIndexIterator(
_logicalCollection, trx, &mmdr, primaryIndex(), false));
_logicalCollection, trx, primaryIndex(), false));
// fillindex can be non transactional
rocksdb::DB* db = globalRocksDB()->GetBaseDB();
@ -1344,6 +1342,8 @@ arangodb::Result RocksDBCollection::fillIndexes(
if (!res.ok()) {
it->reset();
batch.Clear();
ManagedDocumentResult mmdr;
arangodb::Result res2; // do not overwrite original error
auto removeCb = [&](LocalDocumentId token) {
@ -1375,7 +1375,7 @@ arangodb::Result RocksDBCollection::fillIndexes(
RocksDBOperationResult RocksDBCollection::insertDocument(
arangodb::transaction::Methods* trx, LocalDocumentId const& documentId,
VPackSlice const& doc, OperationOptions& options, bool& waitForSync) const {
VPackSlice const& doc, OperationOptions& options) const {
RocksDBOperationResult res;
// Coordinator doesn't know index internals
TRI_ASSERT(!ServerState::instance()->isCoordinator());
@ -1414,13 +1414,6 @@ RocksDBOperationResult RocksDBCollection::insertDocument(
}
if (res.ok()) {
if (_logicalCollection->waitForSync()) {
waitForSync = true; // output parameter (by ref)
}
if (waitForSync) {
trx->state()->waitForSync(true);
}
_needToPersistIndexEstimates = true;
}
@ -1429,8 +1422,7 @@ RocksDBOperationResult RocksDBCollection::insertDocument(
RocksDBOperationResult RocksDBCollection::removeDocument(
arangodb::transaction::Methods* trx, LocalDocumentId const& documentId,
VPackSlice const& doc, OperationOptions& options, bool isUpdate,
bool& waitForSync) const {
VPackSlice const& doc, OperationOptions& options) const {
// Coordinator doesn't know index internals
TRI_ASSERT(!ServerState::instance()->isCoordinator());
TRI_ASSERT(trx->state()->isRunning());
@ -1441,18 +1433,12 @@ RocksDBOperationResult RocksDBCollection::removeDocument(
blackListKey(key->string().data(), static_cast<uint32_t>(key->string().size()));
// prepare operation which adds log statements is called
// from the outside. We do not need to DELETE a document from the
// document store, if the doc is overwritten with PUT
// Simon: actually we do, because otherwise the counter recovery is broken
// if (!isUpdate) {
RocksDBMethods* mthd = RocksDBTransactionState::toMethods(trx);
RocksDBOperationResult res =
mthd->Delete(RocksDBColumnFamily::documents(), key.ref());
if (!res.ok()) {
return res;
}
//}
/*LOG_TOPIC(ERR, Logger::FIXME)
<< "Delete rev: " << revisionId << " trx: " << trx->state()->id()
@ -1474,13 +1460,6 @@ RocksDBOperationResult RocksDBCollection::removeDocument(
}
if (res.ok()) {
if (_logicalCollection->waitForSync()) {
waitForSync = true;
}
if (waitForSync) {
trx->state()->waitForSync(true);
}
_needToPersistIndexEstimates = true;
}
@ -1507,8 +1486,7 @@ RocksDBOperationResult RocksDBCollection::lookupDocument(
RocksDBOperationResult RocksDBCollection::updateDocument(
transaction::Methods* trx, LocalDocumentId const& oldDocumentId,
VPackSlice const& oldDoc, LocalDocumentId const& newDocumentId,
VPackSlice const& newDoc, OperationOptions& options,
bool& waitForSync) const {
VPackSlice const& newDoc, OperationOptions& options) const {
// keysize in return value is set by insertDocument
// Coordinator doesn't know index internals
@ -1561,13 +1539,6 @@ RocksDBOperationResult RocksDBCollection::updateDocument(
}
if (res.ok()) {
if (_logicalCollection->waitForSync()) {
waitForSync = true;
}
if (waitForSync) {
trx->state()->waitForSync(true);
}
_needToPersistIndexEstimates = true;
}
@ -2033,3 +2004,14 @@ void RocksDBCollection::blackListKey(char const* data, std::size_t len) const {
}
}
}
void RocksDBCollection::trackWaitForSync(arangodb::transaction::Methods* trx,
OperationOptions& options) {
if (_logicalCollection->waitForSync()) {
options.waitForSync = true;
}
if (options.waitForSync) {
trx->state()->waitForSync(true);
}
}

View File

@ -107,13 +107,12 @@ class RocksDBCollection final : public PhysicalCollection {
/// @brief Drop an index with the given iid.
bool dropIndex(TRI_idx_iid_t iid) override;
std::unique_ptr<IndexIterator> getAllIterator(transaction::Methods* trx,
ManagedDocumentResult* mdr,
bool reverse) const override;
std::unique_ptr<IndexIterator> getAnyIterator(
transaction::Methods* trx, ManagedDocumentResult* mdr) const override;
transaction::Methods* trx) const override;
std::unique_ptr<IndexIterator> getSortedAllIterator(
transaction::Methods* trx, ManagedDocumentResult* mdr) const;
transaction::Methods* trx) const;
void invokeOnAllElements(
transaction::Methods* trx,
@ -205,6 +204,9 @@ class RocksDBCollection final : public PhysicalCollection {
inline bool cacheEnabled() const { return _cacheEnabled; }
private:
/// @brief track the usage of waitForSync option in an operation
void trackWaitForSync(arangodb::transaction::Methods* trx, OperationOptions& options);
/// @brief return engine-specific figures
void figuresSpecific(
std::shared_ptr<arangodb::velocypack::Builder>&) override;
@ -229,13 +231,11 @@ class RocksDBCollection final : public PhysicalCollection {
arangodb::RocksDBOperationResult insertDocument(
arangodb::transaction::Methods* trx, LocalDocumentId const& documentId,
arangodb::velocypack::Slice const& doc, OperationOptions& options,
bool& waitForSync) const;
arangodb::velocypack::Slice const& doc, OperationOptions& options) const;
arangodb::RocksDBOperationResult removeDocument(
arangodb::transaction::Methods* trx, LocalDocumentId const& documentId,
arangodb::velocypack::Slice const& doc, OperationOptions& options,
bool isUpdate, bool& waitForSync) const;
arangodb::velocypack::Slice const& doc, OperationOptions& options) const;
arangodb::RocksDBOperationResult lookupDocument(
transaction::Methods* trx, arangodb::velocypack::Slice const& key,
@ -245,8 +245,7 @@ class RocksDBCollection final : public PhysicalCollection {
transaction::Methods* trx, LocalDocumentId const& oldDocumentId,
arangodb::velocypack::Slice const& oldDoc,
LocalDocumentId const& newDocumentId,
arangodb::velocypack::Slice const& newDoc, OperationOptions& options,
bool& waitForSync) const;
arangodb::velocypack::Slice const& newDoc, OperationOptions& options) const;
arangodb::Result lookupDocumentVPack(LocalDocumentId const& documentId,
transaction::Methods*,

View File

@ -44,20 +44,20 @@ struct RocksDBColumnFamily {
static constexpr size_t minNumberOfColumnFamilies = 7;
static constexpr size_t numberOfColumnFamilies = 7;
static inline rocksdb::ColumnFamilyHandle* definitions() { return _definitions; }
static rocksdb::ColumnFamilyHandle* definitions() { return _definitions; }
static inline rocksdb::ColumnFamilyHandle* documents() { return _documents; }
static rocksdb::ColumnFamilyHandle* documents() { return _documents; }
static inline rocksdb::ColumnFamilyHandle* primary() { return _primary; }
static rocksdb::ColumnFamilyHandle* primary() { return _primary; }
static inline rocksdb::ColumnFamilyHandle* edge() { return _edge; }
static rocksdb::ColumnFamilyHandle* edge() { return _edge; }
/// unique and non unique vpack indexes (skiplist, permanent indexes)
static inline rocksdb::ColumnFamilyHandle* vpack() { return _vpack; }
static rocksdb::ColumnFamilyHandle* vpack() { return _vpack; }
static inline rocksdb::ColumnFamilyHandle* geo() { return _geo; }
static rocksdb::ColumnFamilyHandle* geo() { return _geo; }
static inline rocksdb::ColumnFamilyHandle* fulltext() { return _fulltext; }
static rocksdb::ColumnFamilyHandle* fulltext() { return _fulltext; }
static rocksdb::ColumnFamilyHandle* invalid() { return rocksutils::defaultCF(); }

View File

@ -578,13 +578,13 @@ IndexIterator* RocksDBEdgeIndex::iteratorForCondition(
// a.b IN values
if (!valNode->isArray()) {
// a.b IN non-array
return new EmptyIndexIterator(_collection, trx, mmdr, this);
return new EmptyIndexIterator(_collection, trx, this);
}
return createInIterator(trx, mmdr, attrNode, valNode);
}
// operator type unsupported
return new EmptyIndexIterator(_collection, trx, mmdr, this);
return new EmptyIndexIterator(_collection, trx, this);
}
/// @brief specializes the condition for use with the index

View File

@ -25,6 +25,7 @@
#include "RocksDBEngine/RocksDBExportCursor.h"
#include "Basics/WriteLocker.h"
#include "Indexes/IndexIterator.h"
#include "Logger/Logger.h"
#include "RocksDBEngine/RocksDBCollection.h"
#include "StorageEngine/EngineSelectorFeature.h"
#include "StorageEngine/PhysicalCollection.h"
@ -35,8 +36,6 @@
#include "VocBase/LogicalCollection.h"
#include "VocBase/vocbase.h"
#include "Logger/Logger.h"
#include <velocypack/Builder.h>
#include <velocypack/Dumper.h>
#include <velocypack/Iterator.h>
@ -53,8 +52,7 @@ RocksDBExportCursor::RocksDBExportCursor(
_guard(vocbase),
_resolver(vocbase),
_restrictions(restrictions),
_name(name),
_mdr() {
_name(name) {
// prevent the collection from being unloaded while the export is ongoing
// this may throw
_collectionGuard.reset(
@ -76,7 +74,7 @@ RocksDBExportCursor::RocksDBExportCursor(
auto rocksCollection =
static_cast<RocksDBCollection*>(_collection->getPhysical());
_iter = rocksCollection->getAllIterator(_trx.get(), &_mdr, false);
_iter = rocksCollection->getAllIterator(_trx.get(), false);
_size = _collection->numberDocuments(_trx.get());
if (limit > 0 && limit < _size) {

View File

@ -30,7 +30,6 @@
#include "Utils/CollectionNameResolver.h"
#include "Utils/Cursor.h"
#include "Utils/DatabaseGuard.h"
#include "VocBase/ManagedDocumentResult.h"
#include "VocBase/voc-types.h"
namespace arangodb {
@ -67,7 +66,6 @@ class RocksDBExportCursor final : public Cursor {
std::unique_ptr<arangodb::CollectionGuard> _collectionGuard;
LogicalCollection* _collection;
std::unique_ptr<transaction::Methods> _trx;
ManagedDocumentResult _mdr;
std::unique_ptr<IndexIterator> _iter;
size_t _size;
};

View File

@ -469,7 +469,6 @@ IndexIterator* RocksDBFulltextIndex::iteratorForCondition(transaction::Methods*
size_t numMembers = args->numMembers();
TRI_ASSERT(numMembers == 3 || numMembers == 4);
std::string attr = args->getMember(1)->getString();
std::string query = args->getMember(2)->getString();
FulltextQuery parsedQuery;

View File

@ -157,7 +157,7 @@ public:
TRI_ASSERT(limit > 0);
while (_pos != _docs.end() && limit > 0) {
cb(*_pos);
_pos++;
++_pos;
limit--;
}
return _pos != _docs.end();
@ -167,7 +167,7 @@ public:
void skip(uint64_t count, uint64_t& skipped) override {
while (_pos != _docs.end()) {
_pos++;
++_pos;
skipped++;
}
}

View File

@ -441,7 +441,7 @@ Result handleSyncKeysRocksDB(DatabaseInitialSyncer& syncer,
LogicalCollection* coll = trx.documentCollection();
auto ph = static_cast<RocksDBCollection*>(coll->getPhysical());
std::unique_ptr<IndexIterator> iterator =
ph->getSortedAllIterator(&trx, &mmdr);
ph->getSortedAllIterator(&trx);
iterator->next(
[&](LocalDocumentId const& token) {
if (coll->readDocument(&trx, token, mmdr) == false) {
@ -589,7 +589,7 @@ Result handleSyncKeysRocksDB(DatabaseInitialSyncer& syncer,
auto ph = static_cast<RocksDBCollection*>(col->getPhysical());
std::unique_ptr<IndexIterator> iterator =
ph->getSortedAllIterator(&trx, &mmdr);
ph->getSortedAllIterator(&trx);
iterator->next(
[&](LocalDocumentId const& token) {
if (col->readDocument(&trx, token, mmdr) == false) {

View File

@ -40,8 +40,8 @@ constexpr bool AnyIteratorFillBlockCache = false;
RocksDBAllIndexIterator::RocksDBAllIndexIterator(
LogicalCollection* col, transaction::Methods* trx,
ManagedDocumentResult* mmdr, RocksDBPrimaryIndex const* index, bool reverse)
: IndexIterator(col, trx, mmdr, index),
RocksDBPrimaryIndex const* index, bool reverse)
: IndexIterator(col, trx, index),
_reverse(reverse),
_bounds(RocksDBKeyBounds::CollectionDocuments(
static_cast<RocksDBCollection*>(col->getPhysical())->objectId())),
@ -175,8 +175,8 @@ void RocksDBAllIndexIterator::reset() {
RocksDBAnyIndexIterator::RocksDBAnyIndexIterator(
LogicalCollection* col, transaction::Methods* trx,
ManagedDocumentResult* mmdr, RocksDBPrimaryIndex const* index)
: IndexIterator(col, trx, mmdr, index),
RocksDBPrimaryIndex const* index)
: IndexIterator(col, trx, index),
_cmp(RocksDBColumnFamily::documents()->GetComparator()),
_bounds(RocksDBKeyBounds::CollectionDocuments(
static_cast<RocksDBCollection*>(col->getPhysical())->objectId())),
@ -279,8 +279,8 @@ bool RocksDBAnyIndexIterator::outOfRange() const {
RocksDBSortedAllIterator::RocksDBSortedAllIterator(
LogicalCollection* collection, transaction::Methods* trx,
ManagedDocumentResult* mmdr, RocksDBPrimaryIndex const* index)
: IndexIterator(collection, trx, mmdr, index),
RocksDBPrimaryIndex const* index)
: IndexIterator(collection, trx, index),
_trx(trx),
_bounds(RocksDBKeyBounds::PrimaryIndex(index->objectId())),
_cmp(index->comparator()) {

View File

@ -47,7 +47,6 @@ class RocksDBAllIndexIterator final : public IndexIterator {
public:
RocksDBAllIndexIterator(LogicalCollection* collection,
transaction::Methods* trx,
ManagedDocumentResult* mmdr,
RocksDBPrimaryIndex const* index, bool reverse);
~RocksDBAllIndexIterator() {}
@ -73,7 +72,6 @@ class RocksDBAnyIndexIterator final : public IndexIterator {
public:
RocksDBAnyIndexIterator(LogicalCollection* collection,
transaction::Methods* trx,
ManagedDocumentResult* mmdr,
RocksDBPrimaryIndex const* index);
~RocksDBAnyIndexIterator() {}
@ -103,7 +101,6 @@ class RocksDBSortedAllIterator final : public IndexIterator {
public:
RocksDBSortedAllIterator(LogicalCollection* collection,
transaction::Methods* trx,
ManagedDocumentResult* mmdr,
RocksDBPrimaryIndex const* index);
~RocksDBSortedAllIterator() {}

View File

@ -166,7 +166,7 @@ class RocksDBKey {
/// May be called on any valid key (in our keyspace)
//////////////////////////////////////////////////////////////////////////////
static RocksDBEntryType type(RocksDBKey const&);
static inline RocksDBEntryType type(rocksdb::Slice const& slice) {
static RocksDBEntryType type(rocksdb::Slice const& slice) {
return type(slice.data(), slice.size());
}
@ -269,7 +269,7 @@ class RocksDBKey {
}
private:
static inline RocksDBEntryType type(char const* data, size_t size) {
static RocksDBEntryType type(char const* data, size_t size) {
TRI_ASSERT(data != nullptr);
TRI_ASSERT(size >= sizeof(char));

View File

@ -321,14 +321,14 @@ IndexIterator* RocksDBPrimaryIndex::iteratorForCondition(
// a.b IN values
if (!valNode->isArray()) {
// a.b IN non-array
return new EmptyIndexIterator(_collection, trx, mmdr, this);
return new EmptyIndexIterator(_collection, trx, this);
}
return createInIterator(trx, mmdr, attrNode, valNode);
}
// operator type unsupported
return new EmptyIndexIterator(_collection, trx, mmdr, this);
return new EmptyIndexIterator(_collection, trx, this);
}
/// @brief specializes the condition for use with the index

View File

@ -25,6 +25,7 @@
#include "RocksDBRecoveryManager.h"
#include "ApplicationFeatures/ApplicationServer.h"
#include "Basics/NumberUtils.h"
#include "Basics/StringUtils.h"
#include "Basics/VelocyPackHelper.h"
#include "Basics/WriteLocker.h"
@ -118,7 +119,7 @@ class WBReader final : public rocksdb::WriteBatch::Handler {
uint64_t _maxHLC = 0;
public:
explicit WBReader(std::unordered_map<uint64_t, rocksdb::SequenceNumber> seqs)
explicit WBReader(std::unordered_map<uint64_t, rocksdb::SequenceNumber> const& seqs)
: currentSeqNum(0), _seqStart(seqs) {}
~WBReader() {
@ -235,19 +236,17 @@ class WBReader final : public rocksdb::WriteBatch::Handler {
// check if the key is numeric
if (ref[0] >= '1' && ref[0] <= '9') {
// numeric start byte. looks good
try {
// extract uint64_t value from key. this will throw if the key
// is non-numeric
uint64_t tick =
basics::StringUtils::uint64_check(ref.data(), ref.size());
bool valid;
uint64_t tick =
NumberUtils::atoi<uint64_t>(ref.data(), ref.data() + ref.size(), valid);
if (valid) {
// if no previous _maxTick set or the numeric value found is
// "near" our previous _maxTick, then we update it
if (tick > _maxTick && (_maxTick == 0 || tick - _maxTick < 2048)) {
storeMaxTick(tick);
}
} catch (...) {
// non-numeric key. simply ignore it
}
}
// else we got a non-numeric key. simply ignore it
}
} else if (column_family_id ==
RocksDBColumnFamily::definitions()->GetID()) {

View File

@ -141,8 +141,7 @@ int RocksDBReplicationContext::bindCollection(
_trx->addCollectionAtRuntime(_collection->name());
_iter = static_cast<RocksDBCollection*>(_collection->getPhysical())
->getSortedAllIterator(_trx.get(),
&_mdr); //_mdr is not used nor updated
->getSortedAllIterator(_trx.get());
_currentTick = 1;
_hasMore = true;
}

View File

@ -430,8 +430,8 @@ class WALParser : public rocksdb::WriteBatch::Handler {
void writeCommitMarker() {
TRI_ASSERT(_seenBeginTransaction && !_singleOp);
LOG_TOPIC(_LOG, Logger::PREGEL) << "tick: " << _currentSequence
<< " commit transaction";
LOG_TOPIC(_LOG, Logger::ROCKSDB) << "tick: " << _currentSequence
<< " commit transaction";
_builder.openObject();
_builder.add("tick", VPackValue(std::to_string(_currentSequence)));

View File

@ -108,7 +108,10 @@ bool RocksDBTransactionCollection::isLocked(AccessMode::Type accessType,
/// @brief check whether a collection is locked at all
bool RocksDBTransactionCollection::isLocked() const {
if (CollectionLockState::_noLockHeaders != nullptr && _collection != nullptr) {
if (_collection == nullptr) {
return false;
}
if (CollectionLockState::_noLockHeaders != nullptr) {
std::string collName(_collection->name());
auto it = CollectionLockState::_noLockHeaders->find(collName);
if (it != CollectionLockState::_noLockHeaders->end()) {

View File

@ -1344,7 +1344,7 @@ IndexIterator* RocksDBVPackIndex::iteratorForCondition(
// unsupported right now. Should have been rejected by
// supportsFilterCondition
TRI_ASSERT(false);
return new EmptyIndexIterator(_collection, trx, mmdr, this);
return new EmptyIndexIterator(_collection, trx, this);
}
value->toVelocyPackValue(searchValues);
}

View File

@ -24,6 +24,7 @@
#include "RocksDBValue.h"
#include "Basics/Exceptions.h"
#include "Basics/NumberUtils.h"
#include "Basics/StaticStrings.h"
#include "Basics/StringUtils.h"
#include "RocksDBEngine/RocksDBCommon.h"
@ -188,9 +189,10 @@ uint64_t RocksDBValue::keyValue(char const* data, size_t size) {
VPackSlice slice(data);
VPackSlice key = slice.get(StaticStrings::KeyString);
if (key.isString()) {
std::string s = key.copyString();
if (s.size() > 0 && s[0] >= '0' && s[0] <= '9') {
return basics::StringUtils::uint64(s);
VPackValueLength l;
char const* p = key.getString(l);
if (l > 0 && *p >= '0' && *p <= '9') {
return NumberUtils::atoi_zero<uint64_t>(p, p + l);
}
}

View File

@ -98,9 +98,9 @@ class Scheduler {
uint64_t minimum() const { return _nrMinimum; }
inline uint64_t numQueued() const noexcept { return _nrQueued; };
inline uint64_t getCounters() const noexcept { return _counters; }
static inline uint64_t numRunning(uint64_t value) noexcept { return value & 0xFFFFULL; }
static inline uint64_t numWorking(uint64_t value) noexcept { return (value >> 16) & 0xFFFFULL; }
static inline uint64_t numBlocked(uint64_t value) noexcept { return (value >> 32) & 0xFFFFULL; }
static uint64_t numRunning(uint64_t value) noexcept { return value & 0xFFFFULL; }
static uint64_t numWorking(uint64_t value) noexcept { return (value >> 16) & 0xFFFFULL; }
static uint64_t numBlocked(uint64_t value) noexcept { return (value >> 32) & 0xFFFFULL; }
inline void queueJob() noexcept { ++_nrQueued; }
inline void unqueueJob() noexcept {

View File

@ -129,6 +129,13 @@ class RequestStatistics {
stat->_requestEnd = StatisticsFeature::time();
}
}
static void SET_REQUEST_START_END(RequestStatistics* stat) {
if (stat != nullptr) {
stat->_requestStart = StatisticsFeature::time();
stat->_requestEnd = StatisticsFeature::time();
}
}
static double ELAPSED_SINCE_READ_START(RequestStatistics* stat) {
if (stat != nullptr) {

View File

@ -89,6 +89,10 @@ std::shared_ptr<Index> PhysicalCollection::lookupIndex(
}
return nullptr;
}
TRI_voc_rid_t PhysicalCollection::newRevisionId() const {
return TRI_HybridLogicalClock();
}
/// @brief merge two objects for update, oldValue must have correctly set
/// _key and _id attributes
@ -172,7 +176,7 @@ void PhysicalCollection::mergeObjectsForUpdate(
}
}
if (!handled) {
revisionId = TRI_HybridLogicalClock();
revisionId = newRevisionId();
b.add(StaticStrings::RevString, VPackValue(TRI_RidToString(revisionId)));
}
@ -309,7 +313,7 @@ int PhysicalCollection::newObjectForInsert(
}
}
if (!handled) {
revisionId = TRI_HybridLogicalClock();
revisionId = newRevisionId();
builder.add(StaticStrings::RevString, VPackValue(TRI_RidToString(revisionId)));
}
@ -335,7 +339,7 @@ void PhysicalCollection::newObjectForRemove(transaction::Methods* trx,
TRI_ASSERT(s.isString());
builder.add(StaticStrings::KeyString, s);
}
revisionId = TRI_HybridLogicalClock();
revisionId = newRevisionId();
builder.add(StaticStrings::RevString, VPackValue(TRI_RidToString(revisionId)));
builder.close();
}
@ -384,7 +388,7 @@ void PhysicalCollection::newObjectForReplace(
}
}
if (!handled) {
revisionId = TRI_HybridLogicalClock();
revisionId = newRevisionId();
builder.add(StaticStrings::RevString, VPackValue(TRI_RidToString(revisionId)));
}

View File

@ -125,9 +125,9 @@ class PhysicalCollection {
virtual bool dropIndex(TRI_idx_iid_t iid) = 0;
virtual std::unique_ptr<IndexIterator> getAllIterator(
transaction::Methods* trx, ManagedDocumentResult* mdr, bool reverse) const = 0;
transaction::Methods* trx, bool reverse) const = 0;
virtual std::unique_ptr<IndexIterator> getAnyIterator(
transaction::Methods* trx, ManagedDocumentResult* mdr) const = 0;
transaction::Methods* trx) const = 0;
virtual void invokeOnAllElements(
transaction::Methods* trx,
std::function<bool(LocalDocumentId const&)> callback) = 0;
@ -214,6 +214,8 @@ class PhysicalCollection {
// SECTION: Document pre commit preperation
TRI_voc_rid_t newRevisionId() const;
/// @brief new object for insert, value must have _key set correctly.
int newObjectForInsert(transaction::Methods* trx,
velocypack::Slice const& value,

View File

@ -29,6 +29,7 @@
#include "Aql/SortCondition.h"
#include "Basics/AttributeNameParser.h"
#include "Basics/Exceptions.h"
#include "Basics/NumberUtils.h"
#include "Basics/StaticStrings.h"
#include "Basics/StringUtils.h"
#include "Basics/VelocyPackHelper.h"
@ -832,9 +833,8 @@ OperationResult transaction::Methods::anyLocal(
return OperationResult(lockResult);
}
ManagedDocumentResult mmdr;
std::unique_ptr<OperationCursor> cursor =
indexScan(collectionName, transaction::Methods::CursorType::ANY, &mmdr, false);
indexScan(collectionName, transaction::Methods::CursorType::ANY, false);
cursor->allDocuments([&resultBuilder](LocalDocumentId const& token, VPackSlice slice) {
resultBuilder.add(slice);
@ -1396,7 +1396,7 @@ OperationResult transaction::Methods::insertLocal(
auto workForOneDocument = [&](VPackSlice const value) -> Result {
if (!value.isObject()) {
return TRI_ERROR_ARANGO_DOCUMENT_TYPE_INVALID;
return Result(TRI_ERROR_ARANGO_DOCUMENT_TYPE_INVALID);
}
ManagedDocumentResult result;
@ -1419,16 +1419,18 @@ OperationResult transaction::Methods::insertLocal(
TRI_ASSERT(!result.empty());
StringRef keyString(transaction::helpers::extractKeyFromDocument(
VPackSlice(result.vpack())));
if (!options.silent || _state->isDBServer()) {
StringRef keyString(transaction::helpers::extractKeyFromDocument(
VPackSlice(result.vpack())));
buildDocumentIdentity(collection, resultBuilder, cid, keyString, revisionId,
0, nullptr, options.returnNew ? &result : nullptr);
buildDocumentIdentity(collection, resultBuilder, cid, keyString, revisionId,
0, nullptr, options.returnNew ? &result : nullptr);
}
return TRI_ERROR_NO_ERROR;
return Result();
};
Result res = TRI_ERROR_NO_ERROR;
Result res;
bool const multiCase = value.isArray();
std::unordered_map<int, size_t> countErrorCodes;
if (multiCase) {
@ -1758,12 +1760,14 @@ OperationResult transaction::Methods::modifyLocal(
TRI_ASSERT(!result.empty());
TRI_ASSERT(!previous.empty());
StringRef key(newVal.get(StaticStrings::KeyString));
buildDocumentIdentity(collection, resultBuilder, cid, key,
TRI_ExtractRevisionId(VPackSlice(result.vpack())),
actualRevision,
options.returnOld ? &previous : nullptr,
options.returnNew ? &result : nullptr);
if (!options.silent || _state->isDBServer()) {
StringRef key(newVal.get(StaticStrings::KeyString));
buildDocumentIdentity(collection, resultBuilder, cid, key,
TRI_ExtractRevisionId(VPackSlice(result.vpack())),
actualRevision,
options.returnOld ? &previous : nullptr,
options.returnNew ? &result : nullptr);
}
return res; // must be ok!
};
@ -2048,13 +2052,15 @@ OperationResult transaction::Methods::removeLocal(
}
TRI_ASSERT(!previous.empty());
buildDocumentIdentity(collection, resultBuilder, cid, key, actualRevision,
0, options.returnOld ? &previous : nullptr, nullptr);
if (!options.silent || _state->isDBServer()) {
buildDocumentIdentity(collection, resultBuilder, cid, key, actualRevision,
0, options.returnOld ? &previous : nullptr, nullptr);
}
return Result(TRI_ERROR_NO_ERROR);
return Result();
};
Result res(TRI_ERROR_NO_ERROR);
Result res;
bool multiCase = value.isArray();
std::unordered_map<int, size_t> countErrorCodes;
if (multiCase) {
@ -2237,9 +2243,8 @@ OperationResult transaction::Methods::allLocal(
return OperationResult(lockResult);
}
ManagedDocumentResult mmdr;
std::unique_ptr<OperationCursor> cursor =
indexScan(collectionName, transaction::Methods::CursorType::ALL, &mmdr, false);
indexScan(collectionName, transaction::Methods::CursorType::ALL, false);
if (cursor->fail()) {
return OperationResult(cursor->code);
@ -2715,7 +2720,6 @@ OperationCursor* transaction::Methods::indexScanForCondition(
/// calling this method
std::unique_ptr<OperationCursor> transaction::Methods::indexScan(
std::string const& collectionName, CursorType cursorType,
ManagedDocumentResult* mmdr,
bool reverse) {
// For now we assume indexId is the iid part of the index.
@ -2739,11 +2743,11 @@ std::unique_ptr<OperationCursor> transaction::Methods::indexScan(
std::unique_ptr<IndexIterator> iterator = nullptr;
switch (cursorType) {
case CursorType::ANY: {
iterator = logical->getAnyIterator(this, mmdr);
iterator = logical->getAnyIterator(this);
break;
}
case CursorType::ALL: {
iterator = logical->getAllIterator(this, mmdr, reverse);
iterator = logical->getAllIterator(this, reverse);
break;
}
}
@ -3070,10 +3074,9 @@ Result transaction::Methods::resolveId(char const* handle, size_t length,
}
if (*handle >= '0' && *handle <= '9') {
cid = arangodb::basics::StringUtils::uint64(handle, p - handle);
cid = NumberUtils::atoi_zero<TRI_voc_cid_t>(handle, p);
} else {
std::string const name(handle, p - handle);
cid = resolver()->getCollectionIdCluster(name);
cid = resolver()->getCollectionIdCluster(std::string(handle, p - handle));
}
if (cid == 0) {

View File

@ -365,7 +365,6 @@ class Methods {
ENTERPRISE_VIRT
std::unique_ptr<OperationCursor> indexScan(std::string const& collectionName,
CursorType cursorType,
ManagedDocumentResult*,
bool reverse);
/// @brief test if a collection is already locked

Some files were not shown because too many files have changed in this diff Show More