
upgrade vpack library (#10314)

Author: Jan, 2019-10-25 11:04:16 +02:00, committed by GitHub
parent 46e98d7110
commit 359ee03dd8
18 changed files with 269 additions and 158 deletions

View File

@@ -55,11 +55,12 @@ class Buffer {
Buffer(Buffer const& that) : Buffer() {
if (that._size > 0) {
if (that._size > sizeof(_local)) {
_buffer = static_cast<T*>(malloc(checkOverflow(sizeof(T) * that._size)));
if (that._size > sizeof(that._local)) {
_buffer = static_cast<T*>(malloc(checkOverflow(that._size)));
ensureValidPointer(_buffer);
_capacity = that._size;
} else {
VELOCYPACK_ASSERT(_buffer == &_local[0]);
_capacity = sizeof(_local);
}
memcpy(_buffer, that._buffer, checkOverflow(that._size));
@@ -73,10 +74,9 @@ class Buffer {
// our own buffer is big enough to hold the data
initWithNone();
memcpy(_buffer, that._buffer, checkOverflow(that._size));
}
else {
} else {
// our own buffer is not big enough to hold the data
T* buffer = static_cast<T*>(malloc(checkOverflow(sizeof(T) * that._size)));
T* buffer = static_cast<T*>(malloc(checkOverflow(that._size)));
ensureValidPointer(buffer);
buffer[0] = '\x00';
memcpy(buffer, that._buffer, checkOverflow(that._size));
@@ -94,8 +94,11 @@ class Buffer {
}
Buffer(Buffer&& that) noexcept : _buffer(_local), _capacity(sizeof(_local)) {
poison(_buffer, _capacity);
initWithNone();
if (that._buffer == that._local) {
memcpy(_buffer, that._buffer, static_cast<std::size_t>(that._size));
VELOCYPACK_ASSERT(that._capacity == sizeof(that._local));
memcpy(_buffer, that._buffer, checkOverflow(that._size));
} else {
_buffer = that._buffer;
_capacity = that._capacity;
@@ -104,16 +107,20 @@ class Buffer {
}
_size = that._size;
that._size = 0;
that.initWithNone();
}
Buffer& operator=(Buffer&& that) noexcept {
if (this != &that) {
if (_buffer != _local) {
free(_buffer);
}
if (that._buffer == that._local) {
memcpy(_buffer, that._buffer, static_cast<std::size_t>(that._size));
_buffer = _local;
_capacity = sizeof(_local);
initWithNone();
memcpy(_buffer, that._buffer, checkOverflow(that._size));
} else {
if (_buffer != _local) {
free(_buffer);
}
_buffer = that._buffer;
_capacity = that._capacity;
that._buffer = that._local;
@@ -121,6 +128,7 @@ class Buffer {
}
_size = that._size;
that._size = 0;
that.initWithNone();
}
return *this;
}
@@ -281,11 +289,11 @@ class Buffer {
VELOCYPACK_ASSERT(newLen > 0);
T* p;
if (_buffer != _local) {
p = static_cast<T*>(realloc(_buffer, checkOverflow(sizeof(T) * newLen)));
p = static_cast<T*>(realloc(_buffer, checkOverflow(newLen)));
ensureValidPointer(p);
// realloc will have copied the old data
} else {
p = static_cast<T*>(malloc(checkOverflow(sizeof(T) * newLen)));
p = static_cast<T*>(malloc(checkOverflow(newLen)));
ensureValidPointer(p);
// copy existing data into buffer
memcpy(p, _buffer, checkOverflow(_size));
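
A note on the Buffer changes above: allocation sizes are now computed from that._size directly, and the move constructor/assignment leave the moved-from Buffer in a valid, reusable state (reset to its inline _local storage) instead of pointing at surrendered memory. The following standalone sketch illustrates that small-buffer-optimization move pattern with simplified names and types; it is not the velocypack code itself.

#include <cstddef>
#include <cstdint>
#include <cstdlib>
#include <cstring>

// Simplified SBO buffer: small payloads live in _local, larger ones on the heap.
struct SboBuffer {
  uint8_t _local[16];
  uint8_t* _buffer = _local;
  std::size_t _capacity = sizeof(_local);
  std::size_t _size = 0;

  SboBuffer() = default;

  // Move: steal the heap allocation, or copy the inline bytes.
  SboBuffer(SboBuffer&& that) noexcept {
    if (that._buffer == that._local) {
      // source uses its inline storage: copy the bytes, keep our own _local
      std::memcpy(_buffer, that._buffer, that._size);
    } else {
      // source owns heap memory: take the pointer, reset the source to _local
      _buffer = that._buffer;
      _capacity = that._capacity;
      that._buffer = that._local;
      that._capacity = sizeof(that._local);
    }
    _size = that._size;
    that._size = 0;  // moved-from object stays empty but fully usable
  }

  ~SboBuffer() {
    if (_buffer != _local) {
      std::free(_buffer);
    }
  }
};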

View File

@@ -91,37 +91,58 @@ class Builder {
public:
Options const* options;
// create an empty Builder, using default Options
Builder();
explicit Builder(Options const* options);
// create an empty Builder, using Options
explicit Builder(Options const* options = &Options::Defaults);
// create an empty Builder, using an existing buffer
explicit Builder(std::shared_ptr<Buffer<uint8_t>> const& buffer,
Options const* options = &Options::Defaults);
// create a Builder that uses an existing Buffer. the Builder will not
// claim ownership for this Buffer
explicit Builder(Buffer<uint8_t>& buffer,
Options const* options = &Options::Defaults);
// populate a Builder from a Slice
explicit Builder(Slice slice, Options const* options = &Options::Defaults);
~Builder() = default;
Builder(Builder const& that);
Builder& operator=(Builder const& that);
Builder(Builder&& that);
Builder& operator=(Builder&& that);
Builder(Builder&& that) noexcept;
Builder& operator=(Builder&& that) noexcept;
// get a const reference to the Builder's Buffer object
std::shared_ptr<Buffer<uint8_t>> const& buffer() const { return _buffer; }
// get a reference to the Builder's Buffer object
// note: this object may be a nullptr if the buffer was already stolen
// from the Builder, or if the Builder has no ownership for the Buffer
std::shared_ptr<Buffer<uint8_t>> const& buffer() const {
return _buffer;
}
Buffer<uint8_t>& bufferRef() const {
if (_bufferPtr == nullptr) {
throw Exception(Exception::InternalError, "Builder has no Buffer");
}
return *_bufferPtr;
}
// steal the Builder's Buffer object. afterwards the Builder
// is unusable
// is unusable - note: this may return a nullptr if the Builder does not
// own the Buffer!
std::shared_ptr<Buffer<uint8_t>> steal() {
// After a steal the Builder is broken!
std::shared_ptr<Buffer<uint8_t>> res = _buffer;
_buffer.reset();
std::shared_ptr<Buffer<uint8_t>> res(std::move(_buffer));
_bufferPtr = nullptr;
_pos = 0;
_start = nullptr;
clear();
return res;
}
uint8_t const* data() const noexcept { return _bufferPtr->data(); }
uint8_t const* data() const noexcept {
VELOCYPACK_ASSERT(_bufferPtr != nullptr);
return _bufferPtr->data();
}
std::string toString() const;
@@ -153,6 +174,7 @@ class Builder {
(void) checkOverflow(_pos + len);
#endif
VELOCYPACK_ASSERT(_bufferPtr != nullptr);
_bufferPtr->reserve(len);
_start = _bufferPtr->data();
}
@@ -161,8 +183,11 @@ class Builder {
void clear() noexcept {
_pos = 0;
_stack.clear();
VELOCYPACK_ASSERT(_bufferPtr != nullptr);
_bufferPtr->reset();
_index.clear();
if (_bufferPtr != nullptr) {
_bufferPtr->reset();
_start = _bufferPtr->data();
}
_keyWritten = false;
}
@@ -564,8 +589,8 @@ class Builder {
uint8_t* addInternal(char const* attrName, std::size_t attrLength, T const& sub) {
bool haveReported = false;
if (!_stack.empty()) {
ValueLength& tos = _stack.back();
if (VELOCYPACK_UNLIKELY(_start[tos] != 0x0b && _start[tos] != 0x14)) {
ValueLength const to = _stack.back();
if (VELOCYPACK_UNLIKELY(_start[to] != 0x0b && _start[to] != 0x14)) {
throw Exception(Exception::BuilderNeedOpenObject);
}
if (VELOCYPACK_UNLIKELY(_keyWritten)) {
@@ -621,9 +646,9 @@ class Builder {
void openCompoundValue(uint8_t type) {
bool haveReported = false;
if (!_stack.empty()) {
ValueLength& tos = _stack.back();
ValueLength const to = _stack.back();
if (!_keyWritten) {
if (VELOCYPACK_UNLIKELY(_start[tos] != 0x06 && _start[tos] != 0x13)) {
if (VELOCYPACK_UNLIKELY(_start[to] != 0x06 && _start[to] != 0x13)) {
throw Exception(Exception::BuilderNeedOpenArray);
}
reportAdd();
@@ -797,11 +822,13 @@ struct ObjectBuilder final : public BuilderContainer,
}
~ObjectBuilder() {
try {
builder->close();
if (!builder->isClosed()) {
builder->close();
}
} catch (...) {
// destructors must not throw. however, we can at least
// signal something is very wrong in debug mode
VELOCYPACK_ASSERT(false);
VELOCYPACK_ASSERT(builder->isClosed());
}
}
};
@@ -826,11 +853,13 @@ struct ArrayBuilder final : public BuilderContainer,
}
~ArrayBuilder() {
try {
builder->close();
if (!builder->isClosed()) {
builder->close();
}
} catch (...) {
// destructors must not throw. however, we can at least
// signal something is very wrong in debug mode
VELOCYPACK_ASSERT(false);
VELOCYPACK_ASSERT(builder->isClosed());
}
}
};
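
The Builder interface changes above add a constructor that writes into a caller-owned Buffer without taking ownership, a bufferRef() accessor that bypasses the shared_ptr, and a steal() that may hand back a nullptr when the Builder never owned its Buffer. A short usage sketch under those assumptions (include paths and namespace as commonly used with velocypack):

#include <velocypack/Buffer.h>
#include <velocypack/Builder.h>
#include <velocypack/Value.h>
#include <memory>

using namespace arangodb::velocypack;

int main() {
  // 1) Builder that does NOT own its Buffer (new constructor in this upgrade):
  Buffer<uint8_t> local;           // caller-owned storage
  Builder nonOwning(local);        // writes into `local`, claims no ownership
  nonOwning.openObject();
  nonOwning.add("answer", Value(42));
  nonOwning.close();
  // bufferRef() gives direct access without going through a shared_ptr
  Buffer<uint8_t>& ref = nonOwning.bufferRef();
  (void)ref.byteSize();

  // 2) Owning Builder: steal() transfers the Buffer out and clears the Builder.
  Builder owning;
  owning.openArray();
  owning.add(Value("x"));
  owning.close();
  std::shared_ptr<Buffer<uint8_t>> stolen = owning.steal();
  // For the non-owning Builder above, steal() would return a nullptr instead.
  return (stolen != nullptr && stolen->byteSize() > 0) ? 0 : 1;
}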

View File

@@ -41,14 +41,15 @@ struct HexDump {
HexDump() = delete;
HexDump(Slice const& slice, int valuesPerLine = 16,
std::string const& separator = " ")
: slice(slice), valuesPerLine(valuesPerLine), separator(separator) {}
std::string const& separator = " ", std::string const& header = "0x")
: slice(slice), valuesPerLine(valuesPerLine), separator(separator), header(header) {}
HexDump(Slice const* slice, int valuesPerLine = 16,
std::string const& separator = " ")
: HexDump(*slice, valuesPerLine, separator) {}
std::string const& separator = " ", std::string const& header = "0x")
: HexDump(*slice, valuesPerLine, separator, header) {}
static std::string toHex(uint8_t value);
static std::string toHex(uint8_t value, std::string const& header = "0x");
static void appendHex(std::string& result, uint8_t value);
std::string toString() const;
friend std::ostream& operator<<(std::ostream&, HexDump const&);
@@ -56,6 +57,7 @@ struct HexDump {
Slice const slice;
int valuesPerLine;
std::string separator;
std::string header;
};
} // namespace arangodb::velocypack
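
HexDump now takes a configurable per-value prefix (header, default "0x") in addition to the values-per-line and separator settings, and exposes appendHex() for incremental output. A small usage sketch, assuming the usual velocypack headers:

#include <velocypack/Builder.h>
#include <velocypack/HexDump.h>
#include <velocypack/Slice.h>
#include <velocypack/Value.h>
#include <iostream>

using namespace arangodb::velocypack;

int main() {
  Builder b;
  b.openObject();
  b.add("key", Value(1));
  b.close();
  Slice s = b.slice();

  // default formatting: 16 values per line, " " separator, "0x" prefix
  std::cout << HexDump(s) << "\n";
  // new in this upgrade: custom prefix, e.g. "\x" for pasting into C string literals
  std::cout << HexDump(s, 16, " ", "\\x") << "\n";
  return 0;
}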

View File

@@ -39,24 +39,48 @@ struct Options;
class Slice;
struct CustomTypeHandler {
virtual ~CustomTypeHandler() {}
virtual ~CustomTypeHandler() = default;
virtual void dump(Slice const&, Dumper*, Slice const&);
virtual std::string toString(Slice const&, Options const*, Slice const&);
};
struct Options {
// Behavior to be applied when dumping VelocyPack values that cannot be
// expressed in JSON without data loss
enum UnsupportedTypeBehavior {
// convert any non-JSON-representable value to null
NullifyUnsupportedType,
// emit a JSON string "(non-representable type ...)"
ConvertUnsupportedType,
// throw an exception for any non-JSON-representable value
FailOnUnsupportedType
};
// Behavior to be applied when building VelocyPack Array/Object values
// with a Builder
enum PaddingBehavior {
// use padding - fill unused head bytes with zero-bytes (ASCII NUL) in
// order to avoid a later memmove
UsePadding,
// don't pad and do not fill any gaps with zero-bytes (ASCII NUL).
// instead, memmove data down so there is no gap between the head bytes
// and the payload
NoPadding,
// pad in cases the Builder considers it useful, and don't pad in other
// cases when the Builder doesn't consider it useful
Flexible
};
Options() {}
// Dumper behavior when a VPack value is serialized to JSON that
// has no JSON equivalent
UnsupportedTypeBehavior unsupportedTypeBehavior = FailOnUnsupportedType;
// Builder behavior w.r.t. padding or memmoving data
PaddingBehavior paddingBehavior = PaddingBehavior::Flexible;
// custom attribute translator for integer keys
AttributeTranslator* attributeTranslator = nullptr;
// custom type handler used for processing custom types by Dumper and Slicer
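
The new Options::PaddingBehavior setting controls whether the Builder fills unused head bytes with zero-bytes (avoiding a memmove of the payload) or always moves the data down to keep the result compact. A hedged usage sketch:

#include <velocypack/Builder.h>
#include <velocypack/Options.h>
#include <velocypack/Value.h>

using namespace arangodb::velocypack;

int main() {
  Options opts;  // starts out equivalent to Options::Defaults
  // UsePadding: keep zero-byte gaps and skip the memmove.
  // NoPadding:  always memmove so no gap remains.
  // Flexible:   let the Builder decide per case (the default).
  opts.paddingBehavior = Options::PaddingBehavior::UsePadding;

  Builder b(&opts);
  b.openArray();
  for (int i = 0; i < 4; ++i) {
    b.add(Value(i));
  }
  b.close();  // close()/closeArray() honor paddingBehavior here
  return 0;
}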

View File

@@ -42,7 +42,7 @@ struct Sink {
Sink(Sink const&) = delete;
Sink& operator=(Sink const&) = delete;
virtual ~Sink() {}
virtual ~Sink() = default;
virtual void push_back(char c) = 0;
virtual void append(std::string const& p) = 0;
virtual void append(char const* p) = 0;

View File

@@ -47,13 +47,12 @@
namespace arangodb {
namespace velocypack {
// This class provides read only access to a VPack value, it is
// intentionally light-weight (only one pointer value), such that
// it can easily be used to traverse larger VPack values.
// A Slice does not own the VPack data it points to!
class Slice {
// This class provides read only access to a VPack value, it is
// intentionally light-weight (only one pointer value), such that
// it can easily be used to traverse larger VPack values.
// A Slice does not own the VPack data it points to!
friend class Builder;
friend class ArrayIterator;
friend class ObjectIterator;
@@ -1105,6 +1104,9 @@ class Slice {
}
};
static_assert(!std::is_polymorphic<Slice>::value, "Slice must not be polymorphic");
static_assert(!std::has_virtual_destructor<Slice>::value, "Slice must not have virtual dtor");
} // namespace arangodb::velocypack
} // namespace arangodb

View File

@@ -70,6 +70,7 @@ using VPackNormalizedCompare = arangodb::velocypack::NormalizedCompare;
#ifdef VELOCYPACK_BUFFER_H
#ifndef VELOCYPACK_ALIAS_BUFFER
#define VELOCYPACK_ALIAS_BUFFER
using VPackCharBuffer = arangodb::velocypack::CharBuffer;
using VPackBufferUInt8 = arangodb::velocypack::UInt8Buffer;
template<typename T> using VPackBuffer = arangodb::velocypack::Buffer<T>;
#endif

View File

@@ -89,6 +89,7 @@ bool checkAttributeUniquenessUnsortedBrute(ObjectIterator& it) {
do {
// key(true) guarantees a String as returned type
StringRef key = it.key(true).stringRef();
ValueLength index = it.index();
// compare with all other already looked-at keys
for (ValueLength i = 0; i < index; ++i) {
@@ -124,19 +125,9 @@ bool checkAttributeUniquenessUnsortedSet(ObjectIterator& it) {
return true;
}
} // namespace
// create an empty Builder, using default Options
Builder::Builder()
: _buffer(std::make_shared<Buffer<uint8_t>>()),
_bufferPtr(_buffer.get()),
_start(_bufferPtr->data()),
_pos(0),
_keyWritten(false),
options(&Options::Defaults) {}
// create an empty Builder, with custom Options
// create an empty Builder, using Options
Builder::Builder(Options const* options)
: _buffer(std::make_shared<Buffer<uint8_t>>()),
_bufferPtr(_buffer.get()),
@@ -149,9 +140,11 @@ Builder::Builder(Options const* options)
}
}
// create an empty Builder, using an existing buffer
Builder::Builder(std::shared_ptr<Buffer<uint8_t>> const& buffer, Options const* options)
: _buffer(buffer),
_bufferPtr(_buffer.get()),
_start(nullptr),
_pos(0),
_keyWritten(false),
options(options) {
@@ -165,42 +158,62 @@ Builder::Builder(std::shared_ptr<Buffer<uint8_t>> const& buffer, Options const*
}
}
// create a Builder that uses an existing Buffer. the Builder will not
// claim ownership for this Buffer
Builder::Builder(Buffer<uint8_t>& buffer, Options const* options)
: _bufferPtr(nullptr),
: _bufferPtr(&buffer),
_start(_bufferPtr->data()),
_pos(buffer.size()),
_keyWritten(false),
options(options) {
_buffer.reset(&buffer, BufferNonDeleter<uint8_t>());
_bufferPtr = _buffer.get();
_start = _bufferPtr->data();
if (VELOCYPACK_UNLIKELY(options == nullptr)) {
throw Exception(Exception::InternalError, "Options cannot be a nullptr");
}
}
// populate a Builder from a Slice
Builder::Builder(Slice slice, Options const* options)
: Builder(options) {
add(slice);
}
Builder::Builder(Builder const& that)
: _buffer(std::make_shared<Buffer<uint8_t>>(*that._buffer)),
_bufferPtr(_buffer.get()),
_start(_bufferPtr->data()),
: _bufferPtr(nullptr),
_start(nullptr),
_pos(that._pos),
_stack(that._stack),
_index(that._index),
_keyWritten(that._keyWritten),
options(that.options) {
VELOCYPACK_ASSERT(options != nullptr);
if (that._buffer == nullptr) {
_bufferPtr = that._bufferPtr;
} else {
_buffer = std::make_shared<Buffer<uint8_t>>(*that._buffer);
_bufferPtr = _buffer.get();
}
if (_bufferPtr != nullptr) {
_start = _bufferPtr->data();
}
}
Builder& Builder::operator=(Builder const& that) {
if (this != &that) {
_buffer = std::make_shared<Buffer<uint8_t>>(*that._buffer);
_bufferPtr = _buffer.get();
_start = _bufferPtr->data();
if (that._buffer == nullptr) {
_buffer.reset();
_bufferPtr = that._bufferPtr;
} else {
_buffer = std::make_shared<Buffer<uint8_t>>(*that._buffer);
_bufferPtr = _buffer.get();
}
if (_bufferPtr == nullptr) {
_start = nullptr;
} else {
_start = _bufferPtr->data();
}
_pos = that._pos;
_stack = that._stack;
_index = that._index;
@@ -211,41 +224,50 @@ Builder& Builder::operator=(Builder const& that) {
return *this;
}
Builder::Builder(Builder&& that) {
if (VELOCYPACK_UNLIKELY(!that.isClosed())) {
throw Exception(Exception::InternalError, "Cannot move an open Builder");
Builder::Builder(Builder&& that) noexcept
: _buffer(std::move(that._buffer)),
_bufferPtr(nullptr),
_start(nullptr),
_pos(that._pos),
_stack(std::move(that._stack)),
_index(std::move(that._index)),
_keyWritten(that._keyWritten),
options(that.options) {
if (_buffer != nullptr) {
_bufferPtr = _buffer.get();
} else {
_bufferPtr = that._bufferPtr;
}
_buffer = that._buffer;
_bufferPtr = _buffer.get();
_start = _bufferPtr->data();
_pos = that._pos;
_stack.clear();
_stack.swap(that._stack);
_index.clear();
_index.swap(that._index);
_keyWritten = that._keyWritten;
options = that.options;
that._pos = 0;
that._keyWritten = false;
if (_bufferPtr != nullptr) {
_start = _bufferPtr->data();
}
VELOCYPACK_ASSERT(that._buffer == nullptr);
that._bufferPtr = nullptr;
that.clear();
}
Builder& Builder::operator=(Builder&& that) {
if (VELOCYPACK_UNLIKELY(!that.isClosed())) {
throw Exception(Exception::InternalError, "Cannot move an open Builder");
}
Builder& Builder::operator=(Builder&& that) noexcept {
if (this != &that) {
_buffer = that._buffer;
_bufferPtr = _buffer.get();
_start = _bufferPtr->data();
_buffer = std::move(that._buffer);
if (_buffer != nullptr) {
_bufferPtr = _buffer.get();
} else {
_bufferPtr = that._bufferPtr;
}
if (_bufferPtr != nullptr) {
_start = _bufferPtr->data();
} else {
_start = nullptr;
}
_pos = that._pos;
_stack.clear();
_stack.swap(that._stack);
_index.clear();
_index.swap(that._index);
_stack = std::move(that._stack);
_index = std::move(that._index);
_keyWritten = that._keyWritten;
options = that.options;
that._pos = 0;
that._keyWritten = false;
VELOCYPACK_ASSERT(that._buffer == nullptr);
that._bufferPtr = nullptr;
that.clear();
}
return *this;
}
@@ -444,41 +466,50 @@ Builder& Builder::closeArray(ValueLength tos, std::vector<ValueLength>& index) {
offsetSize = 8;
}
if (offsetSize < 8 &&
!needIndexTable &&
options->paddingBehavior == Options::PaddingBehavior::UsePadding) {
// if we are allowed to use padding, we will pad to 8 bytes anyway. as we are not
// using an index table, we can also use type 0x05 for all Arrays without making
// things worse space-wise
offsetSize = 8;
}
// Maybe we need to move down data:
if (offsetSize == 1) {
if (offsetSize == 1 || offsetSize == 2) {
// check if one of the first entries in the array is ValueType::None
// (0x00). in this case, we could not distinguish between a None (0x00)
// and the optional padding. so we must prevent the memmove here
bool allowMemMove = true;
std::size_t const n = (std::min)(std::size_t(6), index.size());
for (std::size_t i = 0; i < n; i++) {
if (_start[tos + index[i]] == 0x00) {
allowMemMove = false;
break;
}
}
bool allowMemMove = options->paddingBehavior == Options::PaddingBehavior::NoPadding ||
(offsetSize == 1 && options->paddingBehavior == Options::PaddingBehavior::Flexible);
if (allowMemMove) {
ValueLength targetPos = 3;
if (!needIndexTable) {
targetPos = 2;
}
if (_pos > (tos + 9)) {
ValueLength len = _pos - (tos + 9);
memmove(_start + tos + targetPos, _start + tos + 9, checkOverflow(len));
}
ValueLength const diff = 9 - targetPos;
rollback(diff);
if (needIndexTable) {
std::size_t const n = index.size();
for (std::size_t i = 0; i < n; i++) {
index[i] -= diff;
std::size_t const n = (std::min)(std::size_t(8 - 2 * offsetSize), index.size());
for (std::size_t i = 0; i < n; i++) {
if (_start[tos + index[i]] == 0x00) {
allowMemMove = false;
break;
}
} // Note: if !needIndexTable the index array is now wrong!
}
if (allowMemMove) {
ValueLength targetPos = 1 + 2 * offsetSize;
if (!needIndexTable) {
targetPos -= offsetSize;
}
if (_pos > (tos + 9)) {
ValueLength len = _pos - (tos + 9);
memmove(_start + tos + targetPos, _start + tos + 9, checkOverflow(len));
}
ValueLength const diff = 9 - targetPos;
rollback(diff);
if (needIndexTable) {
std::size_t const n = index.size();
for (std::size_t i = 0; i < n; i++) {
index[i] -= diff;
}
} // Note: if !needIndexTable the index array is now wrong!
}
}
}
// One could move down things in the offsetSize == 2 case as well,
// since we only need 4 bytes in the beginning. However, saving these
// 4 bytes has been sacrificed on the Altar of Performance.
// Now build the table:
if (needIndexTable) {
@@ -558,6 +589,13 @@ Builder& Builder::close() {
(head == 0x06 && options->buildUnindexedArrays) ||
(head == 0x0b && (options->buildUnindexedObjects || index.size() == 1))) {
if (closeCompactArrayOrObject(tos, isArray, index)) {
// And, if desired, check attribute uniqueness:
if (options->checkAttributeUniqueness &&
index.size() > 1 &&
!checkAttributeUniqueness(Slice(_start + tos))) {
// duplicate attribute name!
throw Exception(Exception::DuplicateAttributeName);
}
return *this;
}
// This might fall through, if closeCompactArrayOrObject gave up!
@@ -583,9 +621,20 @@ Builder& Builder::close() {
// case we would win back 6 bytes but would need one byte per subvalue
// for the index table
offsetSize = 1;
// One could move down things in the offsetSize == 2 case as well,
// since we only need 4 bytes in the beginning. However, saving these
// 4 bytes has been sacrificed on the Altar of Performance.
} else if (_pos - tos + 2 * index.size() <= 0xffff) {
offsetSize = 2;
} else if (_pos - tos + 4 * index.size() <= 0xffffffffu) {
offsetSize = 4;
}
if (offsetSize < 4 &&
(options->paddingBehavior == Options::PaddingBehavior::NoPadding ||
(offsetSize == 1 && options->paddingBehavior == Options::PaddingBehavior::Flexible))) {
// Maybe we need to move down data:
ValueLength targetPos = 3;
ValueLength targetPos = 1 + 2 * offsetSize;
if (_pos > (tos + 9)) {
ValueLength len = _pos - (tos + 9);
memmove(_start + tos + targetPos, _start + tos + 9, checkOverflow(len));
@@ -596,14 +645,6 @@ Builder& Builder::close() {
for (std::size_t i = 0; i < n; i++) {
index[i] -= diff;
}
// One could move down things in the offsetSize == 2 case as well,
// since we only need 4 bytes in the beginning. However, saving these
// 4 bytes has been sacrificed on the Altar of Performance.
} else if (_pos - tos + 2 * index.size() <= 0xffff) {
offsetSize = 2;
} else if (_pos - tos + 4 * index.size() <= 0xffffffffu) {
offsetSize = 4;
}
// Now build the table:
@@ -1042,7 +1083,7 @@ bool Builder::checkAttributeUniqueness(Slice obj) const {
VELOCYPACK_ASSERT(options->checkAttributeUniqueness == true);
VELOCYPACK_ASSERT(obj.isObject());
VELOCYPACK_ASSERT(obj.length() >= 2);
if (obj.isSorted()) {
// object attributes are sorted
return checkAttributeUniquenessSorted(obj);
@@ -1091,7 +1132,7 @@ bool Builder::checkAttributeUniquenessUnsorted(Slice obj) const {
// will use an std::unordered_set for O(1) lookups but with heap
// allocations
ObjectIterator it(obj, true);
if (it.size() <= ::LinearAttributeUniquenessCutoff) {
return ::checkAttributeUniquenessUnsortedBrute(it);
}
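
A behavioral note on the Builder::close() changes above: attribute-uniqueness checking (when enabled via Options) now also runs for compact, unindexed objects and raises Exception::DuplicateAttributeName on a repeated key. A hedged sketch of how that surfaces to callers:

#include <velocypack/Builder.h>
#include <velocypack/Exception.h>
#include <velocypack/Options.h>
#include <velocypack/Value.h>
#include <iostream>

using namespace arangodb::velocypack;

int main() {
  Options opts;
  opts.checkAttributeUniqueness = true;  // disabled by default
  opts.buildUnindexedObjects = true;     // force the compact (0x14) object format

  Builder b(&opts);
  b.openObject();
  b.add("dup", Value(1));
  b.add("dup", Value(2));  // same attribute name twice
  try {
    b.close();  // with this upgrade, the compact path also detects the duplicate
  } catch (Exception const& ex) {
    std::cout << "caught: " << ex.what() << "\n";  // DuplicateAttributeName
  }
  return 0;
}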

View File

@@ -31,15 +31,17 @@
using namespace arangodb::velocypack;
std::string HexDump::toHex(uint8_t value) {
std::string result("0x");
std::string HexDump::toHex(uint8_t value, std::string const& header) {
std::string result(header);
appendHex(result, value);
return result;
}
void HexDump::appendHex(std::string& result, uint8_t value) {
uint8_t x = value / 16;
result.push_back((x < 10 ? ('0' + x) : ('a' + x - 10)));
x = value % 16;
result.push_back((x < 10 ? ('0' + x) : ('a' + x - 10)));
return result;
}
std::string HexDump::toString() const {
@@ -59,7 +61,8 @@ std::string HexDump::toString() const {
}
}
result.append(HexDump::toHex(it));
result.append(header);
HexDump::appendHex(result, it);
++current;
}

View File

@@ -28,9 +28,10 @@
using namespace arangodb::velocypack;
#if __cplusplus < 201703L
constexpr uint8_t SliceStaticData::FixedTypeLengths[256];
constexpr ValueType SliceStaticData::TypeMap[256];
constexpr unsigned int SliceStaticData::WidthMap[32];
constexpr unsigned int SliceStaticData::FirstSubMap[32];
constexpr uint64_t SliceStaticData::PrecalculatedHashesForDefaultSeed[256];
#endif
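
The #if __cplusplus < 201703L block above exists because, before C++17, static constexpr data members were not implicitly inline and still required an out-of-line definition when ODR-used. A minimal illustration of that rule with generic names (not velocypack code):

struct Table {
  // in-class declaration with initializer (C++11 and later)
  static constexpr int Sizes[3] = {1, 2, 4};
};

#if __cplusplus < 201703L
// pre-C++17: still needs this definition if Sizes is ODR-used,
// e.g. when its address is taken or a reference is bound to it
constexpr int Table::Sizes[3];
#endif
// since C++17 the member is implicitly inline and the extra
// definition is redundant (allowed, but deprecated)

int main() {
  const int* p = &Table::Sizes[0];  // ODR-use
  return *p == 1 ? 0 : 1;
}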

View File

@@ -1824,7 +1824,7 @@ int fetchEdgesFromEngines(transaction::Methods& trx,
futures.emplace_back(
network::sendRequestRetry(pool, "server:" + engine.first, fuerte::RestVerb::Put,
url + StringUtils::itoa(engine.second),
*leased->buffer(), network::Timeout(CL_DEFAULT_TIMEOUT)));
leased->bufferRef(), network::Timeout(CL_DEFAULT_TIMEOUT)));
}
for (Future<network::Response>& f : futures) {
@@ -1912,7 +1912,7 @@ int fetchEdgesFromEngines(
futures.emplace_back(
network::sendRequestRetry(pool, "server:" + engine.first, fuerte::RestVerb::Put,
url + StringUtils::itoa(engine.second),
*leased->buffer(), network::Timeout(CL_DEFAULT_TIMEOUT)));
leased->bufferRef(), network::Timeout(CL_DEFAULT_TIMEOUT)));
}
for (Future<network::Response>& f : futures) {
@@ -1999,7 +1999,7 @@ void fetchVerticesFromEngines(
futures.emplace_back(
network::sendRequestRetry(pool, "server:" + engine.first, fuerte::RestVerb::Put,
url + StringUtils::itoa(engine.second),
*(leased->buffer()), network::Timeout(CL_DEFAULT_TIMEOUT)));
leased->bufferRef(), network::Timeout(CL_DEFAULT_TIMEOUT)));
}
for (Future<network::Response>& f : futures) {

View File

@@ -510,8 +510,8 @@ OperationResult GraphOperations::getDocument(std::string const& collectionName,
return result;
}
GraphOperations::VPackBufferPtr GraphOperations::_getSearchSlice(
const std::string& key, boost::optional<TRI_voc_rid_t>& rev) const {
GraphOperations::VPackBufferPtr GraphOperations::_getSearchSlice(std::string const& key,
boost::optional<TRI_voc_rid_t>& rev) const {
VPackBuilder builder;
{
VPackObjectBuilder guard(&builder);
@@ -521,11 +521,11 @@ GraphOperations::VPackBufferPtr GraphOperations::_getSearchSlice(
}
}
return builder.buffer();
return builder.steal();
}
OperationResult GraphOperations::removeEdge(const std::string& definitionName,
const std::string& key,
OperationResult GraphOperations::removeEdge(std::string const& definitionName,
std::string const& key,
boost::optional<TRI_voc_rid_t> rev,
bool waitForSync, bool returnOld) {
return removeEdgeOrVertex(definitionName, key, rev, waitForSync, returnOld);
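
The call-site changes in this and the following files replace builder.buffer() (which returns a shared_ptr still co-owned by the Builder) with builder.steal() (which transfers sole ownership), and replace *builder.buffer() dereferences with bufferRef(). A hedged sketch of the hand-out pattern, using a hypothetical makePayload() helper:

#include <velocypack/Buffer.h>
#include <velocypack/Builder.h>
#include <velocypack/Value.h>
#include <memory>

using namespace arangodb::velocypack;

// hypothetical helper: build a small document and hand the bytes to the caller
std::shared_ptr<Buffer<uint8_t>> makePayload() {
  Builder builder;
  builder.openObject();
  builder.add("_key", Value("abc"));
  builder.close();
  // steal() hands over the only reference to the Buffer; buffer() would keep
  // the Builder as a co-owner for no benefit when the Builder is about to die
  return builder.steal();
}

int main() {
  auto payload = makePayload();
  return (payload != nullptr && payload->byteSize() > 0) ? 0 : 1;
}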

View File

@@ -201,7 +201,7 @@ void MMFilesCollectionKeys::dumpDocs(arangodb::velocypack::Builder& result,
THROW_ARANGO_EXCEPTION(TRI_ERROR_BAD_PARAMETER);
}
auto buffer = result.buffer();
auto& buffer = result.bufferRef();
size_t offset = 0;
for (VPackSlice it : VPackArrayIterator(ids)) {
@@ -223,7 +223,7 @@ void MMFilesCollectionKeys::dumpDocs(arangodb::velocypack::Builder& result,
TRI_ASSERT(current.isObject());
result.add(current);
if (buffer->byteSize() > maxChunkSize) {
if (buffer.byteSize() > maxChunkSize) {
// buffer is full
break;
}

View File

@@ -525,7 +525,7 @@ void RestGraphHandler::generateResultMergedWithObject(VPackSlice obj,
result.close();
VPackBuilder merged = VelocyPackHelper::merge(result.slice(), obj, false, false);
writeResult(std::move(*merged.buffer().get()), options);
writeResult(merged.slice(), options);
} catch (...) {
// Building the error response failed
generateError(rest::ResponseCode::SERVER_ERROR, TRI_ERROR_INTERNAL,

View File

@@ -636,7 +636,7 @@ arangodb::Result RocksDBReplicationContext::dumpDocuments(
auto* rcoll = static_cast<RocksDBMetaCollection*>(cIter->logical->getPhysical());
const uint64_t cObjectId = rcoll->objectId();
auto buffer = b.buffer();
auto& buffer = b.bufferRef();
bool hasMore = true;
b.openArray(true);
size_t oldPos = from;
@@ -693,7 +693,7 @@ arangodb::Result RocksDBReplicationContext::dumpDocuments(
hasMore = cIter->hasMore();
}
if (buffer->byteSize() > maxChunkSize) {
if (buffer.byteSize() > maxChunkSize) {
// result is big enough so that we abort prematurely
full = true;
}

View File

@@ -673,7 +673,7 @@ RocksDBReplicationResult rocksutils::tailWal(TRI_vocbase_t* vocbase, uint64_t ti
// we need to check if the builder is bigger than the chunksize,
// only after we printed a full WriteBatch. Otherwise a client might
// never read the full writebatch
while (iterator->Valid() && lastTick <= tickEnd && builder.buffer()->size() < chunkSize) {
while (iterator->Valid() && lastTick <= tickEnd && builder.bufferRef().size() < chunkSize) {
s = iterator->status();
if (!s.ok()) {
LOG_TOPIC("ed096", ERR, Logger::REPLICATION) << "error during WAL scan: " << s.ToString();

View File

@@ -392,7 +392,7 @@ OperationResult transaction::helpers::buildCountResult(
}
resultBuilder.add(VPackValue(result));
}
return OperationResult(Result(), resultBuilder.buffer());
return OperationResult(Result(), resultBuilder.steal());
}
/// @brief creates an id string from a custom _id value and the _key string

View File

@@ -2609,7 +2609,7 @@ futures::Future<OperationResult> transaction::Methods::countCoordinatorHelper(
// return number from cache
VPackBuilder resultBuilder;
resultBuilder.add(VPackValue(documents));
return OperationResult(Result(), resultBuilder.buffer());
return OperationResult(Result(), resultBuilder.steal());
}
/// @brief count the number of documents in a collection