mirror of https://gitee.com/bigwinds/arangodb

Feature/reduce extraction to projection (#2792)

* reduce extractions to projections
* recycle string buffers in SocketTask
* micro optimizations for mmfiles indexes
* added special lookup function for _key
* moved function into the correct file
* speed up key buffer allocations a bit
* added noexcept specifier
* correctly name variable
* explicitly move bounds
* fix and speedup from/toPersistent functions
* reuse string from ManagedDocumentResult for multiple lookups
* use move-assign
* a bit less work for single server
* speedup AQL function HASH
* single fetch optimization
* performance optimization for the case when no documents need to be returned
* make reduce-extraction-to-projection a RocksDB-only optimizer rule
* cppcheck
* try to fix compile error on MacOS
* bug fix for MacOSX
* missing namespace (in Windows compile)

parent 1835512113
commit a5a25754ed
@ -57,12 +57,12 @@ class ArrayIterator {
         _current(other._current) {}

   ArrayIterator& operator=(ArrayIterator const& other) = delete;
-  ArrayIterator& operator=(ArrayIterator && other) = default;
+  ArrayIterator& operator=(ArrayIterator&& other) = default;

   // prefix ++
   ArrayIterator& operator++() {
     ++_position;
-    if (_position <= _size && _current != nullptr) {
+    if (_position < _size && _current != nullptr) {
       _current += Slice(_current).byteSize();
     } else {
       _current = nullptr;

@ -210,12 +210,12 @@ class ObjectIterator {
         _useSequentialIteration(other._useSequentialIteration) {}

   ObjectIterator& operator=(ObjectIterator const& other) = delete;
-  ObjectIterator& operator=(ObjectIterator && other) = default;
+  ObjectIterator& operator=(ObjectIterator&& other) = default;

   // prefix ++
   ObjectIterator& operator++() {
     ++_position;
-    if (_position <= _size && _current != nullptr) {
+    if (_position < _size && _current != nullptr) {
       // skip over key
       _current += Slice(_current).byteSize();
       // skip over value

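Not part of the commit, just a minimal usage sketch assuming the standalone velocypack library: it exercises the ArrayIterator whose prefix ++ is adjusted above, where each increment either advances _current to the next subvalue or invalidates the iterator at the end of the array.

#include <velocypack/Builder.h>
#include <velocypack/Iterator.h>
#include <velocypack/Slice.h>
#include <velocypack/velocypack-aliases.h>
#include <iostream>

int main() {
  VPackBuilder b;
  b.openArray();
  b.add(VPackValue("foo"));
  b.add(VPackValue("bar"));
  b.add(VPackValue("baz"));
  b.close();

  // each ++ advances _position and moves _current to the next subvalue,
  // or invalidates the iterator once the end of the array is reached
  for (VPackArrayIterator it(b.slice()); it.valid(); ++it) {
    std::cout << (*it).copyString() << std::endl;
  }
  return 0;
}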
@ -177,6 +177,7 @@ Builder& Builder::closeEmptyArrayOrObject(ValueLength tos, bool isArray) {
|
|||
|
||||
bool Builder::closeCompactArrayOrObject(ValueLength tos, bool isArray,
|
||||
std::vector<ValueLength> const& index) {
|
||||
|
||||
// use compact notation
|
||||
ValueLength nLen =
|
||||
getVariableValueLength(static_cast<ValueLength>(index.size()));
|
||||
|
@ -222,30 +223,36 @@ bool Builder::closeCompactArrayOrObject(ValueLength tos, bool isArray,
|
|||
}
|
||||
|
||||
Builder& Builder::closeArray(ValueLength tos, std::vector<ValueLength>& index) {
|
||||
VELOCYPACK_ASSERT(!index.empty());
|
||||
|
||||
// fix head byte in case a compact Array was originally requested:
|
||||
_start[tos] = 0x06;
|
||||
|
||||
bool needIndexTable = true;
|
||||
bool needNrSubs = true;
|
||||
|
||||
if (index.size() == 1) {
|
||||
// just one array entry
|
||||
needIndexTable = false;
|
||||
needNrSubs = false;
|
||||
} else if ((_pos - tos) - index[0] == index.size() * (index[1] - index[0])) {
|
||||
// In this case it could be that all entries have the same length
|
||||
// and we do not need an offset table at all:
|
||||
bool noTable = true;
|
||||
bool buildIndexTable = false;
|
||||
ValueLength const subLen = index[1] - index[0];
|
||||
if ((_pos - tos) - index[index.size() - 1] != subLen) {
|
||||
noTable = false;
|
||||
buildIndexTable = true;
|
||||
} else {
|
||||
for (size_t i = 1; i < index.size() - 1; i++) {
|
||||
if (index[i + 1] - index[i] != subLen) {
|
||||
noTable = false;
|
||||
// different lengths
|
||||
buildIndexTable = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
if (noTable) {
|
||||
|
||||
if (!buildIndexTable) {
|
||||
needIndexTable = false;
|
||||
needNrSubs = false;
|
||||
}
|
||||
|
@ -273,22 +280,35 @@ Builder& Builder::closeArray(ValueLength tos, std::vector<ValueLength>& index) {
|
|||
|
||||
// Maybe we need to move down data:
|
||||
if (offsetSize == 1) {
|
||||
ValueLength targetPos = 3;
|
||||
if (!needIndexTable) {
|
||||
targetPos = 2;
|
||||
}
|
||||
if (_pos > (tos + 9)) {
|
||||
ValueLength len = _pos - (tos + 9);
|
||||
memmove(_start + tos + targetPos, _start + tos + 9, checkOverflow(len));
|
||||
}
|
||||
ValueLength const diff = 9 - targetPos;
|
||||
_pos -= diff;
|
||||
if (needIndexTable) {
|
||||
size_t const n = index.size();
|
||||
for (size_t i = 0; i < n; i++) {
|
||||
index[i] -= diff;
|
||||
// check if one of the first entries in the array is ValueType::None
|
||||
// (0x00). in this case, we could not distinguish between a None (0x00)
|
||||
// and the optional padding. so we must prevent the memmove here
|
||||
bool allowMemMove = true;
|
||||
size_t const n = (std::min)(size_t(6), index.size());
|
||||
for (size_t i = 0; i < n; i++) {
|
||||
if (_start[tos + index[i]] == 0x00) {
|
||||
allowMemMove = false;
|
||||
break;
|
||||
}
|
||||
} // Note: if !needIndexTable the index array is now wrong!
|
||||
}
|
||||
if (allowMemMove) {
|
||||
ValueLength targetPos = 3;
|
||||
if (!needIndexTable) {
|
||||
targetPos = 2;
|
||||
}
|
||||
if (_pos > (tos + 9)) {
|
||||
ValueLength len = _pos - (tos + 9);
|
||||
memmove(_start + tos + targetPos, _start + tos + 9, checkOverflow(len));
|
||||
}
|
||||
ValueLength const diff = 9 - targetPos;
|
||||
_pos -= diff;
|
||||
if (needIndexTable) {
|
||||
size_t const n = index.size();
|
||||
for (size_t i = 0; i < n; i++) {
|
||||
index[i] -= diff;
|
||||
}
|
||||
} // Note: if !needIndexTable the index array is now wrong!
|
||||
}
|
||||
}
|
||||
// One could move down things in the offsetSize == 2 case as well,
|
||||
// since we only need 4 bytes in the beginning. However, saving these
|
||||
|
|
|
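Not part of the commit: a small sketch, assuming the standalone velocypack library, that contrasts the compact array notation handled by closeCompactArrayOrObject (requested via Options::buildUnindexedArrays) with the default encoding finalized by closeArray above, which may drop the offset table entirely when all members have equal byte size.

#include <velocypack/Builder.h>
#include <velocypack/Options.h>
#include <velocypack/Slice.h>
#include <velocypack/velocypack-aliases.h>
#include <cstdio>

int main() {
  // request the compact notation ("use compact notation" branch above)
  VPackOptions options;
  options.buildUnindexedArrays = true;

  VPackBuilder compact(&options);
  compact.openArray();
  for (uint64_t i = 0; i < 4; ++i) {
    compact.add(VPackValue(i));
  }
  compact.close();

  // default options: closeArray() picks the final layout, possibly without
  // an index table because all members here have the same byte size
  VPackBuilder indexed;
  indexed.openArray();
  for (uint64_t i = 0; i < 4; ++i) {
    indexed.add(VPackValue(i));
  }
  indexed.close();

  std::printf("compact: head 0x%02x, %llu bytes\n",
              static_cast<unsigned>(compact.slice().head()),
              static_cast<unsigned long long>(compact.slice().byteSize()));
  std::printf("indexed: head 0x%02x, %llu bytes\n",
              static_cast<unsigned>(indexed.slice().head()),
              static_cast<unsigned long long>(indexed.slice().byteSize()));
  return 0;
}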
@ -445,6 +445,9 @@ The following optimizer rules may appear in the `rules` attribute of a plan:
 * `remove-sort-rand`: will appear when a *SORT RAND()* expression is removed by
   moving the random iteration into an *EnumerateCollectionNode*. This optimizer rule
   is specific for the MMFiles storage engine.
+* `reduce-extraction-to-projection`: will appear when an *EnumerateCollectionNode* that
+  would have extracted an entire document was modified to return only a projection of each
+  document. This optimizer rule is specific for the RocksDB storage engine.

 The following optimizer rules may appear in the `rules` attribute of cluster plans:

README
@ -0,0 +1,77 @@
****** ArangoDB ******

ArangoDB is a multi-model, open-source database with flexible data models for
documents, graphs, and key-values. Build high performance applications using a
convenient SQL-like query language or JavaScript extensions. Use ACID
transactions if you require them. Scale horizontally with a few mouse clicks.

The supported data models can be mixed in queries and allow ArangoDB to be the
aggregation point for your data.

To get started, try one of our 10 minutes tutorials in your favorite
programming language or try one of our ArangoDB_Cookbook_recipes.

For the impatient: download and install ArangoDB. Start the server arangod and
point your browser to http://127.0.0.1:8529/.

***** Key Features in ArangoDB *****

* Multi-Model: Documents, graphs and key-value pairs — model your data as
  you see fit for your application.
* Joins: Conveniently join what belongs together for flexible ad-hoc
  querying, less data redundancy.
* Transactions: Easy application development keeping your data consistent
  and safe. No hassle in your client.

Here is an AQL query that makes use of all those features:

[AQL Query Example]

Joins and transactions are key features for flexible, secure data designs,
widely used in relational databases but lacking in many NoSQL products.
However, there is no need to forgo them in ArangoDB. You decide how and when to
use joins and strong consistency guarantees, without sacrificing performance
and scalability.

Furthermore, ArangoDB offers a JavaScript framework called Foxx that is
executed in the database server with direct access to the data. Build your own
data-centric microservices with a few lines of code:

Microservice Example

[Microservice Example]

By extending the HTTP API with user code written in JavaScript, ArangoDB can be
turned into a strict schema-enforcing persistence engine.

Next step, bundle your Foxx application as a docker_container and get it
running in the cloud.

Other features of ArangoDB include:

* Use a data-centric microservices approach with ArangoDB Foxx and fuse
  your application-logic and database together for maximal throughput
* JavaScript for all: no language zoo, you can use one language from your
  browser to your back-end
* Flexible data modeling: model your data as combination of key-value
  pairs, documents or graphs - perfect for social relations
* Different storage engines: ArangoDB provides a storage engine for mostly
  in-memory operations and an alternative storage engine based on RocksDB
  which handle datasets that are much bigger than RAM.
* Powerful query language (AQL) to retrieve and modify data
* Transactions: run queries on multiple documents or collections with
  optional transactional consistency and isolation
* Replication and Sharding: set up the database in a master-slave
  configuration or spread bigger datasets across multiple servers
* Configurable durability: let the application decide if it needs more
  durability or more performance
* Schema-free schemata let you combine the space efficiency of MySQL with
  the performance power of NoSQL
* Free index choice: use the correct index for your problem, be it a
  skiplist or a fulltext search
* ArangoDB is multi-threaded - exploit the power of all your cores
* It is open source (Apache License 2.0)

For more in-depth information read the design_goals_of_ArangoDB

***** Latest Release *****

Packages for all supported platforms can be downloaded from
https://www.arangodb.com/download.

Please also check what's_new_in_ArangoDB.

***** More Information *****

Please check the Installation_Manual for installation and compilation
instructions.

The User_Manual has an introductory chapter showing the basic operations of
ArangoDB.

***** Stay in Contact *****

We really appreciate feature requests and bug reports. Please use our Github
issue tracker for reporting them:
https://github.com/arangodb/arangodb/issues

You can use our Google group for improvements, feature requests, comments:
https://www.arangodb.com/community

StackOverflow is great for questions about AQL, usage scenarios etc.
https://stackoverflow.com/questions/tagged/arangodb

To chat with the community and the developers we offer a Slack chat:
https://slack.arangodb.com/

@ -45,6 +45,7 @@ uint64_t AqlValue::hash(transaction::Methods* trx, uint64_t seed) const {
|
|||
switch (type()) {
|
||||
case VPACK_SLICE_POINTER:
|
||||
case VPACK_INLINE:
|
||||
case VPACK_MANAGED_SLICE:
|
||||
case VPACK_MANAGED_BUFFER: {
|
||||
// we must use the slow hash function here, because a value may have
|
||||
// different representations in case it's an array/object/number
|
||||
|
@ -192,6 +193,7 @@ size_t AqlValue::length() const {
|
|||
switch (type()) {
|
||||
case VPACK_SLICE_POINTER:
|
||||
case VPACK_INLINE:
|
||||
case VPACK_MANAGED_SLICE:
|
||||
case VPACK_MANAGED_BUFFER: {
|
||||
return static_cast<size_t>(slice().length());
|
||||
}
|
||||
|
@ -217,6 +219,8 @@ AqlValue AqlValue::at(transaction::Methods* trx,
|
|||
// fall-through intentional
|
||||
case VPACK_INLINE:
|
||||
// fall-through intentional
|
||||
case VPACK_MANAGED_SLICE:
|
||||
// fall-through intentional
|
||||
case VPACK_MANAGED_BUFFER: {
|
||||
VPackSlice s(slice());
|
||||
if (s.isArray()) {
|
||||
|
@ -293,6 +297,8 @@ AqlValue AqlValue::getKeyAttribute(transaction::Methods* trx,
|
|||
doCopy = false;
|
||||
case VPACK_INLINE:
|
||||
// fall-through intentional
|
||||
case VPACK_MANAGED_SLICE:
|
||||
// fall-through intentional
|
||||
case VPACK_MANAGED_BUFFER: {
|
||||
VPackSlice s(slice());
|
||||
if (s.isObject()) {
|
||||
|
@ -330,6 +336,8 @@ AqlValue AqlValue::getIdAttribute(transaction::Methods* trx,
|
|||
// fall-through intentional
|
||||
case VPACK_INLINE:
|
||||
// fall-through intentional
|
||||
case VPACK_MANAGED_SLICE:
|
||||
// fall-through intentional
|
||||
case VPACK_MANAGED_BUFFER: {
|
||||
VPackSlice s(slice());
|
||||
if (s.isObject()) {
|
||||
|
@ -372,6 +380,8 @@ AqlValue AqlValue::getFromAttribute(transaction::Methods* trx,
|
|||
// fall-through intentional
|
||||
case VPACK_INLINE:
|
||||
// fall-through intentional
|
||||
case VPACK_MANAGED_SLICE:
|
||||
// fall-through intentional
|
||||
case VPACK_MANAGED_BUFFER: {
|
||||
VPackSlice s(slice());
|
||||
if (s.isObject()) {
|
||||
|
@ -409,6 +419,8 @@ AqlValue AqlValue::getToAttribute(transaction::Methods* trx,
|
|||
// fall-through intentional
|
||||
case VPACK_INLINE:
|
||||
// fall-through intentional
|
||||
case VPACK_MANAGED_SLICE:
|
||||
// fall-through intentional
|
||||
case VPACK_MANAGED_BUFFER: {
|
||||
VPackSlice s(slice());
|
||||
if (s.isObject()) {
|
||||
|
@ -447,6 +459,8 @@ AqlValue AqlValue::get(transaction::Methods* trx,
|
|||
// fall-through intentional
|
||||
case VPACK_INLINE:
|
||||
// fall-through intentional
|
||||
case VPACK_MANAGED_SLICE:
|
||||
// fall-through intentional
|
||||
case VPACK_MANAGED_BUFFER: {
|
||||
VPackSlice s(slice());
|
||||
if (s.isObject()) {
|
||||
|
@ -494,6 +508,8 @@ AqlValue AqlValue::get(transaction::Methods* trx,
|
|||
// fall-through intentional
|
||||
case VPACK_INLINE:
|
||||
// fall-through intentional
|
||||
case VPACK_MANAGED_SLICE:
|
||||
// fall-through intentional
|
||||
case VPACK_MANAGED_BUFFER: {
|
||||
VPackSlice s(slice());
|
||||
if (s.isObject()) {
|
||||
|
@ -556,6 +572,7 @@ bool AqlValue::hasKey(transaction::Methods* trx,
|
|||
switch (type()) {
|
||||
case VPACK_SLICE_POINTER:
|
||||
case VPACK_INLINE:
|
||||
case VPACK_MANAGED_SLICE:
|
||||
case VPACK_MANAGED_BUFFER: {
|
||||
VPackSlice s(slice());
|
||||
return (s.isObject() && s.hasKey(name));
|
||||
|
@ -581,6 +598,7 @@ double AqlValue::toDouble(transaction::Methods* trx, bool& failed) const {
|
|||
switch (type()) {
|
||||
case VPACK_SLICE_POINTER:
|
||||
case VPACK_INLINE:
|
||||
case VPACK_MANAGED_SLICE:
|
||||
case VPACK_MANAGED_BUFFER: {
|
||||
VPackSlice s(slice());
|
||||
if (s.isNull()) {
|
||||
|
@ -647,6 +665,7 @@ int64_t AqlValue::toInt64(transaction::Methods* trx) const {
|
|||
switch (type()) {
|
||||
case VPACK_SLICE_POINTER:
|
||||
case VPACK_INLINE:
|
||||
case VPACK_MANAGED_SLICE:
|
||||
case VPACK_MANAGED_BUFFER: {
|
||||
VPackSlice s(slice());
|
||||
if (s.isNumber()) {
|
||||
|
@ -698,6 +717,7 @@ bool AqlValue::toBoolean() const {
|
|||
switch (type()) {
|
||||
case VPACK_SLICE_POINTER:
|
||||
case VPACK_INLINE:
|
||||
case VPACK_MANAGED_SLICE:
|
||||
case VPACK_MANAGED_BUFFER: {
|
||||
VPackSlice s(slice());
|
||||
if (s.isBoolean()) {
|
||||
|
@ -792,6 +812,7 @@ v8::Handle<v8::Value> AqlValue::toV8(
|
|||
switch (type()) {
|
||||
case VPACK_SLICE_POINTER:
|
||||
case VPACK_INLINE:
|
||||
case VPACK_MANAGED_SLICE:
|
||||
case VPACK_MANAGED_BUFFER: {
|
||||
VPackOptions* options = trx->transactionContext()->getVPackOptions();
|
||||
return TRI_VPackToV8(isolate, slice(), options);
|
||||
|
@ -851,6 +872,7 @@ void AqlValue::toVelocyPack(transaction::Methods* trx,
|
|||
break;
|
||||
} // fallthrough intentional
|
||||
case VPACK_INLINE:
|
||||
case VPACK_MANAGED_SLICE:
|
||||
case VPACK_MANAGED_BUFFER: {
|
||||
if (resolveExternals) {
|
||||
bool const sanitizeExternals = true;
|
||||
|
@ -891,6 +913,7 @@ AqlValue AqlValue::materialize(transaction::Methods* trx, bool& hasCopied,
|
|||
switch (type()) {
|
||||
case VPACK_SLICE_POINTER:
|
||||
case VPACK_INLINE:
|
||||
case VPACK_MANAGED_SLICE:
|
||||
case VPACK_MANAGED_BUFFER: {
|
||||
hasCopied = false;
|
||||
return *this;
|
||||
|
@ -919,7 +942,7 @@ AqlValue AqlValue::clone() const {
|
|||
case VPACK_SLICE_POINTER: {
|
||||
if (isManagedDocument()) {
|
||||
// copy from externally managed document. this will not copy the data
|
||||
return AqlValue(_data.pointer, AqlValueFromManagedDocument());
|
||||
return AqlValue(AqlValueHintNoCopy(_data.pointer));
|
||||
}
|
||||
// copy from regular pointer. this may copy the data
|
||||
return AqlValue(_data.pointer);
|
||||
|
@ -928,13 +951,12 @@ AqlValue AqlValue::clone() const {
|
|||
// copy internal data
|
||||
return AqlValue(slice());
|
||||
}
|
||||
case VPACK_MANAGED_SLICE: {
|
||||
return AqlValue(AqlValueHintCopy(_data.slice));
|
||||
}
|
||||
case VPACK_MANAGED_BUFFER: {
|
||||
// copy buffer
|
||||
VPackValueLength length = _data.buffer->size();
|
||||
auto buffer = new VPackBuffer<uint8_t>(length);
|
||||
buffer->append(reinterpret_cast<char const*>(_data.buffer->data()),
|
||||
length);
|
||||
return AqlValue(buffer);
|
||||
return AqlValue(VPackSlice(_data.buffer->data()));
|
||||
}
|
||||
case DOCVEC: {
|
||||
auto c = std::make_unique<std::vector<AqlItemBlock*>>();
|
||||
|
@ -969,6 +991,10 @@ void AqlValue::destroy() {
|
|||
// nothing to do
|
||||
return;
|
||||
}
|
||||
case VPACK_MANAGED_SLICE: {
|
||||
delete _data.slice;
|
||||
break;
|
||||
}
|
||||
case VPACK_MANAGED_BUFFER: {
|
||||
delete _data.buffer;
|
||||
break;
|
||||
|
@ -1002,6 +1028,13 @@ VPackSlice AqlValue::slice() const {
|
|||
}
|
||||
return s;
|
||||
}
|
||||
case VPACK_MANAGED_SLICE: {
|
||||
VPackSlice s(_data.slice);
|
||||
if (s.isExternal()) {
|
||||
s = s.resolveExternal();
|
||||
}
|
||||
return s;
|
||||
}
|
||||
case VPACK_MANAGED_BUFFER: {
|
||||
VPackSlice s(_data.buffer->data());
|
||||
if (s.isExternal()) {
|
||||
|
@ -1106,6 +1139,7 @@ int AqlValue::Compare(transaction::Methods* trx, AqlValue const& left,
|
|||
switch (leftType) {
|
||||
case VPACK_SLICE_POINTER:
|
||||
case VPACK_INLINE:
|
||||
case VPACK_MANAGED_SLICE:
|
||||
case VPACK_MANAGED_BUFFER: {
|
||||
return arangodb::basics::VelocyPackHelper::compare(
|
||||
left.slice(), right.slice(), compareUtf8, trx->transactionContextPtr()->getVPackOptions());
|
||||
|
|
|
@ -74,14 +74,31 @@ namespace arangodb {
|
|||
namespace transaction {
|
||||
class Methods;
|
||||
}
|
||||
;
|
||||
|
||||
namespace aql {
|
||||
class AqlItemBlock;
|
||||
|
||||
// no-op struct used only in an internal API to signal we want
|
||||
// to construct from an externally managed document
|
||||
struct AqlValueFromManagedDocument {};
|
||||
// no-op struct used only internally to indicate that we want
|
||||
// to copy the data behind the passed pointer
|
||||
struct AqlValueHintCopy {
|
||||
explicit AqlValueHintCopy(uint8_t const* ptr) : ptr(ptr) {}
|
||||
uint8_t const* ptr;
|
||||
};
|
||||
|
||||
// no-op struct used only internally to indicate that we want
|
||||
// to NOT copy the data behind the passed pointer
|
||||
struct AqlValueHintNoCopy {
|
||||
explicit AqlValueHintNoCopy(uint8_t const* ptr) : ptr(ptr) {}
|
||||
uint8_t const* ptr;
|
||||
};
|
||||
|
||||
// no-op struct used only internally to indicate that we want
|
||||
// to pass the ownership of the data behind the passed pointer
|
||||
// to the callee
|
||||
struct AqlValueHintTransferOwnership {
|
||||
explicit AqlValueHintTransferOwnership(uint8_t* ptr) : ptr(ptr) {}
|
||||
uint8_t* ptr;
|
||||
};
|
||||
|
||||
struct AqlValue final {
|
||||
friend struct std::hash<arangodb::aql::AqlValue>;
|
||||
|
@ -94,6 +111,7 @@ struct AqlValue final {
|
|||
enum AqlValueType : uint8_t {
|
||||
VPACK_SLICE_POINTER, // contains a pointer to a vpack document, memory is not managed!
|
||||
VPACK_INLINE, // contains vpack data, inline
|
||||
VPACK_MANAGED_SLICE, // contains vpack, via pointer to a managed uint8_t slice
|
||||
VPACK_MANAGED_BUFFER, // contains vpack, via pointer to a managed buffer
|
||||
DOCVEC, // a vector of blocks of results coming from a subquery, managed
|
||||
RANGE // a pointer to a range remembering lower and upper bound, managed
|
||||
|
@ -109,6 +127,9 @@ struct AqlValue final {
|
|||
/// VPACK_INLINE: VPack values with a size less than 16 bytes can be stored
|
||||
/// directly inside the data.internal structure. All data is stored inline,
|
||||
/// so there is no need for memory management.
|
||||
/// VPACK_MANAGED_SLICE: all values of a larger size will be stored in
|
||||
/// _data.slice via a managed uint8_t* object. The uint8_t* points to a VPack
|
||||
/// data and is managed by the AqlValue.
|
||||
/// VPACK_MANAGED_BUFFER: all values of a larger size will be stored in
|
||||
/// _data.external via a managed VPackBuffer object. The Buffer is managed
|
||||
/// by the AqlValue.
|
||||
|
@ -119,6 +140,7 @@ struct AqlValue final {
|
|||
union {
|
||||
uint8_t internal[16];
|
||||
uint8_t const* pointer;
|
||||
uint8_t* slice;
|
||||
arangodb::velocypack::Buffer<uint8_t>* buffer;
|
||||
std::vector<AqlItemBlock*>* docvec;
|
||||
Range const* range;
|
||||
|
@ -133,12 +155,6 @@ struct AqlValue final {
|
|||
setType(AqlValueType::VPACK_INLINE);
|
||||
}
|
||||
|
||||
// construct from mptr, not copying!
|
||||
AqlValue(uint8_t const* pointer, AqlValueFromManagedDocument const&) noexcept {
|
||||
setPointer<true>(pointer);
|
||||
TRI_ASSERT(!VPackSlice(_data.pointer).isExternal());
|
||||
}
|
||||
|
||||
// construct from pointer, not copying!
|
||||
explicit AqlValue(uint8_t const* pointer) {
|
||||
// we must get rid of Externals first here, because all
|
||||
|
@ -253,20 +269,22 @@ struct AqlValue final {
|
|||
} else if (length <= 126) {
|
||||
// short string... cannot store inline, but we don't need to
|
||||
// create a full-featured Builder object here
|
||||
_data.buffer = new arangodb::velocypack::Buffer<uint8_t>(length + 1);
|
||||
_data.buffer->push_back(static_cast<char>(0x40 + length));
|
||||
_data.buffer->append(value, length);
|
||||
setType(AqlValueType::VPACK_MANAGED_BUFFER);
|
||||
_data.slice = new uint8_t[length + 1];
|
||||
_data.slice[0] = static_cast<uint8_t>(0x40U + length);
|
||||
memcpy(&_data.slice[1], value, length);
|
||||
setType(AqlValueType::VPACK_MANAGED_SLICE);
|
||||
} else {
|
||||
// long string
|
||||
// create a big enough Buffer object
|
||||
auto buffer = std::make_unique<VPackBuffer<uint8_t>>(8 + length);
|
||||
// add string to Builder
|
||||
VPackBuilder builder(*buffer.get());
|
||||
builder.add(VPackValuePair(value, length, VPackValueType::String));
|
||||
// steal Buffer. now we have ownership
|
||||
_data.buffer = buffer.release();
|
||||
setType(AqlValueType::VPACK_MANAGED_BUFFER);
|
||||
// create a big enough uint8_t buffer
|
||||
_data.slice = new uint8_t[length + 9];
|
||||
_data.slice[0] = static_cast<uint8_t>(0xbfU);
|
||||
uint64_t v = length;
|
||||
for (uint64_t i = 0; i < 8; ++i) {
|
||||
_data.slice[i + 1] = v & 0xffU;
|
||||
v >>= 8;
|
||||
}
|
||||
memcpy(&_data.slice[9], value, length);
|
||||
setType(AqlValueType::VPACK_MANAGED_SLICE);
|
||||
}
|
||||
}
|
||||
|
||||
|
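Not part of the commit: a self-contained sketch, assuming only the velocypack headers, of the short-string layout the VPACK_MANAGED_SLICE branch above writes: one type byte (0x40 plus the length) followed by the raw UTF-8 bytes, readable through an ordinary VPackSlice.

#include <velocypack/Slice.h>
#include <velocypack/velocypack-aliases.h>
#include <cassert>
#include <cstring>
#include <string>

int main() {
  std::string value = "projection";
  size_t length = value.size();
  assert(length <= 126);  // the short-string encoding covers 0..126 bytes

  // same layout that the AqlValue constructor above produces for short strings
  uint8_t* data = new uint8_t[length + 1];
  data[0] = static_cast<uint8_t>(0x40U + length);
  memcpy(&data[1], value.data(), length);

  VPackSlice slice(data);
  assert(slice.isString());
  assert(slice.copyString() == value);

  delete[] data;
  return 0;
}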
@ -297,11 +315,23 @@ struct AqlValue final {
|
|||
}
|
||||
}
|
||||
|
||||
// construct from Buffer, taking over its ownership
|
||||
explicit AqlValue(arangodb::velocypack::Buffer<uint8_t>* buffer) {
|
||||
TRI_ASSERT(buffer != nullptr);
|
||||
_data.buffer = buffer;
|
||||
setType(AqlValueType::VPACK_MANAGED_BUFFER);
|
||||
// construct from pointer, not copying!
|
||||
explicit AqlValue(AqlValueHintNoCopy const& ptr) noexcept {
|
||||
setPointer<true>(ptr.ptr);
|
||||
TRI_ASSERT(!VPackSlice(_data.pointer).isExternal());
|
||||
}
|
||||
|
||||
// construct from pointer, copying the data behind the pointer
|
||||
explicit AqlValue(AqlValueHintCopy const& ptr) {
|
||||
TRI_ASSERT(ptr.ptr != nullptr);
|
||||
initFromSlice(VPackSlice(ptr.ptr));
|
||||
}
|
||||
|
||||
// construct from pointer, taking over the ownership
|
||||
explicit AqlValue(AqlValueHintTransferOwnership const& ptr) {
|
||||
TRI_ASSERT(ptr.ptr != nullptr);
|
||||
_data.slice = ptr.ptr;
|
||||
setType(AqlValueType::VPACK_MANAGED_SLICE);
|
||||
}
|
||||
|
||||
// construct from Builder, copying contents
|
||||
|
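Not part of the commit and not ArangoDB code: a simplified, self-contained analogue of the three constructor hints above, illustrating the intended ownership semantics (deep copy, alias without copying, adopt the allocation). The toy class and names are hypothetical stand-ins for AqlValue.

#include <cassert>
#include <cstdint>
#include <cstring>

struct HintCopy   { explicit HintCopy(uint8_t const* p) : ptr(p) {} uint8_t const* ptr; };
struct HintNoCopy { explicit HintNoCopy(uint8_t const* p) : ptr(p) {} uint8_t const* ptr; };
struct HintTransferOwnership { explicit HintTransferOwnership(uint8_t* p) : ptr(p) {} uint8_t* ptr; };

class ToyValue {
 public:
  ToyValue(HintCopy const& h, size_t len)             // deep-copies the data
      : _data(new uint8_t[len]), _owned(true) { std::memcpy(_data, h.ptr, len); }
  explicit ToyValue(HintNoCopy const& h)              // aliases caller-owned memory
      : _data(const_cast<uint8_t*>(h.ptr)), _owned(false) {}
  explicit ToyValue(HintTransferOwnership const& h)   // adopts the allocation
      : _data(h.ptr), _owned(true) {}
  ToyValue(ToyValue const&) = delete;
  ~ToyValue() { if (_owned) delete[] _data; }
  uint8_t const* data() const { return _data; }
 private:
  uint8_t* _data;
  bool _owned;
};

int main() {
  uint8_t local[4] = {1, 2, 3, 4};
  ToyValue copied(HintCopy(local), sizeof(local));   // owns its own copy
  ToyValue aliased{HintNoCopy(local)};               // must not outlive 'local'
  ToyValue adopted{HintTransferOwnership(new uint8_t[4]{5, 6, 7, 8})};
  assert(copied.data() != local);
  assert(aliased.data() == local);
  return 0;
}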
@ -309,7 +339,7 @@ struct AqlValue final {
|
|||
TRI_ASSERT(builder.isClosed());
|
||||
initFromSlice(builder.slice());
|
||||
}
|
||||
|
||||
|
||||
// construct from Builder, copying contents
|
||||
explicit AqlValue(arangodb::velocypack::Builder const* builder) {
|
||||
TRI_ASSERT(builder->isClosed());
|
||||
|
@ -467,8 +497,8 @@ struct AqlValue final {
|
|||
bool resolveExternals) const;
|
||||
|
||||
/// @brief return the slice for the value
|
||||
/// this will throw if the value type is not VPACK_SLICE_POINTER, VPACK_INLINE or
|
||||
/// VPACK_MANAGED_BUFFER
|
||||
/// this will throw if the value type is not VPACK_SLICE_POINTER, VPACK_INLINE,
|
||||
/// VPACK_MANAGED_SLICE or VPACK_MANAGED_BUFFER
|
||||
arangodb::velocypack::Slice slice() const;
|
||||
|
||||
/// @brief clone a value
|
||||
|
@ -490,6 +520,8 @@ struct AqlValue final {
|
|||
case VPACK_SLICE_POINTER:
|
||||
case VPACK_INLINE:
|
||||
return 0;
|
||||
case VPACK_MANAGED_SLICE:
|
||||
return VPackSlice(_data.slice).byteSize();
|
||||
case VPACK_MANAGED_BUFFER:
|
||||
return _data.buffer->size();
|
||||
case DOCVEC:
|
||||
|
@ -538,10 +570,10 @@ struct AqlValue final {
|
|||
memcpy(_data.internal, slice.begin(), static_cast<size_t>(length));
|
||||
setType(AqlValueType::VPACK_INLINE);
|
||||
} else {
|
||||
// Use managed buffer
|
||||
_data.buffer = new arangodb::velocypack::Buffer<uint8_t>(length);
|
||||
_data.buffer->append(reinterpret_cast<char const*>(slice.begin()), length);
|
||||
setType(AqlValueType::VPACK_MANAGED_BUFFER);
|
||||
// Use managed slice
|
||||
_data.slice = new uint8_t[length];
|
||||
memcpy(&_data.slice[0], slice.begin(), length);
|
||||
setType(AqlValueType::VPACK_MANAGED_SLICE);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -667,6 +699,9 @@ struct hash<arangodb::aql::AqlValue> {
|
|||
case arangodb::aql::AqlValue::VPACK_INLINE: {
|
||||
return res ^ static_cast<size_t>(arangodb::velocypack::Slice(&x._data.internal[0]).hash());
|
||||
}
|
||||
case arangodb::aql::AqlValue::VPACK_MANAGED_SLICE: {
|
||||
return res ^ ptrHash(x._data.slice);
|
||||
}
|
||||
case arangodb::aql::AqlValue::VPACK_MANAGED_BUFFER: {
|
||||
return res ^ ptrHash(x._data.buffer);
|
||||
}
|
||||
|
@ -698,6 +733,9 @@ struct equal_to<arangodb::aql::AqlValue> {
|
|||
case arangodb::aql::AqlValue::VPACK_INLINE: {
|
||||
return arangodb::velocypack::Slice(&a._data.internal[0]).equals(arangodb::velocypack::Slice(&b._data.internal[0]));
|
||||
}
|
||||
case arangodb::aql::AqlValue::VPACK_MANAGED_SLICE: {
|
||||
return a._data.slice == b._data.slice;
|
||||
}
|
||||
case arangodb::aql::AqlValue::VPACK_MANAGED_BUFFER: {
|
||||
return a._data.buffer == b._data.buffer;
|
||||
}
|
||||
|
|
|
@ -1566,6 +1566,59 @@ void Ast::injectBindParameters(BindParameters& parameters) {
|
|||
}
|
||||
}
|
||||
|
||||
/// @brief replace an attribute access with just the variable
|
||||
AstNode* Ast::replaceAttributeAccess(
|
||||
AstNode* node, Variable const* variable, std::vector<std::string> const& attribute) {
|
||||
TRI_ASSERT(!attribute.empty());
|
||||
if (attribute.empty()) {
|
||||
return node;
|
||||
}
|
||||
|
||||
std::vector<std::string> attributePath;
|
||||
|
||||
auto visitor = [&](AstNode* node, void*) -> AstNode* {
|
||||
if (node == nullptr) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
if (node->type != NODE_TYPE_ATTRIBUTE_ACCESS) {
|
||||
return node;
|
||||
}
|
||||
|
||||
attributePath.clear();
|
||||
AstNode* origNode = node;
|
||||
|
||||
while (node->type == NODE_TYPE_ATTRIBUTE_ACCESS) {
|
||||
attributePath.emplace_back(node->getString());
|
||||
node = node->getMember(0);
|
||||
}
|
||||
|
||||
if (attributePath.size() != attribute.size()) {
|
||||
// different attribute
|
||||
return origNode;
|
||||
}
|
||||
for (size_t i = 0; i < attribute.size(); ++i) {
|
||||
if (attribute[i] != attributePath[i]) {
|
||||
// different attribute
|
||||
return origNode;
|
||||
}
|
||||
}
|
||||
// same attribute
|
||||
|
||||
if (node->type == NODE_TYPE_REFERENCE) {
|
||||
auto v = static_cast<Variable*>(node->getData());
|
||||
if (v != nullptr && v->id == variable->id) {
|
||||
// our variable... now replace the attribute access with just the variable
|
||||
return node;
|
||||
}
|
||||
}
|
||||
|
||||
return origNode;
|
||||
};
|
||||
|
||||
return traverseAndModify(node, visitor, nullptr);
|
||||
}
|
||||
|
||||
/// @brief replace variables
|
||||
AstNode* Ast::replaceVariables(
|
||||
AstNode* node,
|
||||
|
@ -1994,6 +2047,129 @@ TopLevelAttributes Ast::getReferencedAttributes(AstNode const* node,
|
|||
return result;
|
||||
}
|
||||
|
||||
bool Ast::populateSingleAttributeAccess(AstNode const* node,
|
||||
Variable const* variable,
|
||||
std::vector<std::string>& attributeName) {
|
||||
bool result = true;
|
||||
|
||||
auto doNothingVisitor = [](AstNode const* node, void* data) -> void {};
|
||||
|
||||
attributeName.clear();
|
||||
std::vector<std::string> attributePath;
|
||||
|
||||
auto visitor = [&](AstNode const* node, void* data) -> void {
|
||||
if (node == nullptr || !result) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (node->type == NODE_TYPE_ATTRIBUTE_ACCESS) {
|
||||
attributePath.emplace_back(node->getString());
|
||||
return;
|
||||
}
|
||||
|
||||
if (node->type == NODE_TYPE_REFERENCE) {
|
||||
// reference to a variable
|
||||
auto v = static_cast<Variable const*>(node->getData());
|
||||
|
||||
if (v == nullptr) {
|
||||
THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY);
|
||||
}
|
||||
|
||||
if (v->id == variable->id) {
|
||||
// the variable we are looking for
|
||||
if (attributeName.empty()) {
|
||||
// haven't seen an attribute before. so store the attribute we got
|
||||
attributeName = std::move(attributePath);
|
||||
} else {
|
||||
// have seen some attribute before. now check if it's the same attribute
|
||||
size_t const n = attributeName.size();
|
||||
if (n != attributePath.size()) {
|
||||
// different attributes
|
||||
result = false;
|
||||
} else {
|
||||
for (size_t i = 0; i < n; ++i) {
|
||||
if (attributePath[i] != attributeName[i]) {
|
||||
// different attributes
|
||||
result = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// fall-through
|
||||
}
|
||||
|
||||
attributePath.clear();
|
||||
};
|
||||
|
||||
traverseReadOnly(node, visitor, doNothingVisitor, doNothingVisitor, nullptr);
|
||||
if (attributeName.empty()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/// @brief checks if the only references to the specified variable are
|
||||
/// attribute accesses to the specified attribute. all other variables
|
||||
/// used in the expression are ignored and will not influence the result!
|
||||
bool Ast::variableOnlyUsedForSingleAttributeAccess(AstNode const* node,
|
||||
Variable const* variable,
|
||||
std::vector<std::string> const& attributeName) {
|
||||
bool result = true;
|
||||
|
||||
auto doNothingVisitor = [](AstNode const* node, void* data) -> void {};
|
||||
|
||||
// traversal state
|
||||
std::vector<std::string> attributePath;
|
||||
|
||||
auto visitor = [&](AstNode const* node, void* data) -> void {
|
||||
if (node == nullptr || !result) {
|
||||
return;
|
||||
}
|
||||
|
||||
if (node->type == NODE_TYPE_ATTRIBUTE_ACCESS) {
|
||||
attributePath.emplace_back(node->getString());
|
||||
return;
|
||||
}
|
||||
|
||||
if (node->type == NODE_TYPE_REFERENCE) {
|
||||
// reference to a variable
|
||||
auto v = static_cast<Variable const*>(node->getData());
|
||||
|
||||
if (v == nullptr) {
|
||||
THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY);
|
||||
}
|
||||
|
||||
if (v->id == variable->id) {
|
||||
// the variable we are looking for
|
||||
if (attributePath.size() != attributeName.size()) {
|
||||
// different attribute
|
||||
result = false;
|
||||
} else {
|
||||
size_t const n = attributeName.size();
|
||||
TRI_ASSERT(n == attributePath.size());
|
||||
for (size_t i = 0; i < n; ++i) {
|
||||
if (attributePath[i] != attributeName[i]) {
|
||||
// different attributes
|
||||
result = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// fall-through
|
||||
}
|
||||
|
||||
attributePath.clear();
|
||||
};
|
||||
|
||||
traverseReadOnly(node, visitor, doNothingVisitor, doNothingVisitor, nullptr);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
/// @brief recursively clone a node
|
||||
AstNode* Ast::clone(AstNode const* node) {
|
||||
AstNodeType const type = node->type;
|
||||
|
|
|
@ -387,6 +387,19 @@ class Ast {
   /// @brief determines the top-level attributes in an expression, grouped by
   /// variable
   static TopLevelAttributes getReferencedAttributes(AstNode const*, bool&);

+  static bool populateSingleAttributeAccess(AstNode const* node,
+                                            Variable const* variable,
+                                            std::vector<std::string>& attributeName);
+
+  static bool variableOnlyUsedForSingleAttributeAccess(AstNode const* node,
+                                                       Variable const* variable,
+                                                       std::vector<std::string> const& attributeName);
+
+  /// @brief replace an attribute access with just the variable
+  static AstNode* replaceAttributeAccess(AstNode* node,
+                                         Variable const* variable,
+                                         std::vector<std::string> const& attributeName);
+
   /// @brief recursively clone a node
   AstNode* clone(AstNode const*);

@ -131,10 +131,12 @@ class CollectNode : public ExecutionNode {

   /// @brief whether or not the count flag is set
   inline bool count() const { return _count; }

+  inline bool hasOutVariableButNoCount() const { return (_outVariable != nullptr && !_count); }
+
   /// @brief whether or not the node has an outVariable (i.e. INTO ...)
   inline bool hasOutVariable() const { return _outVariable != nullptr; }

   /// @brief return the out variable
   Variable const* outVariable() const { return _outVariable; }

@ -0,0 +1,141 @@
|
|||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// DISCLAIMER
|
||||
///
|
||||
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
|
||||
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
|
||||
///
|
||||
/// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
/// you may not use this file except in compliance with the License.
|
||||
/// You may obtain a copy of the License at
|
||||
///
|
||||
/// http://www.apache.org/licenses/LICENSE-2.0
|
||||
///
|
||||
/// Unless required by applicable law or agreed to in writing, software
|
||||
/// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
/// See the License for the specific language governing permissions and
|
||||
/// limitations under the License.
|
||||
///
|
||||
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
|
||||
///
|
||||
/// @author Jan Steemann
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#include "DocumentProducingBlock.h"
|
||||
#include "Aql/AqlItemBlock.h"
|
||||
#include "Aql/DocumentProducingNode.h"
|
||||
#include "Aql/ExecutionNode.h"
|
||||
#include "Aql/IndexNode.h"
|
||||
#include "Aql/Variable.h"
|
||||
#include "Basics/Exceptions.h"
|
||||
#include "StorageEngine/EngineSelectorFeature.h"
|
||||
#include "StorageEngine/StorageEngine.h"
|
||||
#include "Transaction/Helpers.h"
|
||||
|
||||
using namespace arangodb;
|
||||
using namespace arangodb::aql;
|
||||
|
||||
DocumentProducingBlock::DocumentProducingBlock(DocumentProducingNode const* node, transaction::Methods* trx)
|
||||
: _trxPtr(trx),
|
||||
_node(node),
|
||||
_produceResult(dynamic_cast<ExecutionNode const*>(_node)->isVarUsedLater(_node->outVariable())),
|
||||
_useRawDocumentPointers(EngineSelectorFeature::ENGINE->useRawDocumentPointers()),
|
||||
_documentProducer(buildCallback()) {
|
||||
}
|
||||
|
||||
DocumentProducingBlock::DocumentProducingFunction DocumentProducingBlock::buildCallback() const {
|
||||
if (!_produceResult) {
|
||||
// no result needed
|
||||
return [](AqlItemBlock* res, VPackSlice, size_t registerId, size_t& row) {
|
||||
if (row > 0) {
|
||||
// re-use already copied AQLValues
|
||||
res->copyValuesFromFirstRow(row, static_cast<RegisterId>(registerId));
|
||||
}
|
||||
++row;
|
||||
};
|
||||
}
|
||||
|
||||
auto const& projection = _node->projection();
|
||||
|
||||
if (projection.size() == 1) {
|
||||
if (projection[0] == "_id") {
|
||||
// return _id attribute
|
||||
return [this](AqlItemBlock* res, VPackSlice slice, size_t registerId, size_t& row) {
|
||||
VPackSlice found = transaction::helpers::extractIdFromDocument(slice);
|
||||
if (found.isCustom()) {
|
||||
// _id as a custom type needs special treatment
|
||||
res->setValue(row, static_cast<arangodb::aql::RegisterId>(registerId),
|
||||
AqlValue(transaction::helpers::extractIdString(_trxPtr->resolver(), found, slice)));
|
||||
} else {
|
||||
res->setValue(row, static_cast<arangodb::aql::RegisterId>(registerId),
|
||||
AqlValue(AqlValueHintCopy(found.begin())));
|
||||
}
|
||||
if (row > 0) {
|
||||
// re-use already copied AQLValues
|
||||
res->copyValuesFromFirstRow(row, static_cast<RegisterId>(registerId));
|
||||
}
|
||||
++row;
|
||||
};
|
||||
} else if (projection[0] == "_key") {
|
||||
// return _key attribute
|
||||
return [this](AqlItemBlock* res, VPackSlice slice, size_t registerId, size_t& row) {
|
||||
VPackSlice found = transaction::helpers::extractKeyFromDocument(slice);
|
||||
if (_useRawDocumentPointers) {
|
||||
res->setValue(row, static_cast<arangodb::aql::RegisterId>(registerId),
|
||||
AqlValue(AqlValueHintNoCopy(found.begin())));
|
||||
} else {
|
||||
res->setValue(row, static_cast<arangodb::aql::RegisterId>(registerId),
|
||||
AqlValue(AqlValueHintCopy(found.begin())));
|
||||
}
|
||||
if (row > 0) {
|
||||
// re-use already copied AQLValues
|
||||
res->copyValuesFromFirstRow(row, static_cast<RegisterId>(registerId));
|
||||
}
|
||||
++row;
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
if (!projection.empty()) {
|
||||
// return another projection
|
||||
return [this](AqlItemBlock* res, VPackSlice slice, size_t registerId, size_t& row) {
|
||||
slice = slice.get(_node->projection());
|
||||
if (slice.isNone()) {
|
||||
// attribute not found
|
||||
res->setValue(row, static_cast<arangodb::aql::RegisterId>(registerId),
|
||||
AqlValue(VPackSlice::nullSlice()));
|
||||
} else {
|
||||
uint8_t const* vpack = slice.begin();
|
||||
if (_useRawDocumentPointers) {
|
||||
res->setValue(row, static_cast<arangodb::aql::RegisterId>(registerId),
|
||||
AqlValue(AqlValueHintNoCopy(vpack)));
|
||||
} else {
|
||||
res->setValue(row, static_cast<arangodb::aql::RegisterId>(registerId),
|
||||
AqlValue(AqlValueHintCopy(vpack)));
|
||||
}
|
||||
}
|
||||
if (row > 0) {
|
||||
// re-use already copied AQLValues
|
||||
res->copyValuesFromFirstRow(row, static_cast<RegisterId>(registerId));
|
||||
}
|
||||
++row;
|
||||
};
|
||||
}
|
||||
|
||||
// return the document as is
|
||||
return [this](AqlItemBlock* res, VPackSlice slice, size_t registerId, size_t& row) {
|
||||
uint8_t const* vpack = slice.begin();
|
||||
if (_useRawDocumentPointers) {
|
||||
res->setValue(row, static_cast<arangodb::aql::RegisterId>(registerId),
|
||||
AqlValue(AqlValueHintNoCopy(vpack)));
|
||||
} else {
|
||||
res->setValue(row, static_cast<arangodb::aql::RegisterId>(registerId),
|
||||
AqlValue(AqlValueHintCopy(vpack)));
|
||||
}
|
||||
if (row > 0) {
|
||||
// re-use already copied AQLValues
|
||||
res->copyValuesFromFirstRow(row, static_cast<RegisterId>(registerId));
|
||||
}
|
||||
++row;
|
||||
};
|
||||
}
|
|
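Not part of the commit: a self-contained sketch of the dispatch pattern used by buildCallback() above. The projection is inspected once and a specialized per-row lambda is returned, so the hot loop never re-checks which case applies per document; the toy document type and names are illustrative only.

#include <functional>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

using Row = std::vector<std::pair<std::string, std::string>>;  // toy "document"
using Producer = std::function<void(Row const&, size_t& rowsWritten)>;

Producer buildCallback(bool produceResult, std::string const& projection) {
  if (!produceResult) {
    // result not used downstream: only count rows, never touch the document
    return [](Row const&, size_t& rowsWritten) { ++rowsWritten; };
  }
  if (!projection.empty()) {
    // copy just the projected attribute
    return [projection](Row const& doc, size_t& rowsWritten) {
      for (auto const& kv : doc) {
        if (kv.first == projection) {
          std::cout << kv.second << "\n";
        }
      }
      ++rowsWritten;
    };
  }
  // fall back to producing the full document
  return [](Row const& doc, size_t& rowsWritten) {
    for (auto const& kv : doc) {
      std::cout << kv.first << ": " << kv.second << "\n";
    }
    ++rowsWritten;
  };
}

int main() {
  Producer produce = buildCallback(true, "_key");
  size_t rows = 0;
  produce({{"_key", "abc"}, {"value", "42"}}, rows);
  std::cout << "rows: " << rows << "\n";
  return 0;
}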
@ -0,0 +1,74 @@
|
|||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// DISCLAIMER
|
||||
///
|
||||
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
|
||||
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
|
||||
///
|
||||
/// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
/// you may not use this file except in compliance with the License.
|
||||
/// You may obtain a copy of the License at
|
||||
///
|
||||
/// http://www.apache.org/licenses/LICENSE-2.0
|
||||
///
|
||||
/// Unless required by applicable law or agreed to in writing, software
|
||||
/// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
/// See the License for the specific language governing permissions and
|
||||
/// limitations under the License.
|
||||
///
|
||||
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
|
||||
///
|
||||
/// @author Jan Steemann
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#ifndef ARANGOD_AQL_DOCUMENT_PRODUCING_BLOCK_H
|
||||
#define ARANGOD_AQL_DOCUMENT_PRODUCING_BLOCK_H 1
|
||||
|
||||
#include "Basics/Common.h"
|
||||
|
||||
#include <velocypack/Slice.h>
|
||||
|
||||
#include <functional>
|
||||
|
||||
namespace arangodb {
|
||||
namespace transaction {
|
||||
class Methods;
|
||||
}
|
||||
|
||||
namespace aql {
|
||||
class AqlItemBlock;
|
||||
class DocumentProducingNode;
|
||||
struct Variable;
|
||||
|
||||
class DocumentProducingBlock {
|
||||
public:
|
||||
typedef std::function<void(AqlItemBlock*, arangodb::velocypack::Slice, size_t, size_t&)> DocumentProducingFunction;
|
||||
|
||||
DocumentProducingBlock(DocumentProducingNode const* node, transaction::Methods* trx);
|
||||
virtual ~DocumentProducingBlock() = default;
|
||||
|
||||
public:
|
||||
bool produceResult() const { return _produceResult; }
|
||||
|
||||
private:
|
||||
DocumentProducingFunction buildCallback() const;
|
||||
|
||||
private:
|
||||
transaction::Methods* _trxPtr;
|
||||
|
||||
DocumentProducingNode const* _node;
|
||||
|
||||
/// @brief whether or not we want to build a result
|
||||
bool const _produceResult;
|
||||
|
||||
/// @brief whether or not we are allowed to pass documents via raw pointers only
|
||||
bool const _useRawDocumentPointers;
|
||||
|
||||
protected:
|
||||
DocumentProducingFunction const _documentProducer;
|
||||
};
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
|
@ -0,0 +1,67 @@
|
|||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// DISCLAIMER
|
||||
///
|
||||
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
|
||||
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
|
||||
///
|
||||
/// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
/// you may not use this file except in compliance with the License.
|
||||
/// You may obtain a copy of the License at
|
||||
///
|
||||
/// http://www.apache.org/licenses/LICENSE-2.0
|
||||
///
|
||||
/// Unless required by applicable law or agreed to in writing, software
|
||||
/// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
/// See the License for the specific language governing permissions and
|
||||
/// limitations under the License.
|
||||
///
|
||||
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
|
||||
///
|
||||
/// @author Jan Steemann
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#include "DocumentProducingNode.h"
|
||||
#include "Aql/ExecutionPlan.h"
|
||||
#include "Aql/Variable.h"
|
||||
|
||||
#include <velocypack/Builder.h>
|
||||
#include <velocypack/Iterator.h>
|
||||
#include <velocypack/Value.h>
|
||||
#include <velocypack/ValueType.h>
|
||||
#include <velocypack/velocypack-aliases.h>
|
||||
|
||||
using namespace arangodb::aql;
|
||||
|
||||
DocumentProducingNode::DocumentProducingNode(Variable const* outVariable)
|
||||
: _outVariable(outVariable) {
|
||||
TRI_ASSERT(_outVariable != nullptr);
|
||||
}
|
||||
|
||||
DocumentProducingNode::DocumentProducingNode(ExecutionPlan* plan,
|
||||
arangodb::velocypack::Slice slice)
|
||||
: _outVariable(Variable::varFromVPack(plan->getAst(), slice, "outVariable")) {
|
||||
TRI_ASSERT(_outVariable != nullptr);
|
||||
|
||||
if (slice.hasKey("projection")) {
|
||||
VPackSlice p = slice.get("projection");
|
||||
if (p.isArray()) {
|
||||
for (auto const& it : VPackArrayIterator(p)) {
|
||||
_projection.emplace_back(it.copyString());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void DocumentProducingNode::toVelocyPack(arangodb::velocypack::Builder& builder) const {
|
||||
builder.add(VPackValue("outVariable"));
|
||||
_outVariable->toVelocyPack(builder);
|
||||
|
||||
if (!_projection.empty()) {
|
||||
builder.add("projection", VPackValue(VPackValueType::Array));
|
||||
for (auto const& it : _projection) {
|
||||
builder.add(VPackValue(it));
|
||||
}
|
||||
builder.close();
|
||||
}
|
||||
}
|
|
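Not part of the commit: a round-trip sketch built only from the velocypack calls already used above (adding an array-typed value, VPackArrayIterator, copyString), mimicking how the projection attribute is serialized by toVelocyPack and read back by the Slice-based constructor.

#include <velocypack/Builder.h>
#include <velocypack/Iterator.h>
#include <velocypack/Slice.h>
#include <velocypack/velocypack-aliases.h>
#include <iostream>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> projection = {"a", "b"};

  // serialization side, as in DocumentProducingNode::toVelocyPack
  VPackBuilder builder;
  builder.openObject();
  builder.add("projection", VPackValue(VPackValueType::Array));
  for (auto const& it : projection) {
    builder.add(VPackValue(it));
  }
  builder.close();  // close the array
  builder.close();  // close the object

  // deserialization side, as in the constructor taking a Slice
  std::vector<std::string> restored;
  VPackSlice p = builder.slice().get("projection");
  if (p.isArray()) {
    for (auto const& it : VPackArrayIterator(p)) {
      restored.emplace_back(it.copyString());
    }
  }
  for (auto const& name : restored) {
    std::cout << name << "\n";
  }
  return 0;
}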
@ -0,0 +1,72 @@
|
|||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// DISCLAIMER
|
||||
///
|
||||
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
|
||||
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
|
||||
///
|
||||
/// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
/// you may not use this file except in compliance with the License.
|
||||
/// You may obtain a copy of the License at
|
||||
///
|
||||
/// http://www.apache.org/licenses/LICENSE-2.0
|
||||
///
|
||||
/// Unless required by applicable law or agreed to in writing, software
|
||||
/// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
/// See the License for the specific language governing permissions and
|
||||
/// limitations under the License.
|
||||
///
|
||||
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
|
||||
///
|
||||
/// @author Jan Steemann
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#ifndef ARANGOD_AQL_DOCUMENT_PRODUCING_NODE_H
|
||||
#define ARANGOD_AQL_DOCUMENT_PRODUCING_NODE_H 1
|
||||
|
||||
#include "Basics/Common.h"
|
||||
|
||||
#include <velocypack/Builder.h>
|
||||
#include <velocypack/Slice.h>
|
||||
|
||||
namespace arangodb {
|
||||
namespace aql {
|
||||
class ExecutionPlan;
|
||||
struct Variable;
|
||||
|
||||
class DocumentProducingNode {
|
||||
public:
|
||||
explicit DocumentProducingNode(Variable const* outVariable);
|
||||
DocumentProducingNode(ExecutionPlan* plan, arangodb::velocypack::Slice slice);
|
||||
|
||||
virtual ~DocumentProducingNode() = default;
|
||||
|
||||
public:
|
||||
/// @brief return the out variable
|
||||
Variable const* outVariable() const { return _outVariable; }
|
||||
|
||||
std::vector<std::string> const& projection() const {
|
||||
return _projection;
|
||||
}
|
||||
|
||||
void setProjection(std::vector<std::string> const& attributeNames) {
|
||||
_projection = attributeNames;
|
||||
}
|
||||
|
||||
void setProjection(std::vector<std::string>&& attributeNames) {
|
||||
_projection = std::move(attributeNames);
|
||||
}
|
||||
|
||||
void toVelocyPack(arangodb::velocypack::Builder& builder) const;
|
||||
|
||||
protected:
|
||||
Variable const* _outVariable;
|
||||
|
||||
/// @brief produce only the following attribute (with possible subattributes)
|
||||
std::vector<std::string> _projection;
|
||||
};
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
#endif
|
|
@ -31,6 +31,8 @@
|
|||
#include "Cluster/FollowerInfo.h"
|
||||
#include "Cluster/ServerState.h"
|
||||
#include "StorageEngine/DocumentIdentifierToken.h"
|
||||
#include "Transaction/Helpers.h"
|
||||
#include "Transaction/Methods.h"
|
||||
#include "Utils/OperationCursor.h"
|
||||
#include "VocBase/LogicalCollection.h"
|
||||
#include "VocBase/ManagedDocumentResult.h"
|
||||
|
@ -40,22 +42,20 @@ using namespace arangodb::aql;
|
|||
|
||||
EnumerateCollectionBlock::EnumerateCollectionBlock(
|
||||
ExecutionEngine* engine, EnumerateCollectionNode const* ep)
|
||||
: ExecutionBlock(engine, ep),
|
||||
: ExecutionBlock(engine, ep),
|
||||
DocumentProducingBlock(ep, _trx),
|
||||
_collection(ep->_collection),
|
||||
_mmdr(new ManagedDocumentResult),
|
||||
_cursor(
|
||||
_trx->indexScan(_collection->getName(),
|
||||
(ep->_random ? transaction::Methods::CursorType::ANY
|
||||
: transaction::Methods::CursorType::ALL),
|
||||
_mmdr.get(), 0, UINT64_MAX, 1000, false)),
|
||||
_mustStoreResult(true) {
|
||||
_mmdr.get(), 0, UINT64_MAX, 1000, false)) {
|
||||
TRI_ASSERT(_cursor->successful());
|
||||
}
|
||||
|
||||
int EnumerateCollectionBlock::initialize() {
|
||||
DEBUG_BEGIN_BLOCK();
|
||||
auto ep = static_cast<EnumerateCollectionNode const*>(_exeNode);
|
||||
_mustStoreResult = ep->isVarUsedLater(ep->_outVariable);
|
||||
|
||||
if (_collection->isSatellite()) {
|
||||
auto logicalCollection = _collection->getCollection();
|
||||
|
@ -135,7 +135,7 @@ AqlItemBlock* EnumerateCollectionBlock::getSome(size_t, // atLeast,
|
|||
traceGetSomeEnd(nullptr);
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
|
||||
bool needMore;
|
||||
AqlItemBlock* cur = nullptr;
|
||||
size_t send = 0;
|
||||
|
@ -185,35 +185,26 @@ AqlItemBlock* EnumerateCollectionBlock::getSome(size_t, // atLeast,
|
|||
// only copy 1st row of registers inherited from previous frame(s)
|
||||
inheritRegisters(cur, res.get(), _pos);
|
||||
|
||||
IndexIterator::DocumentCallback cb;
|
||||
if (_mustStoreResult) {
|
||||
cb = [&](ManagedDocumentResult const& mdr) {
|
||||
res->setValue(send,
|
||||
static_cast<arangodb::aql::RegisterId>(curRegs),
|
||||
mdr.createAqlValue());
|
||||
if (send > 0) {
|
||||
// re-use already copied AQLValues
|
||||
res->copyValuesFromFirstRow(send, static_cast<RegisterId>(curRegs));
|
||||
}
|
||||
++send;
|
||||
};
|
||||
} else {
|
||||
cb = [&](ManagedDocumentResult const& mdr) {
|
||||
if (send > 0) {
|
||||
// re-use already copied AQLValues
|
||||
res->copyValuesFromFirstRow(send, static_cast<RegisterId>(curRegs));
|
||||
}
|
||||
++send;
|
||||
};
|
||||
}
|
||||
|
||||
throwIfKilled(); // check if we were aborted
|
||||
|
||||
TRI_IF_FAILURE("EnumerateCollectionBlock::moreDocuments") {
|
||||
THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
|
||||
}
|
||||
|
||||
bool tmp = _cursor->nextDocument(cb, atMost);
|
||||
|
||||
bool tmp;
|
||||
if (produceResult()) {
|
||||
// properly build up results by fetching the actual documents
|
||||
// using nextDocument()
|
||||
tmp = _cursor->nextDocument([&](DocumentIdentifierToken const&, VPackSlice slice) {
|
||||
_documentProducer(res.get(), slice, curRegs, send);
|
||||
}, atMost);
|
||||
} else {
|
||||
// performance optimization: we do not need the documents at all,
|
||||
// so just call next()
|
||||
tmp = _cursor->next([&](DocumentIdentifierToken const&) {
|
||||
_documentProducer(res.get(), VPackSlice::nullSlice(), curRegs, send);
|
||||
}, atMost);
|
||||
}
|
||||
if (!tmp) {
|
||||
TRI_ASSERT(!_cursor->hasMore());
|
||||
}
|
||||
|
|
|
@ -24,6 +24,7 @@
|
|||
#ifndef ARANGOD_AQL_ENUMERATE_COLLECTION_BLOCK_H
|
||||
#define ARANGOD_AQL_ENUMERATE_COLLECTION_BLOCK_H 1
|
||||
|
||||
#include "Aql/DocumentProducingBlock.h"
|
||||
#include "Aql/ExecutionBlock.h"
|
||||
#include "Aql/ExecutionNode.h"
|
||||
|
||||
|
@ -41,7 +42,7 @@ class AqlItemBlock;
|
|||
struct Collection;
|
||||
class ExecutionEngine;
|
||||
|
||||
class EnumerateCollectionBlock final : public ExecutionBlock {
|
||||
class EnumerateCollectionBlock final : public ExecutionBlock, public DocumentProducingBlock {
|
||||
public:
|
||||
EnumerateCollectionBlock(ExecutionEngine* engine,
|
||||
EnumerateCollectionNode const* ep);
|
||||
|
@ -70,9 +71,6 @@ class EnumerateCollectionBlock final : public ExecutionBlock {
|
|||
|
||||
/// @brief cursor
|
||||
std::unique_ptr<OperationCursor> _cursor;
|
||||
|
||||
/// @brief whether or not the enumerated documents need to be stored
|
||||
bool _mustStoreResult;
|
||||
};
|
||||
|
||||
} // namespace arangodb::aql
|
||||
|
|
|
@ -38,14 +38,14 @@ ExecutionBlock::ExecutionBlock(ExecutionEngine* engine, ExecutionNode const* ep)
       _exeNode(ep),
       _pos(0),
       _done(false),
-      _tracing(engine->getQuery()->queryOptions().tracing) {}
+      _tracing(engine->getQuery()->queryOptions().tracing) {
+  TRI_ASSERT(_trx != nullptr);
+}

 ExecutionBlock::~ExecutionBlock() {
   for (auto& it : _buffer) {
     delete it;
   }

   _buffer.clear();
 }

 /// @brief returns the register id for a variable id

@ -111,7 +111,7 @@ void ExecutionNode::getSortElements(SortElementVector& elements,
|
|||
|
||||
for (auto const& it : VPackArrayIterator(elementsSlice)) {
|
||||
bool ascending = it.get("ascending").getBoolean();
|
||||
Variable* v = varFromVPack(plan->getAst(), it, "inVariable");
|
||||
Variable* v = Variable::varFromVPack(plan->getAst(), it, "inVariable");
|
||||
elements.emplace_back(v, ascending);
|
||||
// Is there an attribute path?
|
||||
VPackSlice path = it.get("paths");
|
||||
|
@ -156,9 +156,9 @@ ExecutionNode* ExecutionNode::fromVPackFactory(
|
|||
}
|
||||
case COLLECT: {
|
||||
Variable* expressionVariable =
|
||||
varFromVPack(plan->getAst(), slice, "expressionVariable", Optional);
|
||||
Variable::varFromVPack(plan->getAst(), slice, "expressionVariable", Optional);
|
||||
Variable* outVariable =
|
||||
varFromVPack(plan->getAst(), slice, "outVariable", Optional);
|
||||
Variable::varFromVPack(plan->getAst(), slice, "outVariable", Optional);
|
||||
|
||||
// keepVariables
|
||||
std::vector<Variable const*> keepVariables;
|
||||
|
@ -166,7 +166,7 @@ ExecutionNode* ExecutionNode::fromVPackFactory(
|
|||
if (keepVariablesSlice.isArray()) {
|
||||
for (auto const& it : VPackArrayIterator(keepVariablesSlice)) {
|
||||
Variable const* variable =
|
||||
varFromVPack(plan->getAst(), it, "variable");
|
||||
Variable::varFromVPack(plan->getAst(), it, "variable");
|
||||
keepVariables.emplace_back(variable);
|
||||
}
|
||||
}
|
||||
|
@ -182,8 +182,8 @@ ExecutionNode* ExecutionNode::fromVPackFactory(
|
|||
{
|
||||
groupVariables.reserve(groupsSlice.length());
|
||||
for (auto const& it : VPackArrayIterator(groupsSlice)) {
|
||||
Variable* outVar = varFromVPack(plan->getAst(), it, "outVariable");
|
||||
Variable* inVar = varFromVPack(plan->getAst(), it, "inVariable");
|
||||
Variable* outVar = Variable::varFromVPack(plan->getAst(), it, "outVariable");
|
||||
Variable* inVar = Variable::varFromVPack(plan->getAst(), it, "inVariable");
|
||||
|
||||
groupVariables.emplace_back(std::make_pair(outVar, inVar));
|
||||
}
|
||||
|
@ -202,8 +202,8 @@ ExecutionNode* ExecutionNode::fromVPackFactory(
|
|||
{
|
||||
aggregateVariables.reserve(aggregatesSlice.length());
|
||||
for (auto const& it : VPackArrayIterator(aggregatesSlice)) {
|
||||
Variable* outVar = varFromVPack(plan->getAst(), it, "outVariable");
|
||||
Variable* inVar = varFromVPack(plan->getAst(), it, "inVariable");
|
||||
Variable* outVar = Variable::varFromVPack(plan->getAst(), it, "outVariable");
|
||||
Variable* inVar = Variable::varFromVPack(plan->getAst(), it, "inVariable");
|
||||
|
||||
std::string const type = it.get("type").copyString();
|
||||
aggregateVariables.emplace_back(
|
||||
|
@ -558,25 +558,6 @@ ExecutionNode const* ExecutionNode::getLoop() const {
|
|||
return nullptr;
|
||||
}
|
||||
|
||||
/// @brief factory for (optional) variables from VPack
|
||||
Variable* ExecutionNode::varFromVPack(Ast* ast,
|
||||
arangodb::velocypack::Slice const& base,
|
||||
char const* variableName, bool optional) {
|
||||
VPackSlice variable = base.get(variableName);
|
||||
|
||||
if (variable.isNone()) {
|
||||
if (optional) {
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
std::string msg;
|
||||
msg +=
|
||||
"mandatory variable \"" + std::string(variableName) + "\" not found.";
|
||||
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, msg);
|
||||
}
|
||||
return ast->variables()->createVariable(variable);
|
||||
}
|
||||
|
||||
/// @brief toVelocyPackHelper, for a generic node
|
||||
/// Note: The input nodes has to be an Array Element that is still Open.
|
||||
/// At the end of this function the current-nodes Object is OPEN and
|
||||
|
@ -1153,14 +1134,13 @@ double SingletonNode::estimateCost(size_t& nrItems) const {
|
|||
EnumerateCollectionNode::EnumerateCollectionNode(
|
||||
ExecutionPlan* plan, arangodb::velocypack::Slice const& base)
|
||||
: ExecutionNode(plan, base),
|
||||
DocumentProducingNode(plan, base),
|
||||
_vocbase(plan->getAst()->query()->vocbase()),
|
||||
_collection(plan->getAst()->query()->collections()->get(
|
||||
base.get("collection").copyString())),
|
||||
_outVariable(varFromVPack(plan->getAst(), base, "outVariable")),
|
||||
_random(base.get("random").getBoolean()) {
|
||||
TRI_ASSERT(_vocbase != nullptr);
|
||||
TRI_ASSERT(_collection != nullptr);
|
||||
TRI_ASSERT(_outVariable != nullptr);
|
||||
}
|
||||
|
||||
/// @brief toVelocyPack, for EnumerateCollectionNode
|
||||
|
@ -1172,11 +1152,12 @@ void EnumerateCollectionNode::toVelocyPackHelper(VPackBuilder& nodes,
|
|||
// Now put info about vocbase and cid in there
|
||||
nodes.add("database", VPackValue(_vocbase->name()));
|
||||
nodes.add("collection", VPackValue(_collection->getName()));
|
||||
nodes.add(VPackValue("outVariable"));
|
||||
_outVariable->toVelocyPack(nodes);
|
||||
nodes.add("random", VPackValue(_random));
|
||||
nodes.add("satellite", VPackValue(_collection->isSatellite()));
|
||||
|
||||
// add outvariable and projection
|
||||
DocumentProducingNode::toVelocyPack(nodes);
|
||||
|
||||
// And close it:
|
||||
nodes.close();
|
||||
}
|
||||
|
@ -1194,6 +1175,8 @@ ExecutionNode* EnumerateCollectionNode::clone(ExecutionPlan* plan,
|
|||
auto c = new EnumerateCollectionNode(plan, _id, _vocbase, _collection,
|
||||
outVariable, _random);
|
||||
|
||||
c->setProjection(_projection);
|
||||
|
||||
cloneHelper(c, plan, withDependencies, withProperties);
|
||||
|
||||
return static_cast<ExecutionNode*>(c);
|
||||
|
@ -1217,8 +1200,8 @@ double EnumerateCollectionNode::estimateCost(size_t& nrItems) const {
|
|||
EnumerateListNode::EnumerateListNode(ExecutionPlan* plan,
|
||||
arangodb::velocypack::Slice const& base)
|
||||
: ExecutionNode(plan, base),
|
||||
_inVariable(varFromVPack(plan->getAst(), base, "inVariable")),
|
||||
_outVariable(varFromVPack(plan->getAst(), base, "outVariable")) {}
|
||||
_inVariable(Variable::varFromVPack(plan->getAst(), base, "inVariable")),
|
||||
_outVariable(Variable::varFromVPack(plan->getAst(), base, "outVariable")) {}
|
||||
|
||||
/// @brief toVelocyPack, for EnumerateListNode
|
||||
void EnumerateListNode::toVelocyPackHelper(VPackBuilder& nodes,
|
||||
|
@ -1336,8 +1319,8 @@ double LimitNode::estimateCost(size_t& nrItems) const {
|
|||
CalculationNode::CalculationNode(ExecutionPlan* plan,
|
||||
arangodb::velocypack::Slice const& base)
|
||||
: ExecutionNode(plan, base),
|
||||
_conditionVariable(varFromVPack(plan->getAst(), base, "conditionVariable", true)),
|
||||
_outVariable(varFromVPack(plan->getAst(), base, "outVariable")),
|
||||
_conditionVariable(Variable::varFromVPack(plan->getAst(), base, "conditionVariable", true)),
|
||||
_outVariable(Variable::varFromVPack(plan->getAst(), base, "outVariable")),
|
||||
_expression(new Expression(plan->getAst(), base)),
|
||||
_canRemoveIfThrows(false) {}
|
||||
|
||||
|
@ -1399,7 +1382,7 @@ SubqueryNode::SubqueryNode(ExecutionPlan* plan,
|
|||
arangodb::velocypack::Slice const& base)
|
||||
: ExecutionNode(plan, base),
|
||||
_subquery(nullptr),
|
||||
_outVariable(varFromVPack(plan->getAst(), base, "outVariable")) {}
|
||||
_outVariable(Variable::varFromVPack(plan->getAst(), base, "outVariable")) {}
|
||||
|
||||
/// @brief toVelocyPack, for SubqueryNode
|
||||
void SubqueryNode::toVelocyPackHelper(VPackBuilder& nodes, bool verbose) const {
|
||||
|
@ -1618,7 +1601,7 @@ bool SubqueryNode::isDeterministic() {
|
|||
|
||||
FilterNode::FilterNode(ExecutionPlan* plan, arangodb::velocypack::Slice const& base)
|
||||
: ExecutionNode(plan, base),
|
||||
_inVariable(varFromVPack(plan->getAst(), base, "inVariable")) {}
|
||||
_inVariable(Variable::varFromVPack(plan->getAst(), base, "inVariable")) {}
|
||||
|
||||
/// @brief toVelocyPack, for FilterNode
|
||||
void FilterNode::toVelocyPackHelper(VPackBuilder& nodes, bool verbose) const {
|
||||
|
@ -1663,7 +1646,7 @@ double FilterNode::estimateCost(size_t& nrItems) const {
|
|||
|
||||
ReturnNode::ReturnNode(ExecutionPlan* plan, arangodb::velocypack::Slice const& base)
|
||||
: ExecutionNode(plan, base),
|
||||
_inVariable(varFromVPack(plan->getAst(), base, "inVariable")) {}
|
||||
_inVariable(Variable::varFromVPack(plan->getAst(), base, "inVariable")) {}
|
||||
|
||||
/// @brief toVelocyPack, for ReturnNode
|
||||
void ReturnNode::toVelocyPackHelper(VPackBuilder& nodes, bool verbose) const {
|
||||
|
|
|
@ -56,6 +56,7 @@
|
|||
|
||||
#include "Basics/Common.h"
|
||||
#include "Aql/types.h"
|
||||
#include "Aql/DocumentProducingNode.h"
|
||||
#include "Aql/Expression.h"
|
||||
#include "Aql/Variable.h"
|
||||
#include "Aql/WalkerWorker.h"
|
||||
|
@ -576,9 +577,6 @@ class ExecutionNode {
|
|||
ExecutionNode const* getLoop() const;
|
||||
|
||||
protected:
|
||||
static Variable* varFromVPack(Ast* ast, arangodb::velocypack::Slice const& base,
|
||||
char const* variableName, bool optional = false);
|
||||
|
||||
/// @brief factory for sort elements
|
||||
static void getSortElements(SortElementVector& elements, ExecutionPlan* plan,
|
||||
arangodb::velocypack::Slice const& slice,
|
||||
|
@ -682,7 +680,7 @@ class SingletonNode : public ExecutionNode {
|
|||
};
|
||||
|
||||
/// @brief class EnumerateCollectionNode
|
||||
class EnumerateCollectionNode : public ExecutionNode {
|
||||
class EnumerateCollectionNode : public ExecutionNode, public DocumentProducingNode {
|
||||
friend class ExecutionNode;
|
||||
friend class ExecutionBlock;
|
||||
friend class EnumerateCollectionBlock;
|
||||
|
@ -693,18 +691,17 @@ class EnumerateCollectionNode : public ExecutionNode {
|
|||
TRI_vocbase_t* vocbase, Collection* collection,
|
||||
Variable const* outVariable, bool random)
|
||||
: ExecutionNode(plan, id),
|
||||
DocumentProducingNode(outVariable),
|
||||
_vocbase(vocbase),
|
||||
_collection(collection),
|
||||
_outVariable(outVariable),
|
||||
_random(random) {
|
||||
TRI_ASSERT(_vocbase != nullptr);
|
||||
TRI_ASSERT(_collection != nullptr);
|
||||
TRI_ASSERT(_outVariable != nullptr);
|
||||
}
|
||||
|
||||
EnumerateCollectionNode(ExecutionPlan* plan,
|
||||
arangodb::velocypack::Slice const& base);
|
||||
|
||||
|
||||
/// @brief return the type of the node
|
||||
NodeType getType() const override final { return ENUMERATE_COLLECTION; }
|
||||
|
||||
|
@ -738,9 +735,6 @@ class EnumerateCollectionNode : public ExecutionNode {
|
|||
/// @brief return the collection
|
||||
Collection const* collection() const { return _collection; }
|
||||
|
||||
/// @brief return the out variable
|
||||
Variable const* outVariable() const { return _outVariable; }
|
||||
|
||||
private:
|
||||
/// @brief the database
|
||||
TRI_vocbase_t* _vocbase;
|
||||
|
@ -748,9 +742,6 @@ class EnumerateCollectionNode : public ExecutionNode {
|
|||
/// @brief collection
|
||||
Collection* _collection;
|
||||
|
||||
/// @brief output variable
|
||||
Variable const* _outVariable;
|
||||
|
||||
/// @brief whether or not we want random iteration
|
||||
bool _random;
|
||||
};
|
||||
|
|
|
@ -243,6 +243,35 @@ void Expression::replaceVariableReference(Variable const* variable,
|
|||
_hasDeterminedAttributes = false;
|
||||
}
|
||||
|
||||
void Expression::replaceAttributeAccess(Variable const* variable,
|
||||
std::vector<std::string> const& attribute) {
|
||||
_node = _ast->clone(_node);
|
||||
TRI_ASSERT(_node != nullptr);
|
||||
|
||||
_node = _ast->replaceAttributeAccess(const_cast<AstNode*>(_node), variable, attribute);
|
||||
invalidate();
|
||||
|
||||
if (_type == ATTRIBUTE_SYSTEM || _type == ATTRIBUTE_DYNAMIC) {
|
||||
if (_built) {
|
||||
delete _accessor;
|
||||
_accessor = nullptr;
|
||||
_built = false;
|
||||
}
|
||||
// must even set back the expression type so the expression will be analyzed
|
||||
// again
|
||||
_type = UNPROCESSED;
|
||||
} else if (_type == SIMPLE) {
|
||||
// must rebuild the expression completely, as it may have changed drastically
|
||||
_built = false;
|
||||
_type = UNPROCESSED;
|
||||
_node->clearFlagsRecursive(); // recursively delete the node's flags
|
||||
}
|
||||
|
||||
const_cast<AstNode*>(_node)->clearFlags();
|
||||
_attributes.clear();
|
||||
_hasDeterminedAttributes = false;
|
||||
}
|
||||
|
||||
/// @brief invalidates an expression
|
||||
/// this only has an effect for V8-based functions, which need to be created,
|
||||
/// used and destroyed in the same context. when a V8 function is used across
|
||||
|
@ -657,7 +686,6 @@ AqlValue Expression::executeSimpleExpressionArray(
|
|||
auto member = node->getMemberUnchecked(i);
|
||||
bool localMustDestroy = false;
|
||||
AqlValue result = executeSimpleExpression(member, trx, localMustDestroy, false);
|
||||
|
||||
AqlValueGuard guard(result, localMustDestroy);
|
||||
result.toVelocyPack(trx, *builder.get(), false);
|
||||
}
|
||||
|
|
|
@ -37,7 +37,6 @@ namespace arangodb {
|
|||
namespace transaction {
|
||||
class Methods;
|
||||
}
|
||||
;
|
||||
|
||||
namespace basics {
|
||||
class StringBuffer;
|
||||
|
@ -71,7 +70,7 @@ class Expression {
|
|||
~Expression();
|
||||
|
||||
/// @brief replace the root node
|
||||
inline void replaceNode (AstNode* node) {
|
||||
void replaceNode (AstNode* node) {
|
||||
_node = node;
|
||||
invalidate();
|
||||
}
|
||||
|
@ -196,9 +195,10 @@ class Expression {
|
|||
|
||||
/// @brief replace a variable reference in the expression with another
|
||||
/// expression (e.g. inserting c = `a + b` into expression `c + 1` so the
|
||||
/// latter
|
||||
/// becomes `a + b + 1`
|
||||
/// latter becomes `a + b + 1`
|
||||
void replaceVariableReference(Variable const*, AstNode const*);
|
||||
|
||||
void replaceAttributeAccess(Variable const*, std::vector<std::string> const& attribute);
|
||||
|
||||
/// @brief invalidates an expression
|
||||
/// this only has an effect for V8-based functions, which need to be created,
|
||||
|
|
|
@@ -1846,9 +1846,7 @@ AqlValue Functions::Hash(arangodb::aql::Query* query,
// without precision loss when storing in JavaScript etc.
uint64_t hash = value.hash(trx) & 0x0007ffffffffffffULL;

transaction::BuilderLeaser builder(trx);
builder->add(VPackValue(hash));
return AqlValue(builder.get());
return AqlValue(hash);
}

/// @brief function UNIQUE
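The HASH hunk above drops the transaction::BuilderLeaser round-trip and wraps the masked integer in an AqlValue directly. The following standalone sketch (plain C++, none of the ArangoDB types; the input string and the use of std::hash are illustrative assumptions) shows why the 0x0007ffffffffffffULL mask matters: it keeps only the low 51 bits, so the hash survives conversion to an IEEE-754 double, which is all a JavaScript client can represent.

#include <cassert>
#include <cstdint>
#include <functional>
#include <iostream>
#include <string>

int main() {
  uint64_t raw = std::hash<std::string>{}("some document key");
  uint64_t masked = raw & 0x0007ffffffffffffULL;  // keep the low 51 bits

  // 51 bits < 53 bits, so the masked hash round-trips through an IEEE-754
  // double (the only number type JavaScript clients have) without loss.
  double asDouble = static_cast<double>(masked);
  assert(static_cast<uint64_t>(asDouble) == masked);

  std::cout << masked << "\n";
  return 0;
}

Returning the number directly avoids allocating and filling a one-off VPack builder per call, which is presumably the point of the "speedup AQL function HASH" item in the commit message.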
@ -288,9 +288,6 @@ GraphNode::GraphNode(ExecutionPlan* plan,
|
|||
_directions.emplace_back(d);
|
||||
}
|
||||
|
||||
// TODO SP differs from here:
|
||||
|
||||
// TODO: Can we remove this?
|
||||
// Graph Information. Do we need to reload the graph here?
|
||||
std::string graphName;
|
||||
if (base.hasKey("graph") && (base.get("graph").isString())) {
|
||||
|
@ -340,20 +337,18 @@ GraphNode::GraphNode(ExecutionPlan* plan,
|
|||
std::make_unique<aql::Collection>(v, _vocbase, AccessMode::Type::READ));
|
||||
}
|
||||
|
||||
// ENDOF TODO SP differs
|
||||
|
||||
// Out variables
|
||||
if (base.hasKey("vertexOutVariable")) {
|
||||
_vertexOutVariable =
|
||||
varFromVPack(plan->getAst(), base, "vertexOutVariable");
|
||||
Variable::varFromVPack(plan->getAst(), base, "vertexOutVariable");
|
||||
}
|
||||
if (base.hasKey("edgeOutVariable")) {
|
||||
_edgeOutVariable = varFromVPack(plan->getAst(), base, "edgeOutVariable");
|
||||
_edgeOutVariable = Variable::varFromVPack(plan->getAst(), base, "edgeOutVariable");
|
||||
}
|
||||
|
||||
// Temporary Filter Objects
|
||||
TRI_ASSERT(base.hasKey("tmpObjVariable"));
|
||||
_tmpObjVariable = varFromVPack(plan->getAst(), base, "tmpObjVariable");
|
||||
_tmpObjVariable = Variable::varFromVPack(plan->getAst(), base, "tmpObjVariable");
|
||||
|
||||
TRI_ASSERT(base.hasKey("tmpObjVarNode"));
|
||||
_tmpObjVarNode = new AstNode(plan->getAst(), base.get("tmpObjVarNode"));
|
||||
|
|
|
@ -48,6 +48,7 @@ using namespace arangodb::aql;
|
|||
|
||||
IndexBlock::IndexBlock(ExecutionEngine* engine, IndexNode const* en)
|
||||
: ExecutionBlock(engine, en),
|
||||
DocumentProducingBlock(en, _trx),
|
||||
_collection(en->collection()),
|
||||
_currentIndex(0),
|
||||
_indexes(en->getIndexes()),
|
||||
|
@ -405,7 +406,11 @@ bool IndexBlock::skipIndex(size_t atMost) {
|
|||
THROW_ARANGO_EXCEPTION(TRI_ERROR_DEBUG);
|
||||
}
|
||||
|
||||
if (_cursor->skip(atMost - _returned, _returned)) {
|
||||
uint64_t returned = (uint64_t) _returned;
|
||||
bool ok = _cursor->skip(atMost - returned, returned);
|
||||
_returned = (size_t) returned;
|
||||
|
||||
if (ok) {
|
||||
// We have skipped enough.
|
||||
// And this index could return more.
|
||||
// We are good.
|
||||
|
@@ -502,46 +507,29 @@ AqlItemBlock* IndexBlock::getSome(size_t atLeast, size_t atMost) {
IndexIterator::DocumentCallback callback;
if (_indexes.size() > 1) {
// Activate uniqueness checks
callback = [&](ManagedDocumentResult const& mdr) {
callback = [&](DocumentIdentifierToken const& token, VPackSlice slice) {
TRI_ASSERT(res.get() != nullptr);
if (!_isLastIndex) {
// insert & check for duplicates in one go
if (!_alreadyReturned.emplace(mdr.lastRevisionId()).second) {
if (!_alreadyReturned.emplace(token._data).second) {
// Document already in list. Skip this
return;
}
} else {
// only check for duplicates
if (_alreadyReturned.find(mdr.lastRevisionId()) != _alreadyReturned.end()) {
if (_alreadyReturned.find(token._data) != _alreadyReturned.end()) {
// Document found, skip
return;
}
}
res->setValue(_returned,
static_cast<arangodb::aql::RegisterId>(curRegs),
mdr.createAqlValue());

if (_returned > 0) {
// re-use already copied AqlValues
res->copyValuesFromFirstRow(_returned,
static_cast<RegisterId>(curRegs));
}
++_returned;

_documentProducer(res.get(), slice, curRegs, _returned);
};
} else {
// No uniqueness checks
callback = [&](ManagedDocumentResult const& mdr) {
callback = [&](DocumentIdentifierToken const& token, VPackSlice slice) {
TRI_ASSERT(res.get() != nullptr);
res->setValue(_returned,
static_cast<arangodb::aql::RegisterId>(curRegs),
mdr.createAqlValue());

if (_returned > 0) {
// re-use already copied AqlValues
res->copyValuesFromFirstRow(_returned,
static_cast<RegisterId>(curRegs));
}
++_returned;
_documentProducer(res.get(), slice, curRegs, _returned);
};
}
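Both callbacks above copied the incoming register values into the first result row only and then called copyValuesFromFirstRow for every further row. A toy sketch of that idea follows (toy types only, not AqlItemBlock or AqlValue; the names and sizes are invented):

#include <iostream>
#include <memory>
#include <string>
#include <vector>

int main() {
  size_t const rows = 3, inputRegs = 2;
  // each cell holds a shared value; "copying" a row copies pointers only
  std::vector<std::vector<std::shared_ptr<std::string>>> block(
      rows, std::vector<std::shared_ptr<std::string>>(inputRegs + 1));

  // row 0: materialise the incoming register values once
  block[0][0] = std::make_shared<std::string>("inputA");
  block[0][1] = std::make_shared<std::string>("inputB");

  for (size_t row = 0; row < rows; ++row) {
    if (row > 0) {
      // re-use what row 0 already copied
      for (size_t r = 0; r < inputRegs; ++r) block[row][r] = block[0][r];
    }
    block[row][inputRegs] = std::make_shared<std::string>("doc" + std::to_string(row));
  }

  std::cout << (block[2][0] == block[0][0]) << "\n";  // 1: shared, not re-copied
  return 0;
}

Sharing row 0's values keeps the per-row cost at one pointer copy per register instead of a deep copy of each value.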
@ -26,6 +26,7 @@
|
|||
#define ARANGOD_AQL_INDEX_BLOCK_H 1
|
||||
|
||||
#include "Aql/BlockCollector.h"
|
||||
#include "Aql/DocumentProducingBlock.h"
|
||||
#include "Aql/ExecutionBlock.h"
|
||||
#include "Aql/ExecutionNode.h"
|
||||
#include "Aql/IndexNode.h"
|
||||
|
@ -60,7 +61,7 @@ struct NonConstExpression {
|
|||
~NonConstExpression() { delete expression; }
|
||||
};
|
||||
|
||||
class IndexBlock final : public ExecutionBlock {
|
||||
class IndexBlock final : public ExecutionBlock, public DocumentProducingBlock {
|
||||
public:
|
||||
IndexBlock(ExecutionEngine* engine, IndexNode const* ep);
|
||||
|
||||
|
@ -162,7 +163,7 @@ class IndexBlock final : public ExecutionBlock {
|
|||
|
||||
/// @brief Counter how many documents have been returned/skipped
|
||||
/// during one call.
|
||||
uint64_t _returned;
|
||||
size_t _returned;
|
||||
|
||||
/// @brief Collect several AQLItemsBlocks
|
||||
BlockCollector _collector;
|
||||
|
|
|
@ -41,15 +41,48 @@ IndexNode::IndexNode(ExecutionPlan* plan, size_t id, TRI_vocbase_t* vocbase,
|
|||
std::vector<transaction::Methods::IndexHandle> const& indexes,
|
||||
Condition* condition, bool reverse)
|
||||
: ExecutionNode(plan, id),
|
||||
DocumentProducingNode(outVariable),
|
||||
_vocbase(vocbase),
|
||||
_collection(collection),
|
||||
_outVariable(outVariable),
|
||||
_indexes(indexes),
|
||||
_condition(condition),
|
||||
_reverse(reverse) {
|
||||
TRI_ASSERT(_vocbase != nullptr);
|
||||
TRI_ASSERT(_collection != nullptr);
|
||||
TRI_ASSERT(_outVariable != nullptr);
|
||||
TRI_ASSERT(_condition != nullptr);
|
||||
}
|
||||
|
||||
/// @brief constructor for IndexNode
|
||||
IndexNode::IndexNode(ExecutionPlan* plan, arangodb::velocypack::Slice const& base)
|
||||
: ExecutionNode(plan, base),
|
||||
DocumentProducingNode(plan, base),
|
||||
_vocbase(plan->getAst()->query()->vocbase()),
|
||||
_collection(plan->getAst()->query()->collections()->get(
|
||||
base.get("collection").copyString())),
|
||||
_indexes(),
|
||||
_condition(nullptr),
|
||||
_reverse(base.get("reverse").getBoolean()) {
|
||||
VPackSlice indexes = base.get("indexes");
|
||||
|
||||
if (!indexes.isArray()) {
|
||||
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "\"indexes\" attribute should be an array");
|
||||
}
|
||||
|
||||
_indexes.reserve(indexes.length());
|
||||
|
||||
auto trx = plan->getAst()->query()->trx();
|
||||
for (auto const& it : VPackArrayIterator(indexes)) {
|
||||
std::string iid = it.get("id").copyString();
|
||||
_indexes.emplace_back(trx->getIndexByIdentifier(_collection->getName(), iid));
|
||||
}
|
||||
|
||||
VPackSlice condition = base.get("condition");
|
||||
if (!condition.isObject()) {
|
||||
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "\"condition\" attribute should be an object");
|
||||
}
|
||||
|
||||
_condition = Condition::fromVPack(plan, condition);
|
||||
|
||||
TRI_ASSERT(_condition != nullptr);
|
||||
}
|
||||
|
||||
|
@ -62,9 +95,10 @@ void IndexNode::toVelocyPackHelper(VPackBuilder& nodes, bool verbose) const {
|
|||
nodes.add("database", VPackValue(_vocbase->name()));
|
||||
nodes.add("collection", VPackValue(_collection->getName()));
|
||||
nodes.add("satellite", VPackValue(_collection->isSatellite()));
|
||||
nodes.add(VPackValue("outVariable"));
|
||||
_outVariable->toVelocyPack(nodes);
|
||||
|
||||
|
||||
// add outvariable and projection
|
||||
DocumentProducingNode::toVelocyPack(nodes);
|
||||
|
||||
nodes.add(VPackValue("indexes"));
|
||||
{
|
||||
VPackArrayBuilder guard(&nodes);
|
||||
|
@ -96,40 +130,6 @@ ExecutionNode* IndexNode::clone(ExecutionPlan* plan, bool withDependencies,
|
|||
return static_cast<ExecutionNode*>(c);
|
||||
}
|
||||
|
||||
/// @brief constructor for IndexNode
|
||||
IndexNode::IndexNode(ExecutionPlan* plan, arangodb::velocypack::Slice const& base)
|
||||
: ExecutionNode(plan, base),
|
||||
_vocbase(plan->getAst()->query()->vocbase()),
|
||||
_collection(plan->getAst()->query()->collections()->get(
|
||||
base.get("collection").copyString())),
|
||||
_outVariable(varFromVPack(plan->getAst(), base, "outVariable")),
|
||||
_indexes(),
|
||||
_condition(nullptr),
|
||||
_reverse(base.get("reverse").getBoolean()) {
|
||||
VPackSlice indexes = base.get("indexes");
|
||||
|
||||
if (!indexes.isArray()) {
|
||||
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "\"indexes\" attribute should be an array");
|
||||
}
|
||||
|
||||
_indexes.reserve(indexes.length());
|
||||
|
||||
auto trx = plan->getAst()->query()->trx();
|
||||
for (auto const& it : VPackArrayIterator(indexes)) {
|
||||
std::string iid = it.get("id").copyString();
|
||||
_indexes.emplace_back(trx->getIndexByIdentifier(_collection->getName(), iid));
|
||||
}
|
||||
|
||||
VPackSlice condition = base.get("condition");
|
||||
if (!condition.isObject()) {
|
||||
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "\"condition\" attribute should be an object");
|
||||
}
|
||||
|
||||
_condition = Condition::fromVPack(plan, condition);
|
||||
|
||||
TRI_ASSERT(_condition != nullptr);
|
||||
}
|
||||
|
||||
/// @brief destroy the IndexNode
|
||||
IndexNode::~IndexNode() { delete _condition; }
|
||||
|
||||
|
|
|
@ -26,6 +26,7 @@
|
|||
|
||||
#include "Basics/Common.h"
|
||||
#include "Aql/Ast.h"
|
||||
#include "Aql/DocumentProducingNode.h"
|
||||
#include "Aql/ExecutionNode.h"
|
||||
#include "Aql/types.h"
|
||||
#include "Aql/Variable.h"
|
||||
|
@ -45,7 +46,7 @@ class ExecutionPlan;
|
|||
struct Index;
|
||||
|
||||
/// @brief class IndexNode
|
||||
class IndexNode : public ExecutionNode {
|
||||
class IndexNode : public ExecutionNode, public DocumentProducingNode {
|
||||
friend class ExecutionBlock;
|
||||
friend class IndexBlock;
|
||||
|
||||
|
@ -68,9 +69,6 @@ class IndexNode : public ExecutionNode {
|
|||
/// @brief return the collection
|
||||
Collection const* collection() const { return _collection; }
|
||||
|
||||
/// @brief return out variable
|
||||
Variable const* outVariable() const { return _outVariable; }
|
||||
|
||||
/// @brief return the condition for the node
|
||||
Condition* condition() const { return _condition; }
|
||||
|
||||
|
@ -113,9 +111,6 @@ class IndexNode : public ExecutionNode {
|
|||
/// @brief collection
|
||||
Collection const* _collection;
|
||||
|
||||
/// @brief output variable
|
||||
Variable const* _outVariable;
|
||||
|
||||
/// @brief the index
|
||||
std::vector<transaction::Methods::IndexHandle> _indexes;
|
||||
|
||||
|
|
|
@ -641,7 +641,7 @@ AqlItemBlock* UpdateBlock::work(std::vector<AqlItemBlock*>& blocks) {
|
|||
if (isMultiple) {
|
||||
object.add(tmp.slice());
|
||||
} else {
|
||||
object = tmp;
|
||||
object = std::move(tmp);
|
||||
}
|
||||
}
|
||||
else {
|
||||
|
@ -1079,7 +1079,7 @@ AqlItemBlock* ReplaceBlock::work(std::vector<AqlItemBlock*>& blocks) {
|
|||
if (isMultiple) {
|
||||
object.add(tmp.slice());
|
||||
} else {
|
||||
object = tmp;
|
||||
object = std::move(tmp);
|
||||
}
|
||||
} else {
|
||||
// Use the original slice for updating
|
||||
|
|
|
@ -40,9 +40,9 @@ ModificationNode::ModificationNode(ExecutionPlan* plan,
|
|||
base.get("collection").copyString())),
|
||||
_options(base),
|
||||
_outVariableOld(
|
||||
varFromVPack(plan->getAst(), base, "outVariableOld", Optional)),
|
||||
Variable::varFromVPack(plan->getAst(), base, "outVariableOld", Optional)),
|
||||
_outVariableNew(
|
||||
varFromVPack(plan->getAst(), base, "outVariableNew", Optional)) {
|
||||
Variable::varFromVPack(plan->getAst(), base, "outVariableNew", Optional)) {
|
||||
TRI_ASSERT(_vocbase != nullptr);
|
||||
TRI_ASSERT(_collection != nullptr);
|
||||
}
|
||||
|
@ -87,7 +87,7 @@ double ModificationNode::estimateCost(size_t& nrItems) const {
|
|||
|
||||
RemoveNode::RemoveNode(ExecutionPlan* plan, arangodb::velocypack::Slice const& base)
|
||||
: ModificationNode(plan, base),
|
||||
_inVariable(varFromVPack(plan->getAst(), base, "inVariable")) {}
|
||||
_inVariable(Variable::varFromVPack(plan->getAst(), base, "inVariable")) {}
|
||||
|
||||
void RemoveNode::toVelocyPackHelper(VPackBuilder& nodes, bool verbose) const {
|
||||
ModificationNode::toVelocyPackHelper(nodes, verbose);
|
||||
|
@ -122,7 +122,7 @@ ExecutionNode* RemoveNode::clone(ExecutionPlan* plan, bool withDependencies,
|
|||
|
||||
InsertNode::InsertNode(ExecutionPlan* plan, arangodb::velocypack::Slice const& base)
|
||||
: ModificationNode(plan, base),
|
||||
_inVariable(varFromVPack(plan->getAst(), base, "inVariable")) {}
|
||||
_inVariable(Variable::varFromVPack(plan->getAst(), base, "inVariable")) {}
|
||||
|
||||
/// @brief toVelocyPack
|
||||
void InsertNode::toVelocyPackHelper(VPackBuilder& nodes, bool verbose) const {
|
||||
|
@ -161,9 +161,9 @@ ExecutionNode* InsertNode::clone(ExecutionPlan* plan, bool withDependencies,
|
|||
|
||||
UpdateNode::UpdateNode(ExecutionPlan* plan, arangodb::velocypack::Slice const& base)
|
||||
: ModificationNode(plan, base),
|
||||
_inDocVariable(varFromVPack(plan->getAst(), base, "inDocVariable")),
|
||||
_inDocVariable(Variable::varFromVPack(plan->getAst(), base, "inDocVariable")),
|
||||
_inKeyVariable(
|
||||
varFromVPack(plan->getAst(), base, "inKeyVariable", Optional)) {}
|
||||
Variable::varFromVPack(plan->getAst(), base, "inKeyVariable", Optional)) {}
|
||||
|
||||
/// @brief toVelocyPack
|
||||
void UpdateNode::toVelocyPackHelper(VPackBuilder& nodes, bool verbose) const {
|
||||
|
@ -218,9 +218,9 @@ ExecutionNode* UpdateNode::clone(ExecutionPlan* plan, bool withDependencies,
|
|||
ReplaceNode::ReplaceNode(ExecutionPlan* plan,
|
||||
arangodb::velocypack::Slice const& base)
|
||||
: ModificationNode(plan, base),
|
||||
_inDocVariable(varFromVPack(plan->getAst(), base, "inDocVariable")),
|
||||
_inDocVariable(Variable::varFromVPack(plan->getAst(), base, "inDocVariable")),
|
||||
_inKeyVariable(
|
||||
varFromVPack(plan->getAst(), base, "inKeyVariable", Optional)) {}
|
||||
Variable::varFromVPack(plan->getAst(), base, "inKeyVariable", Optional)) {}
|
||||
|
||||
/// @brief toVelocyPack
|
||||
void ReplaceNode::toVelocyPackHelper(VPackBuilder& nodes, bool verbose) const {
|
||||
|
@ -274,9 +274,9 @@ ExecutionNode* ReplaceNode::clone(ExecutionPlan* plan, bool withDependencies,
|
|||
|
||||
UpsertNode::UpsertNode(ExecutionPlan* plan, arangodb::velocypack::Slice const& base)
|
||||
: ModificationNode(plan, base),
|
||||
_inDocVariable(varFromVPack(plan->getAst(), base, "inDocVariable")),
|
||||
_insertVariable(varFromVPack(plan->getAst(), base, "insertVariable")),
|
||||
_updateVariable(varFromVPack(plan->getAst(), base, "updateVariable")),
|
||||
_inDocVariable(Variable::varFromVPack(plan->getAst(), base, "inDocVariable")),
|
||||
_insertVariable(Variable::varFromVPack(plan->getAst(), base, "insertVariable")),
|
||||
_updateVariable(Variable::varFromVPack(plan->getAst(), base, "updateVariable")),
|
||||
_isReplace(base.get("isReplace").getBoolean()) {}
|
||||
|
||||
/// @brief toVelocyPack
|
||||
|
|
|
@@ -135,7 +135,7 @@ struct OptimizerRule {

// remove redundant OR conditions
removeRedundantOrRule_pass6,


applyGeoIndexRule,

useIndexesRule_pass6,

@@ -147,10 +147,10 @@ struct OptimizerRule {

// try to find sort blocks which are superseeded by indexes
useIndexForSortRule_pass6,


// sort values used in IN comparisons of remaining filters
sortInValuesRule_pass6,


// merge filters into graph traversals
optimizeTraversalsRule_pass6,
// remove redundant filters statements

@@ -163,6 +163,10 @@ struct OptimizerRule {
removeTraversalPathVariable_pass6,
prepareTraversalsRule_pass6,

// simplify an EnumerationCollectionNode that fetches an
// entire document to a projection of this document
reduceExtractionToProjectionRule_pass6,

/// Pass 9: push down calculations beyond FILTERs and LIMITs
moveCalculationsDownRule_pass9,

|
|
|
@ -28,6 +28,7 @@
|
|||
#include "Aql/CollectOptions.h"
|
||||
#include "Aql/Collection.h"
|
||||
#include "Aql/ConditionFinder.h"
|
||||
#include "Aql/DocumentProducingNode.h"
|
||||
#include "Aql/ExecutionEngine.h"
|
||||
#include "Aql/ExecutionNode.h"
|
||||
#include "Aql/ExecutionPlan.h"
|
||||
|
@ -1507,7 +1508,7 @@ void arangodb::aql::removeUnnecessaryCalculationsRule(
|
|||
// in this case we must not perform the replacements
|
||||
while (current != nullptr) {
|
||||
if (current->getType() == EN::COLLECT) {
|
||||
if (static_cast<CollectNode const*>(current)->hasOutVariable()) {
|
||||
if (static_cast<CollectNode const*>(current)->hasOutVariableButNoCount()) {
|
||||
hasCollectWithOutVariable = true;
|
||||
break;
|
||||
}
|
||||
|
@ -1538,7 +1539,7 @@ void arangodb::aql::removeUnnecessaryCalculationsRule(
|
|||
current->getVariablesUsedHere(vars);
|
||||
if (vars.find(outvars[0]) != vars.end()) {
|
||||
if (current->getType() == EN::COLLECT) {
|
||||
if (static_cast<CollectNode const*>(current)->hasOutVariable()) {
|
||||
if (static_cast<CollectNode const*>(current)->hasOutVariableButNoCount()) {
|
||||
// COLLECT with an INTO variable will collect all variables from
|
||||
// the scope, so we shouldn't try to remove or change the meaning
|
||||
// of variables
|
||||
|
@ -2364,9 +2365,8 @@ void arangodb::aql::scatterInClusterRule(Optimizer* opt,
|
|||
void arangodb::aql::distributeInClusterRule(Optimizer* opt,
|
||||
std::unique_ptr<ExecutionPlan> plan,
|
||||
OptimizerRule const* rule) {
|
||||
bool wasModified = false;
|
||||
|
||||
if (arangodb::ServerState::instance()->isCoordinator()) {
|
||||
bool wasModified = false;
|
||||
// we are a coordinator, we replace the root if it is a modification node
|
||||
|
||||
// only replace if it is the last node in the plan
|
||||
|
|
|
@@ -165,8 +165,8 @@ void OptimizerRulesFeature::addRules() {
OptimizerRule::removeTraversalPathVariable_pass6, DoesNotCreateAdditionalPlans, CanBeDisabled);

// prepare traversal info
registerRule("prepare-traversals", prepareTraversalsRule,
OptimizerRule::prepareTraversalsRule_pass6, DoesNotCreateAdditionalPlans, CanNotBeDisabled, CanBeDisabled);
registerHiddenRule("prepare-traversals", prepareTraversalsRule,
OptimizerRule::prepareTraversalsRule_pass6, DoesNotCreateAdditionalPlans, CanNotBeDisabled);

/// "Pass 5": try to remove redundant or unnecessary nodes (second try)
// remove filters from the query that are not necessary at all

@@ -212,12 +212,12 @@ void OptimizerRulesFeature::addRules() {
// try to find sort blocks which are superseeded by indexes
registerRule("use-index-for-sort", useIndexForSortRule,
OptimizerRule::useIndexForSortRule_pass6, DoesNotCreateAdditionalPlans, CanBeDisabled);


// sort in-values in filters (note: must come after
// remove-filter-covered-by-index rule)
registerRule("sort-in-values", sortInValuesRule, OptimizerRule::sortInValuesRule_pass6,
DoesNotCreateAdditionalPlans, CanBeDisabled);


// remove calculations that are never necessary
registerRule("remove-unnecessary-calculations-2",
removeUnnecessaryCalculationsRule,
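For orientation, here is a minimal sketch of the registration pattern used above: each rule carries a pass/level, a can-be-disabled flag and (for registerHiddenRule) a hidden flag, and the optimizer applies the registered rules in ascending level order. The struct, the level numbers and the rule bodies below are made up for illustration; only the two rule names come from the diff.

#include <algorithm>
#include <functional>
#include <iostream>
#include <string>
#include <vector>

struct Rule {
  std::string name;
  int level;            // mirrors the *_pass6 / *_pass9 enum ordering
  bool canBeDisabled;
  bool hidden;          // hidden rules run but are not listed to users
  std::function<void()> apply;
};

int main() {
  std::vector<Rule> rules;
  rules.push_back({"prepare-traversals", 650, false, true,
                   [] { std::cout << "traversal preparation\n"; }});
  rules.push_back({"reduce-extraction-to-projection", 660, true, false,
                   [] { std::cout << "projection rule\n"; }});

  // apply in ascending level order
  std::sort(rules.begin(), rules.end(),
            [](Rule const& a, Rule const& b) { return a.level < b.level; });
  for (auto const& r : rules) {
    r.apply();  // a real optimizer would also honour "disabled" options here
  }
  return 0;
}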
@ -136,7 +136,7 @@ ShortestPathNode::ShortestPathNode(ExecutionPlan* plan,
|
|||
_toCondition(nullptr) {
|
||||
// Start Vertex
|
||||
if (base.hasKey("startInVariable")) {
|
||||
_inStartVariable = varFromVPack(plan->getAst(), base, "startInVariable");
|
||||
_inStartVariable = Variable::varFromVPack(plan->getAst(), base, "startInVariable");
|
||||
} else {
|
||||
VPackSlice v = base.get("startVertexId");
|
||||
if (!v.isString()) {
|
||||
|
@ -153,7 +153,7 @@ ShortestPathNode::ShortestPathNode(ExecutionPlan* plan,
|
|||
|
||||
// Target Vertex
|
||||
if (base.hasKey("targetInVariable")) {
|
||||
_inTargetVariable = varFromVPack(plan->getAst(), base, "targetInVariable");
|
||||
_inTargetVariable = Variable::varFromVPack(plan->getAst(), base, "targetInVariable");
|
||||
} else {
|
||||
VPackSlice v = base.get("targetVertexId");
|
||||
if (!v.isString()) {
|
||||
|
|
|
@ -27,9 +27,11 @@
|
|||
|
||||
using namespace arangodb::aql;
|
||||
|
||||
namespace {
|
||||
|
||||
/// @brief whether or not an attribute is contained in a vector
|
||||
static bool IsContained (std::vector<std::vector<arangodb::basics::AttributeName>> const& attributes,
|
||||
std::vector<arangodb::basics::AttributeName> const& attribute) {
|
||||
static bool isContained(std::vector<std::vector<arangodb::basics::AttributeName>> const& attributes,
|
||||
std::vector<arangodb::basics::AttributeName> const& attribute) {
|
||||
for (auto const& it : attributes) {
|
||||
if (arangodb::basics::AttributeName::isIdentical(it, attribute, false)) {
|
||||
return true;
|
||||
|
@ -39,6 +41,8 @@ static bool IsContained (std::vector<std::vector<arangodb::basics::AttributeName
|
|||
return false;
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
/// @brief create an empty condition
|
||||
SortCondition::SortCondition()
|
||||
: _fields(),
|
||||
|
@ -164,8 +168,8 @@ size_t SortCondition::coveredAttributes(
|
|||
// no match
|
||||
bool isConstant = false;
|
||||
|
||||
if (IsContained(indexAttributes, field.second) &&
|
||||
IsContained(_constAttributes, field.second)) {
|
||||
if (isContained(indexAttributes, field.second) &&
|
||||
isContained(_constAttributes, field.second)) {
|
||||
// no field match, but a constant attribute
|
||||
isConstant = true;
|
||||
++fieldsPosition;
|
||||
|
@ -173,7 +177,7 @@ size_t SortCondition::coveredAttributes(
|
|||
}
|
||||
|
||||
if (!isConstant &&
|
||||
IsContained(_constAttributes, indexAttributes[i])) {
|
||||
isContained(_constAttributes, indexAttributes[i])) {
|
||||
// no field match, but a constant attribute
|
||||
isConstant = true;
|
||||
++i; // next index field
|
||||
|
|
|
@ -39,6 +39,7 @@
|
|||
#include "Transaction/Methods.h"
|
||||
#include "Utils/OperationCursor.h"
|
||||
#include "V8/v8-globals.h"
|
||||
#include "VocBase/ManagedDocumentResult.h"
|
||||
#include "VocBase/SingleServerTraverser.h"
|
||||
#include "VocBase/ticks.h"
|
||||
|
||||
|
|
|
@ -31,6 +31,7 @@
|
|||
#include "Aql/ExecutionPlan.h"
|
||||
#include "Aql/Query.h"
|
||||
#include "Aql/SortCondition.h"
|
||||
#include "Aql/Variable.h"
|
||||
#include "Cluster/ClusterComm.h"
|
||||
#include "Graph/BaseOptions.h"
|
||||
#include "Indexes/Index.h"
|
||||
|
@ -170,7 +171,7 @@ TraversalNode::TraversalNode(ExecutionPlan* plan,
|
|||
_toCondition(nullptr) {
|
||||
// In Vertex
|
||||
if (base.hasKey("inVariable")) {
|
||||
_inVariable = varFromVPack(plan->getAst(), base, "inVariable");
|
||||
_inVariable = Variable::varFromVPack(plan->getAst(), base, "inVariable");
|
||||
} else {
|
||||
VPackSlice v = base.get("vertexId");
|
||||
if (!v.isString()) {
|
||||
|
@ -203,7 +204,7 @@ TraversalNode::TraversalNode(ExecutionPlan* plan,
|
|||
|
||||
// Out variables
|
||||
if (base.hasKey("pathOutVariable")) {
|
||||
_pathOutVariable = varFromVPack(plan->getAst(), base, "pathOutVariable");
|
||||
_pathOutVariable = Variable::varFromVPack(plan->getAst(), base, "pathOutVariable");
|
||||
}
|
||||
|
||||
// Filter Condition Parts
|
||||
|
|
|
@@ -22,8 +22,11 @@
////////////////////////////////////////////////////////////////////////////////

#include "Variable.h"
#include "Aql/Ast.h"
#include "Aql/VariableGenerator.h"
#include "Basics/VelocyPackHelper.h"

#include <velocypack/Slice.h>
#include <velocypack/velocypack-aliases.h>

using namespace arangodb::aql;

@@ -72,3 +75,23 @@ Variable const* Variable::replace(

return variable;
}

/// @brief factory for (optional) variables from VPack
Variable* Variable::varFromVPack(Ast* ast,
arangodb::velocypack::Slice const& base,
char const* variableName, bool optional) {
VPackSlice variable = base.get(variableName);

if (variable.isNone()) {
if (optional) {
return nullptr;
}

std::string msg;
msg +=
"mandatory variable \"" + std::string(variableName) + "\" not found.";
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, msg);
}
return ast->variables()->createVariable(variable);
}

@@ -27,7 +27,6 @@
#include "Basics/Common.h"
#include "Aql/types.h"


namespace arangodb {
namespace velocypack {
class Builder;

@@ -35,9 +34,9 @@ class Slice;
}

namespace aql {
class Ast;

struct Variable {

/// @brief create the variable
Variable(std::string const&, VariableId);

@@ -75,6 +74,10 @@ struct Variable {
static Variable const* replace(
Variable const*, std::unordered_map<VariableId, Variable const*> const&);

/// @brief factory for (optional) variables from VPack
static Variable* varFromVPack(Ast* ast, arangodb::velocypack::Slice const& base,
char const* variableName, bool optional = false);

/// @brief variable name
std::string name;

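Variable::varFromVPack is essentially an "optional vs. mandatory attribute" lookup on a VPack object. Below is a self-contained sketch of that pattern using the VelocyPack library directly; getAttribute is a hypothetical helper and the attribute names are only examples, not the ArangoDB implementation.

#include <velocypack/Builder.h>
#include <velocypack/Slice.h>
#include <velocypack/velocypack-aliases.h>
#include <iostream>
#include <stdexcept>
#include <string>

// return the attribute, None if missing and optional, throw if missing and mandatory
VPackSlice getAttribute(VPackSlice base, std::string const& name, bool optional) {
  VPackSlice value = base.get(name);
  if (value.isNone()) {
    if (optional) {
      return VPackSlice::noneSlice();
    }
    throw std::runtime_error("mandatory attribute \"" + name + "\" not found.");
  }
  return value;
}

int main() {
  VPackBuilder b;
  b.openObject();
  b.add("outVariable", VPackValue("doc"));
  b.close();

  VPackSlice s = b.slice();
  std::cout << getAttribute(s, "outVariable", false).copyString() << "\n";  // "doc"
  std::cout << getAttribute(s, "inVariable", true).isNone() << "\n";        // 1: missing but optional
  return 0;
}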
@ -120,6 +120,8 @@ SET(ARANGOD_SOURCES
|
|||
Aql/Collections.cpp
|
||||
Aql/Condition.cpp
|
||||
Aql/ConditionFinder.cpp
|
||||
Aql/DocumentProducingBlock.cpp
|
||||
Aql/DocumentProducingNode.cpp
|
||||
Aql/EnumerateCollectionBlock.cpp
|
||||
Aql/EnumerateListBlock.cpp
|
||||
Aql/ExecutionBlock.cpp
|
||||
|
@ -236,6 +238,7 @@ SET(ARANGOD_SOURCES
|
|||
Graph/TraverserDocumentCache.cpp
|
||||
Indexes/Index.cpp
|
||||
Indexes/IndexIterator.cpp
|
||||
Indexes/IndexLookupContext.cpp
|
||||
Indexes/SimpleAttributeEqualityMatcher.cpp
|
||||
InternalRestHandler/InternalRestTraverserHandler.cpp
|
||||
Pregel/AggregatorHandler.cpp
|
||||
|
|
|
@ -150,9 +150,7 @@ void HttpCommTask::addResponse(HttpResponse* response,
|
|||
}
|
||||
|
||||
// reserve a buffer with some spare capacity
|
||||
WriteBuffer buffer(
|
||||
new StringBuffer(TRI_UNKNOWN_MEM_ZONE, responseBodyLength + 128, false),
|
||||
stat);
|
||||
WriteBuffer buffer(leaseStringBuffer(responseBodyLength + 128), stat);
|
||||
|
||||
// write header
|
||||
response->writeHeader(buffer._buffer);
|
||||
|
|
|
@ -109,9 +109,11 @@ void VstCommTask::addResponse(VstResponse* response, RequestStatistics* stat) {
|
|||
uint64_t const id = response_message._id;
|
||||
|
||||
std::vector<VPackSlice> slices;
|
||||
slices.push_back(response_message._header);
|
||||
|
||||
if (response->generateBody()) {
|
||||
slices.reserve(1 + response_message._payloads.size());
|
||||
slices.push_back(response_message._header);
|
||||
|
||||
for (auto& payload : response_message._payloads) {
|
||||
LOG_TOPIC(DEBUG, Logger::REQUESTS) << "\"vst-request-result\",\""
|
||||
<< (void*)this << "/" << id << "\","
|
||||
|
@ -119,6 +121,9 @@ void VstCommTask::addResponse(VstResponse* response, RequestStatistics* stat) {
|
|||
|
||||
slices.push_back(payload);
|
||||
}
|
||||
} else {
|
||||
// header only
|
||||
slices.push_back(response_message._header);
|
||||
}
|
||||
|
||||
// set some sensible maxchunk size and compression
|
||||
|
|
|
@@ -26,6 +26,8 @@
#include "Cluster/ServerState.h"
#include "Indexes/Index.h"
#include "Utils/CollectionNameResolver.h"
#include "VocBase/LogicalCollection.h"
#include "VocBase/ManagedDocumentResult.h"

using namespace arangodb;


@@ -57,9 +59,7 @@ bool IndexIterator::hasExtra() const {

bool IndexIterator::nextDocument(DocumentCallback const& cb, size_t limit) {
return next([this, &cb](DocumentIdentifierToken const& token) {
if (_collection->readDocument(_trx, token, *_mmdr)) {
cb(*_mmdr);
}
_collection->readDocumentWithCallback(_trx, token, cb);
}, limit);
}

@@ -62,7 +62,8 @@ class Methods;
class IndexIterator {
public:
typedef std::function<void(DocumentIdentifierToken const& token)> TokenCallback;
typedef std::function<void(ManagedDocumentResult const& mdr)> DocumentCallback;
typedef std::function<void(DocumentIdentifierToken const& token,
velocypack::Slice extra)> DocumentCallback;
typedef std::function<void(DocumentIdentifierToken const& token,
velocypack::Slice extra)> ExtraCallback;

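The typedef change above is the heart of the "single fetch optimization": consumers now receive the document's identifying token plus a velocypack::Slice over the stored bytes instead of a ManagedDocumentResult they would have to copy into first. A standalone sketch with toy stand-ins (Token, DocumentView and ToyCollection are invented for illustration, not the ArangoDB classes):

#include <cstdint>
#include <functional>
#include <iostream>
#include <string>
#include <unordered_map>

struct Token { uint64_t data; };            // stand-in for DocumentIdentifierToken
using DocumentView = std::string const&;    // stand-in for velocypack::Slice
using DocumentCallback = std::function<void(Token, DocumentView)>;

class ToyCollection {
 public:
  void insert(uint64_t rev, std::string doc) { _docs[rev] = std::move(doc); }

  // hand every stored document to the callback without copying it
  void readAllWithCallback(DocumentCallback const& cb) const {
    for (auto const& it : _docs) {
      cb(Token{it.first}, it.second);
    }
  }

 private:
  std::unordered_map<uint64_t, std::string> _docs;
};

int main() {
  ToyCollection c;
  c.insert(1, R"({"_key":"a"})");
  c.insert(2, R"({"_key":"b"})");
  c.readAllWithCallback([](Token t, DocumentView v) {
    std::cout << t.data << " -> " << v << "\n";
  });
  return 0;
}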
@ -0,0 +1,49 @@
|
|||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// DISCLAIMER
|
||||
///
|
||||
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
|
||||
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
|
||||
///
|
||||
/// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
/// you may not use this file except in compliance with the License.
|
||||
/// You may obtain a copy of the License at
|
||||
///
|
||||
/// http://www.apache.org/licenses/LICENSE-2.0
|
||||
///
|
||||
/// Unless required by applicable law or agreed to in writing, software
|
||||
/// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
/// See the License for the specific language governing permissions and
|
||||
/// limitations under the License.
|
||||
///
|
||||
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
|
||||
///
|
||||
/// @author Jan Steemann
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#include "IndexLookupContext.h"
|
||||
#include "Transaction/Methods.h"
|
||||
#include "VocBase/LogicalCollection.h"
|
||||
#include "VocBase/ManagedDocumentResult.h"
|
||||
|
||||
using namespace arangodb;
|
||||
|
||||
IndexLookupContext::IndexLookupContext(transaction::Methods* trx,
|
||||
LogicalCollection* collection,
|
||||
ManagedDocumentResult* result,
|
||||
size_t numFields)
|
||||
: _trx(trx), _collection(collection), _result(result), _numFields(numFields) {
|
||||
TRI_ASSERT(_trx != nullptr);
|
||||
TRI_ASSERT(_collection != nullptr);
|
||||
TRI_ASSERT(_result != nullptr);
|
||||
}
|
||||
|
||||
uint8_t const* IndexLookupContext::lookup(DocumentIdentifierToken token) {
|
||||
try {
|
||||
if (_collection->readDocument(_trx, token, *_result)) {
|
||||
return _result->vpack();
|
||||
}
|
||||
} catch (...) {
|
||||
}
|
||||
return nullptr;
|
||||
}
|
|
@ -25,36 +25,24 @@
|
|||
#define ARANGOD_INDEXES_INDEX_LOOKUP_CONTEXT_H 1
|
||||
|
||||
#include "Basics/Common.h"
|
||||
#include "Logger/Logger.h"
|
||||
#include "Transaction/Methods.h"
|
||||
#include "StorageEngine/DocumentIdentifierToken.h"
|
||||
#include "VocBase/LogicalCollection.h"
|
||||
#include "VocBase/ManagedDocumentResult.h"
|
||||
#include "VocBase/vocbase.h"
|
||||
|
||||
namespace arangodb {
|
||||
class LogicalCollection;
|
||||
class ManagedDocumentResult;
|
||||
|
||||
namespace transaction {
|
||||
class Methods;
|
||||
}
|
||||
|
||||
class IndexLookupContext {
|
||||
public:
|
||||
IndexLookupContext() = delete;
|
||||
IndexLookupContext(transaction::Methods* trx, LogicalCollection* collection, ManagedDocumentResult* result, size_t numFields)
|
||||
: _trx(trx), _collection(collection), _result(result), _numFields(numFields) {
|
||||
TRI_ASSERT(_trx != nullptr);
|
||||
TRI_ASSERT(_collection != nullptr);
|
||||
TRI_ASSERT(_result != nullptr);
|
||||
}
|
||||
|
||||
IndexLookupContext(transaction::Methods* trx, LogicalCollection* collection, ManagedDocumentResult* result, size_t numFields);
|
||||
~IndexLookupContext() {}
|
||||
|
||||
uint8_t const* lookup(DocumentIdentifierToken token) {
|
||||
try {
|
||||
if (_collection->readDocument(_trx, token, *_result)) {
|
||||
return _result->vpack();
|
||||
}
|
||||
} catch (...) {
|
||||
}
|
||||
return nullptr;
|
||||
}
|
||||
uint8_t const* lookup(DocumentIdentifierToken token);
|
||||
|
||||
ManagedDocumentResult* result() { return _result; }
|
||||
|
||||
|
|
|
@@ -1927,6 +1927,19 @@ bool MMFilesCollection::readDocument(transaction::Methods* trx,
return false;
}

bool MMFilesCollection::readDocumentWithCallback(transaction::Methods* trx,
DocumentIdentifierToken const& token,
IndexIterator::DocumentCallback const& cb) {
auto tkn = static_cast<MMFilesToken const*>(&token);
TRI_voc_rid_t revisionId = tkn->revisionId();
uint8_t const* vpack = lookupRevisionVPack(revisionId);
if (vpack != nullptr) {
cb(token, VPackSlice(vpack));
return true;
}
return false;
}

bool MMFilesCollection::readDocumentConditional(
transaction::Methods* trx, DocumentIdentifierToken const& token,
TRI_voc_tick_t maxTick, ManagedDocumentResult& result) {

@ -26,6 +26,7 @@
|
|||
|
||||
#include "Basics/Common.h"
|
||||
#include "Basics/ReadWriteLock.h"
|
||||
#include "Indexes/IndexIterator.h"
|
||||
#include "Indexes/IndexLookupContext.h"
|
||||
#include "MMFiles/MMFilesDatafileStatistics.h"
|
||||
#include "MMFiles/MMFilesDatafileStatisticsContainer.h"
|
||||
|
@ -34,6 +35,7 @@
|
|||
#include "MMFiles/MMFilesRevisionsCache.h"
|
||||
#include "StorageEngine/PhysicalCollection.h"
|
||||
#include "VocBase/KeyGenerator.h"
|
||||
#include "VocBase/LogicalCollection.h"
|
||||
#include "VocBase/ManagedDocumentResult.h"
|
||||
|
||||
struct MMFilesDatafile;
|
||||
|
@ -328,6 +330,10 @@ class MMFilesCollection final : public PhysicalCollection {
|
|||
bool readDocument(transaction::Methods* trx,
|
||||
DocumentIdentifierToken const& token,
|
||||
ManagedDocumentResult& result) override;
|
||||
|
||||
bool readDocumentWithCallback(transaction::Methods* trx,
|
||||
DocumentIdentifierToken const& token,
|
||||
IndexIterator::DocumentCallback const& cb) override;
|
||||
|
||||
bool readDocumentConditional(transaction::Methods* trx,
|
||||
DocumentIdentifierToken const& token,
|
||||
|
|
|
@ -86,6 +86,8 @@ class MMFilesEngine final : public StorageEngine {
|
|||
void stop() override;
|
||||
|
||||
bool supportsDfdb() const override { return true; }
|
||||
|
||||
bool useRawDocumentPointers() override { return true; }
|
||||
|
||||
std::shared_ptr<arangodb::velocypack::Builder>
|
||||
getReplicationApplierConfiguration(TRI_vocbase_t* vocbase,
|
||||
|
|
|
@@ -265,18 +265,16 @@ static bool IsEqualKeyElementMulti(void* userData, VPackSlice const* left,
TRI_ASSERT(context != nullptr);

// TODO: is it a performance improvement to compare the hash values first?
size_t const n = left->length();
VPackArrayIterator it(*left);

for (size_t i = 0; i < n; ++i) {
VPackSlice const leftVPack = left->at(i);
VPackSlice const rightVPack = right->slice(context, i);

int res = arangodb::basics::VelocyPackHelper::compare(leftVPack, rightVPack,
false);
while (it.valid()) {
int res = arangodb::basics::VelocyPackHelper::compare(it.value(), right->slice(context, it.index()), false);

if (res != 0) {
return false;
}

it.next();
}

return true;

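The hunk above replaces index-based access (left->at(i)) with a single VPackArrayIterator walk. The sketch below uses the VelocyPack library directly to show the iterator pattern; for compact (unindexed) arrays, at(i) has to skip i preceding members, so a counted loop over such an array does quadratic work while one iterator pass stays linear. The data in the example is made up.

#include <velocypack/Builder.h>
#include <velocypack/Iterator.h>
#include <velocypack/Slice.h>
#include <velocypack/velocypack-aliases.h>
#include <cstdint>
#include <iostream>

int main() {
  VPackBuilder b;
  b.openArray(/*unindexed*/ true);  // compact array: no offset table, members must be skipped
  for (int i = 0; i < 5; ++i) {
    b.add(VPackValue(i * i));
  }
  b.close();
  VPackSlice arr = b.slice();

  // single pass; each step only advances past one member
  for (VPackArrayIterator it(arr); it.valid(); it.next()) {
    std::cout << it.index() << " -> " << it.value().getNumber<int64_t>() << "\n";
  }
  return 0;
}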
@ -37,6 +37,7 @@
|
|||
#include "MMFiles/MMFilesPrimaryIndex.h"
|
||||
#include "MMFiles/MMFilesSkiplistIndex.h"
|
||||
#include "MMFiles/mmfiles-fulltext-index.h"
|
||||
#include "VocBase/LogicalCollection.h"
|
||||
#include "VocBase/voc-types.h"
|
||||
|
||||
#include <velocypack/Builder.h>
|
||||
|
|
|
@ -1977,6 +1977,7 @@ void MMFilesLogfileManager::stopMMFilesCollectorThread() {
|
|||
|
||||
if (status == MMFilesWalLogfile::StatusType::SEAL_REQUESTED) {
|
||||
canAbort = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -35,6 +35,7 @@
|
|||
#include "Transaction/Helpers.h"
|
||||
#include "Transaction/Methods.h"
|
||||
#include "VocBase/LogicalCollection.h"
|
||||
#include "VocBase/ManagedDocumentResult.h"
|
||||
|
||||
#include <velocypack/Iterator.h>
|
||||
#include <velocypack/velocypack-aliases.h>
|
||||
|
@ -448,18 +449,22 @@ void MMFilesSkiplistInLookupBuilder::buildSearchValues() {
|
|||
_upperBuilder->clear();
|
||||
_upperBuilder->openArray();
|
||||
|
||||
for (size_t i = 0; i < data.length() - 1; ++i) {
|
||||
size_t const n = data.length();
|
||||
|
||||
for (size_t i = 0; i < n - 1; ++i) {
|
||||
if (inPos != _inPositions.end() && i == inPos->field) {
|
||||
_lowerBuilder->add(data.at(i).at(inPos->current));
|
||||
_upperBuilder->add(data.at(i).at(inPos->current));
|
||||
VPackSlice s = data.at(i).at(inPos->current);
|
||||
_lowerBuilder->add(s);
|
||||
_upperBuilder->add(s);
|
||||
inPos++;
|
||||
} else {
|
||||
_lowerBuilder->add(data.at(i));
|
||||
_upperBuilder->add(data.at(i));
|
||||
VPackSlice s = data.at(i);
|
||||
_lowerBuilder->add(s);
|
||||
_upperBuilder->add(s);
|
||||
}
|
||||
}
|
||||
|
||||
VPackSlice bounds = data.at(data.length() - 1);
|
||||
VPackSlice bounds = data.at(n - 1);
|
||||
TRI_ASSERT(bounds.isArray());
|
||||
TRI_ASSERT(bounds.length() == 2);
|
||||
VPackSlice b = bounds.at(0);
|
||||
|
@ -477,7 +482,9 @@ void MMFilesSkiplistInLookupBuilder::buildSearchValues() {
|
|||
_upperBuilder->close();
|
||||
_upperSlice = _upperBuilder->slice();
|
||||
} else {
|
||||
for (size_t i = 0; i < data.length(); ++i) {
|
||||
size_t const n = data.length();
|
||||
|
||||
for (size_t i = 0; i < n; ++i) {
|
||||
if (inPos != _inPositions.end() && i == inPos->field) {
|
||||
_lowerBuilder->add(data.at(i).at(inPos->current));
|
||||
inPos++;
|
||||
|
|
|
@ -22,7 +22,6 @@
|
|||
|
||||
#include "GraphStore.h"
|
||||
|
||||
#include <algorithm>
|
||||
#include "Basics/Common.h"
|
||||
#include "Basics/MutexLocker.h"
|
||||
#include "Pregel/CommonFormats.h"
|
||||
|
@ -40,6 +39,7 @@
|
|||
#include "Utils/OperationOptions.h"
|
||||
#include "VocBase/EdgeCollectionInfo.h"
|
||||
#include "VocBase/LogicalCollection.h"
|
||||
#include "VocBase/ManagedDocumentResult.h"
|
||||
#include "VocBase/ticks.h"
|
||||
#include "VocBase/vocbase.h"
|
||||
|
||||
|
@ -49,6 +49,8 @@
|
|||
#include <unistd.h>
|
||||
#endif
|
||||
|
||||
#include <algorithm>
|
||||
|
||||
using namespace arangodb;
|
||||
using namespace arangodb::pregel;
|
||||
|
||||
|
@ -431,13 +433,12 @@ void GraphStore<V, E>::_loadEdges(transaction::Methods* trx,
|
|||
documentID.c_str(), edgeShard.c_str());
|
||||
}
|
||||
|
||||
auto cb = [&](ManagedDocumentResult const& mdr) {
|
||||
VPackSlice document(mdr.vpack());
|
||||
if (document.isExternal()) {
|
||||
document = document.resolveExternal();
|
||||
auto cb = [&](DocumentIdentifierToken const& token, VPackSlice slice) {
|
||||
if (slice.isExternal()) {
|
||||
slice = slice.resolveExternal();
|
||||
}
|
||||
|
||||
std::string toValue = document.get(StaticStrings::ToString).copyString();
|
||||
std::string toValue = slice.get(StaticStrings::ToString).copyString();
|
||||
std::size_t pos = toValue.find('/');
|
||||
std::string collectionName = toValue.substr(0, pos);
|
||||
|
||||
|
@ -466,7 +467,7 @@ void GraphStore<V, E>::_loadEdges(transaction::Methods* trx,
|
|||
if (res == TRI_ERROR_NO_ERROR) {
|
||||
// PregelShard sourceShard = (PregelShard)_config->shardId(edgeShard);
|
||||
edge->_targetShard = (PregelShard)_config->shardId(responsibleShard);
|
||||
_graphFormat->copyEdgeData(document, edge->data(), sizeof(E));
|
||||
_graphFormat->copyEdgeData(slice, edge->data(), sizeof(E));
|
||||
if (edge->_targetShard != (PregelShard)-1) {
|
||||
added++;
|
||||
offset++;
|
||||
|
|
|
@ -27,10 +27,11 @@
|
|||
#include "Aql/Variable.h"
|
||||
#include "Basics/ScopeGuard.h"
|
||||
#include "Cluster/ClusterMethods.h"
|
||||
#include "Transaction/StandaloneContext.h"
|
||||
#include "Utils/CollectionNameResolver.h"
|
||||
#include "Utils/OperationCursor.h"
|
||||
#include "Utils/SingleCollectionTransaction.h"
|
||||
#include "Transaction/StandaloneContext.h"
|
||||
#include "VocBase/ManagedDocumentResult.h"
|
||||
|
||||
#include <velocypack/Iterator.h>
|
||||
#include <velocypack/velocypack-aliases.h>
|
||||
|
|
|
@ -23,6 +23,7 @@ set(ROCKSDB_SOURCES
|
|||
RocksDBEngine/RocksDBKeyBounds.cpp
|
||||
RocksDBEngine/RocksDBLogValue.cpp
|
||||
RocksDBEngine/RocksDBMethods.cpp
|
||||
RocksDBEngine/RocksDBOptimizerRules.cpp
|
||||
RocksDBEngine/RocksDBPrimaryIndex.cpp
|
||||
RocksDBEngine/RocksDBReplicationCommon.cpp
|
||||
RocksDBEngine/RocksDBReplicationContext.cpp
|
||||
|
|
|
@ -59,6 +59,7 @@
|
|||
#include "Utils/Events.h"
|
||||
#include "Utils/OperationOptions.h"
|
||||
#include "Utils/SingleCollectionTransaction.h"
|
||||
#include "VocBase/KeyGenerator.h"
|
||||
#include "VocBase/LogicalCollection.h"
|
||||
#include "VocBase/ticks.h"
|
||||
#include "VocBase/voc-types.h"
|
||||
|
@ -754,15 +755,18 @@ bool RocksDBCollection::readDocument(transaction::Methods* trx,
|
|||
return false;
|
||||
}
|
||||
|
||||
// read using a token, bypassing the cache
|
||||
bool RocksDBCollection::readDocumentNoCache(
|
||||
transaction::Methods* trx, DocumentIdentifierToken const& token,
|
||||
ManagedDocumentResult& result) {
|
||||
// read using a token!
|
||||
bool RocksDBCollection::readDocumentWithCallback(transaction::Methods* trx,
|
||||
DocumentIdentifierToken const& token,
|
||||
IndexIterator::DocumentCallback const& cb) {
|
||||
// TODO: why do we have read(), readDocument() and lookupKey()?
|
||||
auto tkn = static_cast<RocksDBToken const*>(&token);
|
||||
RocksDBToken const* tkn = static_cast<RocksDBToken const*>(&token);
|
||||
TRI_voc_rid_t revisionId = tkn->revisionId();
|
||||
auto res = lookupRevisionVPack(revisionId, trx, result, false);
|
||||
return res.ok();
|
||||
if (revisionId != 0) {
|
||||
auto res = lookupRevisionVPack(revisionId, trx, cb, true);
|
||||
return res.ok();
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
Result RocksDBCollection::insert(arangodb::transaction::Methods* trx,
|
||||
|
@ -1247,10 +1251,9 @@ arangodb::Result RocksDBCollection::fillIndexes(
|
|||
RocksDBBatchedMethods batched(state, &batch);
|
||||
|
||||
arangodb::Result res;
|
||||
auto cb = [&](ManagedDocumentResult const& mdr) {
|
||||
auto cb = [&](DocumentIdentifierToken const& token, VPackSlice slice) {
|
||||
if (res.ok()) {
|
||||
res = ridx->insertInternal(trx, &batched, mdr.lastRevisionId(),
|
||||
VPackSlice(mdr.vpack()));
|
||||
res = ridx->insertInternal(trx, &batched, token._data, slice);
|
||||
if (res.ok()) {
|
||||
numDocsWritten++;
|
||||
}
|
||||
|
@ -1516,7 +1519,6 @@ arangodb::Result RocksDBCollection::lookupRevisionVPack(
|
|||
TRI_ASSERT(_objectId != 0);
|
||||
|
||||
auto key = RocksDBKey::Document(_objectId, revisionId);
|
||||
std::string value;
|
||||
|
||||
if (withCache && useCache()) {
|
||||
TRI_ASSERT(_cache != nullptr);
|
||||
|
@ -1524,14 +1526,63 @@ arangodb::Result RocksDBCollection::lookupRevisionVPack(
|
|||
auto f = _cache->find(key.string().data(),
|
||||
static_cast<uint32_t>(key.string().size()));
|
||||
if (f.found()) {
|
||||
value.append(reinterpret_cast<char const*>(f.value()->value()),
|
||||
static_cast<size_t>(f.value()->valueSize));
|
||||
mdr.setManaged(std::move(value), revisionId);
|
||||
std::string* value = mdr.prepareStringUsage();
|
||||
value->append(reinterpret_cast<char const*>(f.value()->value()),
|
||||
static_cast<size_t>(f.value()->valueSize));
|
||||
mdr.setManagedAfterStringUsage(revisionId);
|
||||
return {TRI_ERROR_NO_ERROR};
|
||||
}
|
||||
}
|
||||
|
||||
RocksDBMethods* mthd = RocksDBTransactionState::toMethods(trx);
|
||||
std::string* value = mdr.prepareStringUsage();
|
||||
Result res = mthd->Get(RocksDBColumnFamily::documents(), key, value);
|
||||
if (res.ok()) {
|
||||
if (withCache && useCache()) {
|
||||
TRI_ASSERT(_cache != nullptr);
|
||||
// write entry back to cache
|
||||
auto entry = cache::CachedValue::construct(
|
||||
key.string().data(), static_cast<uint32_t>(key.string().size()),
|
||||
value->data(), static_cast<uint64_t>(value->size()));
|
||||
auto status = _cache->insert(entry);
|
||||
if (status.fail()) {
|
||||
delete entry;
|
||||
}
|
||||
}
|
||||
|
||||
mdr.setManagedAfterStringUsage(revisionId);
|
||||
} else {
|
||||
LOG_TOPIC(ERR, Logger::FIXME)
|
||||
<< "NOT FOUND rev: " << revisionId << " trx: " << trx->state()->id()
|
||||
<< " seq: " << mthd->readOptions().snapshot->GetSequenceNumber()
|
||||
<< " objectID " << _objectId << " name: " << _logicalCollection->name();
|
||||
mdr.reset();
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
arangodb::Result RocksDBCollection::lookupRevisionVPack(
|
||||
TRI_voc_rid_t revisionId, transaction::Methods* trx,
|
||||
IndexIterator::DocumentCallback const& cb, bool withCache) const {
|
||||
TRI_ASSERT(trx->state()->isRunning());
|
||||
TRI_ASSERT(_objectId != 0);
|
||||
|
||||
auto key = RocksDBKey::Document(_objectId, revisionId);
|
||||
|
||||
if (withCache && useCache()) {
|
||||
TRI_ASSERT(_cache != nullptr);
|
||||
// check cache first for fast path
|
||||
auto f = _cache->find(key.string().data(),
|
||||
static_cast<uint32_t>(key.string().size()));
|
||||
if (f.found()) {
|
||||
cb(RocksDBToken(revisionId), VPackSlice(reinterpret_cast<char const*>(f.value()->value())));
|
||||
return {TRI_ERROR_NO_ERROR};
|
||||
}
|
||||
}
|
||||
|
||||
std::string value;
|
||||
auto state = RocksDBTransactionState::toState(trx);
|
||||
RocksDBMethods* mthd = state->rocksdbMethods();
|
||||
Result res = mthd->Get(RocksDBColumnFamily::documents(), key, &value);
|
||||
TRI_ASSERT(value.data());
|
||||
if (res.ok()) {
|
||||
|
@ -1547,13 +1598,12 @@ arangodb::Result RocksDBCollection::lookupRevisionVPack(
|
|||
}
|
||||
}
|
||||
|
||||
mdr.setManaged(std::move(value), revisionId);
|
||||
cb(RocksDBToken(revisionId), VPackSlice(value.data()));
|
||||
} else {
|
||||
LOG_TOPIC(ERR, Logger::FIXME)
|
||||
<< "NOT FOUND rev: " << revisionId << " trx: " << trx->state()->id()
|
||||
<< " seq: " << mthd->readOptions().snapshot->GetSequenceNumber()
|
||||
<< " objectID " << _objectId << " name: " << _logicalCollection->name();
|
||||
mdr.reset();
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
|
|
@ -29,7 +29,7 @@
|
|||
#include "Indexes/IndexLookupContext.h"
|
||||
#include "RocksDBEngine/RocksDBCommon.h"
|
||||
#include "StorageEngine/PhysicalCollection.h"
|
||||
#include "VocBase/KeyGenerator.h"
|
||||
#include "VocBase/LogicalCollection.h"
|
||||
#include "VocBase/ManagedDocumentResult.h"
|
||||
|
||||
namespace rocksdb {
|
||||
|
@ -139,10 +139,10 @@ class RocksDBCollection final : public PhysicalCollection {
|
|||
bool readDocument(transaction::Methods* trx,
|
||||
DocumentIdentifierToken const& token,
|
||||
ManagedDocumentResult& result) override;
|
||||
|
||||
bool readDocumentNoCache(transaction::Methods* trx,
|
||||
DocumentIdentifierToken const& token,
|
||||
ManagedDocumentResult& result);
|
||||
|
||||
bool readDocumentWithCallback(transaction::Methods* trx,
|
||||
DocumentIdentifierToken const& token,
|
||||
IndexIterator::DocumentCallback const& cb) override;
|
||||
|
||||
Result insert(arangodb::transaction::Methods* trx,
|
||||
arangodb::velocypack::Slice const newSlice,
|
||||
|
@ -244,6 +244,10 @@ class RocksDBCollection final : public PhysicalCollection {
|
|||
arangodb::Result lookupRevisionVPack(TRI_voc_rid_t, transaction::Methods*,
|
||||
arangodb::ManagedDocumentResult&,
|
||||
bool withCache) const;
|
||||
|
||||
arangodb::Result lookupRevisionVPack(TRI_voc_rid_t, transaction::Methods*,
|
||||
IndexIterator::DocumentCallback const& cb,
|
||||
bool withCache) const;
|
||||
|
||||
void recalculateIndexEstimates(std::vector<std::shared_ptr<Index>>& indexes);
|
||||
|
||||
|
|
|
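Note: the readDocumentWithCallback declaration above lets iterators hand the raw document slice to a callback instead of first copying it into a ManagedDocumentResult. A hedged sketch of that callback-based access pattern follows, with std::function and std::string_view standing in for the ArangoDB callback and slice types; MiniCollection and totalSize are illustrative names only.

// Callback-based document access sketch (types are stand-ins, not ArangoDB's).
#include <cstddef>
#include <cstdint>
#include <functional>
#include <string>
#include <string_view>
#include <vector>

using DocumentToken = std::uint64_t;                       // like a revision id
using DocumentCallback =
    std::function<void(DocumentToken, std::string_view)>;  // token + raw bytes

struct MiniCollection {
  std::vector<std::string> docs;  // documents stored by token

  // Invokes `cb` with a view into the stored document; no copy is made.
  bool readDocumentWithCallback(DocumentToken token, DocumentCallback const& cb) const {
    if (token >= docs.size()) return false;
    cb(token, std::string_view(docs[token]));
    return true;
  }
};

// Usage: the caller extracts only what it needs inside the callback, so the
// full document never has to be materialized into a separate result object.
inline std::size_t totalSize(MiniCollection const& c) {
  std::size_t sum = 0;
  for (DocumentToken t = 0; t < c.docs.size(); ++t) {
    c.readDocumentWithCallback(t, [&](DocumentToken, std::string_view doc) {
      sum += doc.size();
    });
  }
  return sum;
}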
@ -44,90 +44,6 @@
|
|||
namespace arangodb {
|
||||
namespace rocksutils {
|
||||
|
||||
uint64_t uint64FromPersistent(char const* p) {
|
||||
uint64_t value = 0;
|
||||
uint64_t x = 0;
|
||||
uint8_t const* ptr = reinterpret_cast<uint8_t const*>(p);
|
||||
uint8_t const* end = ptr + sizeof(uint64_t);
|
||||
do {
|
||||
value += static_cast<uint64_t>(*ptr++) << x;
|
||||
x += 8;
|
||||
} while (ptr < end);
|
||||
return value;
|
||||
}
|
||||
|
||||
void uint64ToPersistent(char* p, uint64_t value) {
|
||||
char* end = p + sizeof(uint64_t);
|
||||
do {
|
||||
*p++ = static_cast<uint8_t>(value & 0xffU);
|
||||
value >>= 8;
|
||||
} while (p < end);
|
||||
}
|
||||
|
||||
void uint64ToPersistent(std::string& p, uint64_t value) {
|
||||
size_t len = 0;
|
||||
do {
|
||||
p.push_back(static_cast<char>(value & 0xffU));
|
||||
value >>= 8;
|
||||
} while (++len < sizeof(uint64_t));
|
||||
}
|
||||
|
||||
uint32_t uint32FromPersistent(char const* p) {
|
||||
uint32_t value = 0;
|
||||
uint32_t x = 0;
|
||||
uint8_t const* ptr = reinterpret_cast<uint8_t const*>(p);
|
||||
uint8_t const* end = ptr + sizeof(uint32_t);
|
||||
do {
|
||||
value += static_cast<uint16_t>(*ptr++) << x;
|
||||
x += 8;
|
||||
} while (ptr < end);
|
||||
return value;
|
||||
}
|
||||
|
||||
void uint32ToPersistent(char* p, uint32_t value) {
|
||||
char* end = p + sizeof(uint32_t);
|
||||
do {
|
||||
*p++ = static_cast<uint8_t>(value & 0xffU);
|
||||
value >>= 8;
|
||||
} while (p < end);
|
||||
}
|
||||
|
||||
void uint32ToPersistent(std::string& p, uint32_t value) {
|
||||
size_t len = 0;
|
||||
do {
|
||||
p.push_back(static_cast<char>(value & 0xffU));
|
||||
value >>= 8;
|
||||
} while (++len < sizeof(uint32_t));
|
||||
}
|
||||
|
||||
uint16_t uint16FromPersistent(char const* p) {
|
||||
uint16_t value = 0;
|
||||
uint16_t x = 0;
|
||||
uint8_t const* ptr = reinterpret_cast<uint8_t const*>(p);
|
||||
uint8_t const* end = ptr + sizeof(uint16_t);
|
||||
do {
|
||||
value += static_cast<uint16_t>(*ptr++) << x;
|
||||
x += 8;
|
||||
} while (ptr < end);
|
||||
return value;
|
||||
}
|
||||
|
||||
void uint16ToPersistent(char* p, uint16_t value) {
|
||||
char* end = p + sizeof(uint16_t);
|
||||
do {
|
||||
*p++ = static_cast<uint8_t>(value & 0xffU);
|
||||
value >>= 8;
|
||||
} while (p < end);
|
||||
}
|
||||
|
||||
void uint16ToPersistent(std::string& p, uint16_t value) {
|
||||
size_t len = 0;
|
||||
do {
|
||||
p.push_back(static_cast<char>(value & 0xffU));
|
||||
value >>= 8;
|
||||
} while (++len < sizeof(uint16_t));
|
||||
}
|
||||
|
||||
rocksdb::TransactionDB* globalRocksDB() {
|
||||
StorageEngine* engine = EngineSelectorFeature::ENGINE;
|
||||
TRI_ASSERT(engine != nullptr);
|
||||
|
|
|
@ -40,6 +40,13 @@
|
|||
#include <rocksdb/options.h>
|
||||
#include <rocksdb/status.h>
|
||||
|
||||
#undef TRI_USE_FAST_UNALIGNED_DATA_ACCESS
|
||||
#ifdef TRI_UNALIGNED_ACCESS
|
||||
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
|
||||
#define TRI_USE_FAST_UNALIGNED_DATA_ACCESS
|
||||
#endif
|
||||
#endif
|
||||
|
||||
namespace rocksdb {
|
||||
class TransactionDB;
|
||||
class DB;
|
||||
|
@ -134,17 +141,81 @@ inline double intToDouble(uint64_t i) {
|
|||
return d;
|
||||
}
|
||||
|
||||
uint64_t uint64FromPersistent(char const* p);
|
||||
void uint64ToPersistent(char* p, uint64_t value);
|
||||
void uint64ToPersistent(std::string& out, uint64_t value);
|
||||
template<typename T>
|
||||
inline T uintFromPersistent(char const* p) {
|
||||
#ifdef TRI_USE_FAST_UNALIGNED_DATA_ACCESS
|
||||
return *reinterpret_cast<T const*>(p);
|
||||
#else
|
||||
T value = 0;
|
||||
T x = 0;
|
||||
uint8_t const* ptr = reinterpret_cast<uint8_t const*>(p);
|
||||
uint8_t const* end = ptr + sizeof(T);
|
||||
do {
|
||||
value += static_cast<T>(*ptr++) << x;
|
||||
x += 8;
|
||||
} while (ptr < end);
|
||||
return value;
|
||||
#endif
|
||||
}
|
||||
|
||||
uint32_t uint32FromPersistent(char const* p);
|
||||
void uint32ToPersistent(char* p, uint32_t value);
|
||||
void uint32ToPersistent(std::string& out, uint32_t value);
|
||||
inline uint64_t uint64FromPersistent(char const* p) {
|
||||
return uintFromPersistent<uint64_t>(p);
|
||||
}
|
||||
|
||||
inline uint32_t uint32FromPersistent(char const* p) {
|
||||
return uintFromPersistent<uint32_t>(p);
|
||||
}
|
||||
|
||||
inline uint16_t uint16FromPersistent(char const* p) {
|
||||
return uintFromPersistent<uint16_t>(p);
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
inline void uintToPersistent(char* p, T value) {
|
||||
#ifdef TRI_USE_FAST_UNALIGNED_DATA_ACCESS
|
||||
*reinterpret_cast<T*>(p) = value;
|
||||
#else
|
||||
char* end = p + sizeof(T);
|
||||
do {
|
||||
*p++ = static_cast<uint8_t>(value & 0xffU);
|
||||
value >>= 8;
|
||||
} while (p < end);
|
||||
#endif
|
||||
}
|
||||
|
||||
inline void uint64ToPersistent(char* p, uint64_t value) {
|
||||
return uintToPersistent<uint64_t>(p, value);
|
||||
}
|
||||
|
||||
inline void uint32ToPersistent(char* p, uint32_t value) {
|
||||
return uintToPersistent<uint32_t>(p, value);
|
||||
}
|
||||
|
||||
inline void uint16ToPersistent(char* p, uint16_t value) {
|
||||
return uintToPersistent<uint16_t>(p, value);
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
inline void uintToPersistent(std::string& p, T value) {
|
||||
size_t len = 0;
|
||||
do {
|
||||
p.push_back(static_cast<char>(value & 0xffU));
|
||||
value >>= 8;
|
||||
} while (++len < sizeof(T));
|
||||
}
|
||||
|
||||
inline void uint64ToPersistent(std::string& out, uint64_t value) {
|
||||
return uintToPersistent<uint64_t>(out, value);
|
||||
}
|
||||
|
||||
inline void uint32ToPersistent(std::string& out, uint32_t value) {
|
||||
return uintToPersistent<uint32_t>(out, value);
|
||||
}
|
||||
|
||||
inline void uint16ToPersistent(std::string& out, uint16_t value) {
|
||||
return uintToPersistent<uint16_t>(out, value);
|
||||
}
|
||||
|
||||
uint16_t uint16FromPersistent(char const* p);
|
||||
void uint16ToPersistent(char* p, uint16_t value);
|
||||
void uint16ToPersistent(std::string& out, uint16_t value);
|
||||
|
||||
rocksdb::TransactionDB* globalRocksDB();
|
||||
RocksDBEngine* globalRocksEngine();
|
||||
|
|
|
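Note: the templated uintFromPersistent/uintToPersistent above replaces three hand-written width-specific variants with one byte-wise little-endian codec, plus a direct-load fast path when TRI_USE_FAST_UNALIGNED_DATA_ACCESS is defined. The following is a self-contained sketch of the portable variant with a round-trip check; only the function names are taken from the diff, everything else is illustrative.

// Portable little-endian encode/decode for fixed-width unsigned integers.
#include <cassert>
#include <cstdint>
#include <string>

template <typename T>
T uintFromPersistent(char const* p) {
  T value = 0;
  unsigned shift = 0;
  auto const* ptr = reinterpret_cast<unsigned char const*>(p);
  for (std::size_t i = 0; i < sizeof(T); ++i) {
    value += static_cast<T>(ptr[i]) << shift;  // least significant byte first
    shift += 8;
  }
  return value;
}

template <typename T>
void uintToPersistent(std::string& out, T value) {
  for (std::size_t i = 0; i < sizeof(T); ++i) {
    out.push_back(static_cast<char>(value & 0xffU));  // emit low byte first
    value >>= 8;
  }
}

int main() {
  std::string buf;
  uintToPersistent<std::uint64_t>(buf, 0x1122334455667788ULL);
  assert(buf.size() == sizeof(std::uint64_t));
  assert(uintFromPersistent<std::uint64_t>(buf.data()) == 0x1122334455667788ULL);
  return 0;
}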
@ -57,6 +57,7 @@
|
|||
#include "RocksDBEngine/RocksDBIndexFactory.h"
|
||||
#include "RocksDBEngine/RocksDBKey.h"
|
||||
#include "RocksDBEngine/RocksDBLogValue.h"
|
||||
#include "RocksDBEngine/RocksDBOptimizerRules.h"
|
||||
#include "RocksDBEngine/RocksDBPrefixExtractor.h"
|
||||
#include "RocksDBEngine/RocksDBReplicationManager.h"
|
||||
#include "RocksDBEngine/RocksDBReplicationTailing.h"
|
||||
|
@ -1153,8 +1154,7 @@ void RocksDBEngine::addAqlFunctions() {
|
|||
|
||||
/// @brief Add engine-specific optimizer rules
|
||||
void RocksDBEngine::addOptimizerRules() {
|
||||
// there are no specific optimizer rules here
|
||||
// TODO: add geo index optimization once there is the geo index
|
||||
RocksDBOptimizerRules::registerResources();
|
||||
}
|
||||
|
||||
/// @brief Add engine-specific V8 functions
|
||||
|
|
|
@ -82,6 +82,7 @@ class RocksDBEngine final : public StorageEngine {
|
|||
void unprepare() override;
|
||||
|
||||
bool supportsDfdb() const override { return false; }
|
||||
bool useRawDocumentPointers() override { return false; }
|
||||
|
||||
TransactionManager* createTransactionManager() override;
|
||||
transaction::ContextData* createTransactionContextData() override;
|
||||
|
|
|
@ -129,12 +129,11 @@ void RocksDBExportCursor::dump(VPackBuilder& builder) {
|
|||
builder.add("result", VPackValue(VPackValueType::Array));
|
||||
size_t const n = batchSize();
|
||||
|
||||
auto cb = [&, this](ManagedDocumentResult const& mdr) {
|
||||
auto cb = [&, this](DocumentIdentifierToken const& token, VPackSlice slice) {
|
||||
if (_position == _size) {
|
||||
return false;
|
||||
}
|
||||
builder.openObject();
|
||||
VPackSlice const slice(mdr.vpack());
|
||||
// Copy over shaped values
|
||||
for (auto const& entry : VPackObjectIterator(slice)) {
|
||||
std::string key(entry.key.copyString());
|
||||
|
|
|
@ -36,6 +36,7 @@
|
|||
#include "RocksDBEngine/RocksDBPrimaryIndex.h"
|
||||
#include "RocksDBEngine/RocksDBSkiplistIndex.h"
|
||||
#include "StorageEngine/EngineSelectorFeature.h"
|
||||
#include "VocBase/LogicalCollection.h"
|
||||
#include "VocBase/ticks.h"
|
||||
#include "VocBase/voc-types.h"
|
||||
|
||||
|
|
|
@ -21,6 +21,7 @@
|
|||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#include "RocksDBIterators.h"
|
||||
#include "Logger/Logger.h"
|
||||
#include "Random/RandomGenerator.h"
|
||||
#include "RocksDBEngine/RocksDBCollection.h"
|
||||
#include "RocksDBEngine/RocksDBColumnFamily.h"
|
||||
|
@ -126,10 +127,8 @@ bool RocksDBAllIndexIterator::nextDocument(
|
|||
}
|
||||
|
||||
while (limit > 0) {
|
||||
TRI_voc_rid_t revisionId =
|
||||
RocksDBKey::revisionId(RocksDBEntryType::Document, _iterator->key());
|
||||
_mmdr->setManaged((uint8_t const*)_iterator->value().data(), revisionId);
|
||||
cb(*_mmdr);
|
||||
TRI_voc_rid_t revisionId = RocksDBKey::revisionId(RocksDBEntryType::Document, _iterator->key());
|
||||
cb(RocksDBToken(revisionId), VPackSlice(_iterator->value().data()));
|
||||
--limit;
|
||||
|
||||
if (_reverse) {
|
||||
|
@ -238,11 +237,8 @@ bool RocksDBAnyIndexIterator::nextDocument(
|
|||
}
|
||||
|
||||
while (limit > 0) {
|
||||
TRI_voc_rid_t revisionId =
|
||||
RocksDBKey::revisionId(RocksDBEntryType::Document, _iterator->key());
|
||||
_mmdr->setManaged((uint8_t const*)_iterator->value().data(), revisionId);
|
||||
cb(*_mmdr);
|
||||
|
||||
TRI_voc_rid_t revisionId = RocksDBKey::revisionId(RocksDBEntryType::Document, _iterator->key());
|
||||
cb(RocksDBToken(revisionId), VPackSlice(_iterator->value().data()));
|
||||
--limit;
|
||||
_returned++;
|
||||
_iterator->Next();
|
||||
|
|
|
@ -205,6 +205,7 @@ RocksDBKey::RocksDBKey(RocksDBEntryType type,
|
|||
RocksDBSettingsType st) : _type(type), _buffer() {
|
||||
switch (_type) {
|
||||
case RocksDBEntryType::SettingsValue: {
|
||||
_buffer.reserve(2);
|
||||
_buffer.push_back(static_cast<char>(_type));
|
||||
_buffer.push_back(static_cast<char>(st));
|
||||
break;
|
||||
|
@ -223,8 +224,7 @@ RocksDBKey::RocksDBKey(RocksDBEntryType type, uint64_t first)
|
|||
case RocksDBEntryType::IndexEstimateValue:
|
||||
case RocksDBEntryType::KeyGeneratorValue:
|
||||
case RocksDBEntryType::ReplicationApplierConfig: {
|
||||
size_t length = sizeof(char) + sizeof(uint64_t);
|
||||
_buffer.reserve(length);
|
||||
_buffer.reserve(sizeof(char) + sizeof(uint64_t));
|
||||
_buffer.push_back(static_cast<char>(_type));
|
||||
uint64ToPersistent(_buffer, first); // databaseId
|
||||
break;
|
||||
|
@ -240,12 +240,11 @@ RocksDBKey::RocksDBKey(RocksDBEntryType type, uint64_t first,
|
|||
: _type(type), _buffer() {
|
||||
switch (_type) {
|
||||
case RocksDBEntryType::UniqueVPackIndexValue: {
|
||||
size_t l = sizeof(uint64_t) + static_cast<size_t>(slice.byteSize());
|
||||
_buffer.reserve(l);
|
||||
size_t const byteSize = static_cast<size_t>(slice.byteSize());
|
||||
_buffer.reserve(sizeof(uint64_t) + byteSize);
|
||||
uint64ToPersistent(_buffer, first);
|
||||
_buffer.append(reinterpret_cast<char const*>(slice.begin()),
|
||||
static_cast<size_t>(slice.byteSize()));
|
||||
TRI_ASSERT(_buffer.size() == l);
|
||||
_buffer.append(reinterpret_cast<char const*>(slice.begin()), byteSize);
|
||||
TRI_ASSERT(_buffer.size() == sizeof(uint64_t) + byteSize);
|
||||
break;
|
||||
}
|
||||
|
||||
|
@ -267,7 +266,7 @@ RocksDBKey::RocksDBKey(RocksDBEntryType type, uint64_t first, uint64_t second)
|
|||
|
||||
case RocksDBEntryType::Collection:
|
||||
case RocksDBEntryType::View: {
|
||||
_buffer.reserve(sizeof(char) + (2 * sizeof(uint64_t)));
|
||||
_buffer.reserve(sizeof(char) + 2 * sizeof(uint64_t));
|
||||
_buffer.push_back(static_cast<char>(_type));
|
||||
uint64ToPersistent(_buffer, first); // databaseId
|
||||
uint64ToPersistent(_buffer, second); // collectionId
|
||||
|
@ -288,10 +287,10 @@ RocksDBKey::RocksDBKey(RocksDBEntryType type, uint64_t first,
|
|||
// - Key: 8-byte object ID of index + VPack array with index value(s)
|
||||
// + revisionID
|
||||
// - Value: empty
|
||||
_buffer.reserve(2 * sizeof(uint64_t) + second.byteSize());
|
||||
size_t const byteSize = static_cast<size_t>(second.byteSize());
|
||||
_buffer.reserve(2 * sizeof(uint64_t) + byteSize);
|
||||
uint64ToPersistent(_buffer, first);
|
||||
_buffer.append(reinterpret_cast<char const*>(second.begin()),
|
||||
static_cast<size_t>(second.byteSize()));
|
||||
_buffer.append(reinterpret_cast<char const*>(second.begin()), byteSize);
|
||||
uint64ToPersistent(_buffer, third);
|
||||
break;
|
||||
}
|
||||
|
@ -420,7 +419,6 @@ TRI_voc_rid_t RocksDBKey::revisionId(RocksDBEntryType type, char const* data,
|
|||
// + 8 byte revision ID + 1-byte 0xff
|
||||
return uint64FromPersistent(data + size - sizeof(uint64_t) -
|
||||
sizeof(char));
|
||||
break;
|
||||
}
|
||||
|
||||
default:
|
||||
|
|
|
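Note: the RocksDBKey constructor hunks above reserve the exact key length up front (type byte, 8-byte ids, optional VelocyPack payload) before appending, so the backing std::string never reallocates while the key is built. A small sketch of that layout idea follows; the entry-type value and the exact field order are illustrative only and do not claim to match ArangoDB's real key format.

// Sketch: compose a storage key as <type byte><object id LE><revision id LE>.
#include <cassert>
#include <cstdint>
#include <string>

enum class EntryType : char { Document = 0x30 };  // value chosen for illustration

inline void appendUint64LE(std::string& out, std::uint64_t v) {
  for (int i = 0; i < 8; ++i) {
    out.push_back(static_cast<char>(v & 0xffU));  // little-endian byte order
    v >>= 8;
  }
}

inline std::string makeDocumentKey(std::uint64_t objectId, std::uint64_t revisionId) {
  std::string key;
  key.reserve(sizeof(char) + 2 * sizeof(std::uint64_t));  // exact size, no realloc
  key.push_back(static_cast<char>(EntryType::Document));
  appendUint64LE(key, objectId);
  appendUint64LE(key, revisionId);
  assert(key.size() == 1 + 2 * sizeof(std::uint64_t));
  return key;
}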
@ -47,7 +47,7 @@ class RocksDBKey {
|
|||
RocksDBKey(RocksDBKey const& other)
|
||||
: _type(other._type), _buffer(other._buffer) {}
|
||||
|
||||
RocksDBKey(RocksDBKey&& other)
|
||||
RocksDBKey(RocksDBKey&& other) noexcept
|
||||
: _type(other._type), _buffer(std::move(other._buffer)) {}
|
||||
|
||||
RocksDBKey& operator=(RocksDBKey const& other) = delete;
|
||||
|
|
|
@ -81,9 +81,9 @@ bool RocksDBReadOnlyMethods::Exists(rocksdb::ColumnFamilyHandle* cf,
RocksDBKey const& key) {
TRI_ASSERT(cf != nullptr);
std::string val; // do not care about value
bool mayExists = _db->KeyMayExist(_state->_rocksReadOptions, cf, key.string(),
bool mayExist = _db->KeyMayExist(_state->_rocksReadOptions, cf, key.string(),
&val, nullptr);
if (mayExists) {
if (mayExist) {
rocksdb::Status s =
_db->Get(_state->_rocksReadOptions, cf, key.string(), &val);
return !s.IsNotFound();
|
|
|
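Note: the Exists() hunk above only renames mayExists to mayExist; the logic itself is the common RocksDB pattern of asking KeyMayExist() first (a cheap bloom-filter/memtable probe that can return false positives but never false negatives) and issuing a definitive Get() only when it answers "maybe". A hedged sketch against the public rocksdb API:

// Existence check sketch using RocksDB's KeyMayExist fast path.
#include <rocksdb/db.h>
#include <string>

// Returns true only if `key` definitely exists in `cf`.
bool keyExists(rocksdb::DB* db, rocksdb::ColumnFamilyHandle* cf,
               rocksdb::Slice const& key) {
  rocksdb::ReadOptions opts;
  std::string val;  // the value itself is not needed here
  // Cheap probe: false means "definitely not there", true means "maybe there".
  if (!db->KeyMayExist(opts, cf, key, &val, nullptr)) {
    return false;
  }
  // Possible false positive: confirm with a real lookup.
  rocksdb::Status s = db->Get(opts, cf, key, &val);
  return !s.IsNotFound();
}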
@ -0,0 +1,171 @@
|
|||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// DISCLAIMER
|
||||
///
|
||||
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
|
||||
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
|
||||
///
|
||||
/// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
/// you may not use this file except in compliance with the License.
|
||||
/// You may obtain a copy of the License at
|
||||
///
|
||||
/// http://www.apache.org/licenses/LICENSE-2.0
|
||||
///
|
||||
/// Unless required by applicable law or agreed to in writing, software
|
||||
/// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
/// See the License for the specific language governing permissions and
|
||||
/// limitations under the License.
|
||||
///
|
||||
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
|
||||
///
|
||||
/// @author Jan Steemann
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#include "RocksDBOptimizerRules.h"
|
||||
#include "Aql/Collection.h"
|
||||
#include "Aql/Condition.h"
|
||||
#include "Aql/ExecutionNode.h"
|
||||
#include "Aql/ExecutionPlan.h"
|
||||
#include "Aql/Function.h"
|
||||
#include "Aql/IndexNode.h"
|
||||
#include "Aql/Optimizer.h"
|
||||
#include "Aql/OptimizerRule.h"
|
||||
#include "Aql/OptimizerRulesFeature.h"
|
||||
#include "Aql/SortNode.h"
|
||||
#include "Cluster/ServerState.h"
|
||||
#include "Indexes/Index.h"
|
||||
#include "VocBase/LogicalCollection.h"
|
||||
|
||||
using namespace arangodb;
|
||||
using namespace arangodb::aql;
|
||||
using EN = arangodb::aql::ExecutionNode;
|
||||
|
||||
class AttributeAccessReplacer final : public WalkerWorker<ExecutionNode> {
|
||||
public:
|
||||
AttributeAccessReplacer(Variable const* variable, std::vector<std::string> const& attribute)
|
||||
: _variable(variable), _attribute(attribute) {
|
||||
TRI_ASSERT(_variable != nullptr);
|
||||
TRI_ASSERT(!_attribute.empty());
|
||||
}
|
||||
|
||||
bool before(ExecutionNode* en) override final {
|
||||
if (en->getType() == EN::CALCULATION) {
|
||||
auto node = static_cast<CalculationNode*>(en);
|
||||
node->expression()->replaceAttributeAccess(_variable, _attribute);
|
||||
}
|
||||
|
||||
// always continue
|
||||
return false;
|
||||
}
|
||||
|
||||
private:
|
||||
Variable const* _variable;
|
||||
std::vector<std::string> _attribute;
|
||||
};
|
||||
|
||||
void RocksDBOptimizerRules::registerResources() {
|
||||
OptimizerRulesFeature::registerRule("reduce-extraction-to-projection", reduceExtractionToProjectionRule,
|
||||
OptimizerRule::reduceExtractionToProjectionRule_pass6, false, true);
|
||||
}
|
||||
|
||||
// simplify an EnumerationCollectionNode that fetches an entire document to a projection of this document
|
||||
void RocksDBOptimizerRules::reduceExtractionToProjectionRule(Optimizer* opt,
|
||||
std::unique_ptr<ExecutionPlan> plan,
|
||||
OptimizerRule const* rule) {
|
||||
// These are all the nodes where we start traversing (including all
|
||||
// subqueries)
|
||||
SmallVector<ExecutionNode*>::allocator_type::arena_type a;
|
||||
SmallVector<ExecutionNode*> nodes{a};
|
||||
|
||||
std::vector<ExecutionNode::NodeType> const types = {ExecutionNode::ENUMERATE_COLLECTION};
|
||||
plan->findNodesOfType(nodes, types, true);
|
||||
|
||||
bool modified = false;
|
||||
std::unordered_set<Variable const*> vars;
|
||||
std::vector<std::string> attributeNames;
|
||||
|
||||
for (auto const& n : nodes) {
|
||||
bool stop = false;
|
||||
bool optimize = false;
|
||||
attributeNames.clear();
|
||||
DocumentProducingNode* e = dynamic_cast<DocumentProducingNode*>(n);
|
||||
if (e == nullptr) {
|
||||
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "cannot convert node to DocumentProducingNode");
|
||||
}
|
||||
|
||||
Variable const* v = e->outVariable();
|
||||
Variable const* replaceVar = nullptr;
|
||||
|
||||
ExecutionNode* current = n->getFirstParent();
|
||||
while (current != nullptr) {
|
||||
if (current->getType() == EN::CALCULATION) {
|
||||
Expression* exp = static_cast<CalculationNode*>(current)->expression();
|
||||
|
||||
if (exp != nullptr) {
|
||||
AstNode const* node = exp->node();
|
||||
vars.clear();
|
||||
current->getVariablesUsedHere(vars);
|
||||
|
||||
if (vars.find(v) != vars.end()) {
|
||||
if (attributeNames.empty()) {
|
||||
vars.clear();
|
||||
current->getVariablesUsedHere(vars);
|
||||
|
||||
if (node != nullptr) {
|
||||
if (Ast::populateSingleAttributeAccess(node, v, attributeNames)) {
|
||||
replaceVar = static_cast<CalculationNode*>(current)->outVariable();
|
||||
optimize = true;
|
||||
TRI_ASSERT(!attributeNames.empty());
|
||||
} else {
|
||||
stop = true;
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
stop = true;
|
||||
break;
|
||||
}
|
||||
} else if (node != nullptr) {
|
||||
if (!Ast::variableOnlyUsedForSingleAttributeAccess(node, v, attributeNames)) {
|
||||
stop = true;
|
||||
break;
|
||||
}
|
||||
} else {
|
||||
// don't know what to do
|
||||
stop = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
vars.clear();
|
||||
current->getVariablesUsedHere(vars);
|
||||
|
||||
if (vars.find(v) != vars.end()) {
|
||||
// original variable is still used here
|
||||
stop = true;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if (stop) {
|
||||
break;
|
||||
}
|
||||
|
||||
current = current->getFirstParent();
|
||||
}
|
||||
|
||||
if (optimize && !stop) {
|
||||
TRI_ASSERT(replaceVar != nullptr);
|
||||
|
||||
AttributeAccessReplacer finder(v, attributeNames);
|
||||
plan->root()->walk(&finder);
|
||||
|
||||
std::reverse(attributeNames.begin(), attributeNames.end());
|
||||
e->setProjection(std::move(attributeNames));
|
||||
|
||||
modified = true;
|
||||
}
|
||||
}
|
||||
|
||||
opt->addPlan(std::move(plan), rule, modified);
|
||||
}
|
|
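Note: the new optimizer rule above rewrites an ENUMERATE_COLLECTION node into a single-attribute projection when every later use of the document variable is an access to that one attribute; otherwise it must leave the full document extraction in place. The following is a much-simplified standalone sketch of only that decision logic; the Use struct and projectableAttribute are invented stand-ins and not the AQL optimizer API, which instead walks CALCULATION nodes via Ast::populateSingleAttributeAccess and variableOnlyUsedForSingleAttributeAccess.

// Simplified sketch of the "reduce extraction to projection" decision.
#include <optional>
#include <string>
#include <vector>

struct Use {                 // one downstream use of the document variable
  bool isAttributeAccess;    // e.g. doc.name
  std::string attribute;     // which attribute is accessed
};

// Returns the single attribute to project, or nullopt if the full document
// is still needed somewhere and the optimization must not fire.
inline std::optional<std::string> projectableAttribute(std::vector<Use> const& uses) {
  std::optional<std::string> attr;
  for (auto const& use : uses) {
    if (!use.isAttributeAccess) {
      return std::nullopt;                    // whole document escapes somewhere
    }
    if (!attr) {
      attr = use.attribute;                   // first attribute seen
    } else if (*attr != use.attribute) {
      return std::nullopt;                    // more than one attribute is used
    }
  }
  return attr;  // no uses at all also yields nullopt, i.e. stay conservative
}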
@ -0,0 +1,45 @@
|
|||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// DISCLAIMER
|
||||
///
|
||||
/// Copyright 2014-2016 ArangoDB GmbH, Cologne, Germany
|
||||
/// Copyright 2004-2014 triAGENS GmbH, Cologne, Germany
|
||||
///
|
||||
/// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
/// you may not use this file except in compliance with the License.
|
||||
/// You may obtain a copy of the License at
|
||||
///
|
||||
/// http://www.apache.org/licenses/LICENSE-2.0
|
||||
///
|
||||
/// Unless required by applicable law or agreed to in writing, software
|
||||
/// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
/// See the License for the specific language governing permissions and
|
||||
/// limitations under the License.
|
||||
///
|
||||
/// Copyright holder is ArangoDB GmbH, Cologne, Germany
|
||||
///
|
||||
/// @author Jan Steemann
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#ifndef ARANGOD_ROCKSDB_ROCKSDB_OPTIMIZER_RULES_H
|
||||
#define ARANGOD_ROCKSDB_ROCKSDB_OPTIMIZER_RULES_H 1
|
||||
|
||||
#include "Basics/Common.h"
|
||||
|
||||
namespace arangodb {
|
||||
namespace aql {
|
||||
class ExecutionPlan;
|
||||
class Optimizer;
|
||||
struct OptimizerRule;
|
||||
}
|
||||
|
||||
struct RocksDBOptimizerRules {
|
||||
static void registerResources();
|
||||
|
||||
// simplify an EnumerationCollectionNode that fetches an entire document to a projection of this document
|
||||
static void reduceExtractionToProjectionRule(aql::Optimizer* opt, std::unique_ptr<aql::ExecutionPlan> plan, aql::OptimizerRule const* rule);
|
||||
};
|
||||
|
||||
} // namespace arangodb
|
||||
|
||||
#endif
|
|
@ -26,6 +26,7 @@
|
|||
#include "Basics/StringBuffer.h"
|
||||
#include "Basics/StringRef.h"
|
||||
#include "Basics/VPackStringBufferAdapter.h"
|
||||
#include "Logger/Logger.h"
|
||||
#include "RocksDBEngine/RocksDBCollection.h"
|
||||
#include "RocksDBEngine/RocksDBCommon.h"
|
||||
#include "RocksDBEngine/RocksDBIterators.h"
|
||||
|
|
|
@ -29,6 +29,7 @@
|
|||
#include "RocksDBEngine/RocksDBReplicationCommon.h"
|
||||
#include "RocksDBEngine/RocksDBToken.h"
|
||||
#include "Transaction/Methods.h"
|
||||
#include "VocBase/ManagedDocumentResult.h"
|
||||
#include "VocBase/vocbase.h"
|
||||
|
||||
#include <velocypack/Builder.h>
|
||||
|
|
|
@ -81,12 +81,14 @@ static std::vector<arangodb::basics::AttributeName> const KeyAttribute{
|
|||
RocksDBVPackIndexIterator::RocksDBVPackIndexIterator(
|
||||
LogicalCollection* collection, transaction::Methods* trx,
|
||||
ManagedDocumentResult* mmdr, arangodb::RocksDBVPackIndex const* index,
|
||||
bool reverse, RocksDBKeyBounds&& bounds)
|
||||
bool reverse, bool singleElementFetch,
|
||||
RocksDBKeyBounds&& bounds)
|
||||
: IndexIterator(collection, trx, mmdr, index),
|
||||
_index(index),
|
||||
_cmp(index->comparator()),
|
||||
_reverse(reverse),
|
||||
_bounds(bounds) {
|
||||
_singleElementFetch(singleElementFetch),
|
||||
_bounds(std::move(bounds)) {
|
||||
TRI_ASSERT(index->columnFamily() == RocksDBColumnFamily::vpack());
|
||||
|
||||
RocksDBMethods* mthds = RocksDBTransactionState::toMethods(trx);
|
||||
|
@ -146,6 +148,13 @@ bool RocksDBVPackIndexIterator::next(TokenCallback const& cb, size_t limit) {
: RocksDBKey::revisionId(_bounds.type(), _iterator->key());
cb(RocksDBToken(revisionId));

if (_singleElementFetch) {
// we only need to fetch a single element from the index and are done then
// this is a useful optimization because seeking forwards or backwards with the
// iterator can be very expensive
return false;
}

--limit;
if (_reverse) {
_iterator->Prev();
|
@ -738,13 +747,15 @@ RocksDBVPackIndexIterator* RocksDBVPackIndex::lookup(
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool const singleElementFetch = (_unique && lastNonEq.isNone() && searchValues.length() == _fields.size());
|
||||
|
||||
RocksDBKeyBounds bounds = _unique ? RocksDBKeyBounds::UniqueVPackIndex(
|
||||
_objectId, leftBorder, rightBorder)
|
||||
: RocksDBKeyBounds::VPackIndex(
|
||||
_objectId, leftBorder, rightBorder);
|
||||
return new RocksDBVPackIndexIterator(_collection, trx, mmdr, this, reverse,
|
||||
std::move(bounds));
|
||||
singleElementFetch, std::move(bounds));
|
||||
}
|
||||
|
||||
bool RocksDBVPackIndex::accessFitsIndex(
|
||||
|
|
|
@ -66,7 +66,8 @@ class RocksDBVPackIndexIterator final : public IndexIterator {
|
|||
transaction::Methods* trx,
|
||||
ManagedDocumentResult* mmdr,
|
||||
arangodb::RocksDBVPackIndex const* index,
|
||||
bool reverse, RocksDBKeyBounds&& bounds);
|
||||
bool reverse, bool singleElementFetch,
|
||||
RocksDBKeyBounds&& bounds);
|
||||
|
||||
~RocksDBVPackIndexIterator() = default;
|
||||
|
||||
|
@ -88,6 +89,7 @@ class RocksDBVPackIndexIterator final : public IndexIterator {
|
|||
rocksdb::Comparator const* _cmp;
|
||||
std::unique_ptr<rocksdb::Iterator> _iterator;
|
||||
bool const _reverse;
|
||||
bool const _singleElementFetch;
|
||||
RocksDBKeyBounds _bounds;
|
||||
rocksdb::Slice _upperBound; // used for iterate_upper_bound
|
||||
};
|
||||
|
|
|
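Note: _singleElementFetch above is set when a lookup fully constrains a unique index (all index fields bound with equality and no range condition), so the iterator can stop after the first hit instead of doing another expensive seek. A sketch of that idea with a generic iterator wrapper; the callback and container types are stand-ins, not the RocksDB iterator API.

// Sketch: stop after one element when the lookup can match at most one entry.
#include <cstddef>
#include <functional>
#include <vector>

struct SingleFetchIterator {
  std::vector<int> const& hits;        // stand-in for the underlying index range
  bool singleElementFetch;             // true for a fully-bound unique lookup
  std::size_t pos = 0;

  // Returns true if more results may follow, false when the iterator is done.
  bool next(std::function<void(int)> const& cb, std::size_t limit) {
    while (limit > 0 && pos < hits.size()) {
      cb(hits[pos++]);
      if (singleElementFetch) {
        // at most one match is possible; skip the seek to the next key entirely
        return false;
      }
      --limit;
    }
    return pos < hits.size();
  }
};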
@ -22,7 +22,7 @@
|
|||
/// @author Daniel H. Larkin
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
#include "RocksDBEngine/RocksDBValue.h"
|
||||
#include "RocksDBValue.h"
|
||||
#include "Basics/Exceptions.h"
|
||||
#include "Basics/StaticStrings.h"
|
||||
#include "Basics/StringUtils.h"
|
||||
|
|
|
@ -111,7 +111,7 @@ class RocksDBValue {
|
|||
RocksDBValue(RocksDBEntryType type, rocksdb::Slice slice)
|
||||
: _type(type), _buffer(slice.data(), slice.size()) {}
|
||||
|
||||
RocksDBValue(RocksDBValue&& other)
|
||||
RocksDBValue(RocksDBValue&& other) noexcept
|
||||
: _type(other._type), _buffer(std::move(other._buffer)) {}
|
||||
|
||||
private:
|
||||
|
|
|
@ -53,6 +53,7 @@ SocketTask::SocketTask(arangodb::EventLoop loop,
|
|||
_connectionStatistics(nullptr),
|
||||
_connectionInfo(std::move(connectionInfo)),
|
||||
_readBuffer(TRI_UNKNOWN_MEM_ZONE, READ_BLOCK_SIZE + 1, false),
|
||||
_stringBuffers{_stringBuffersArena},
|
||||
_writeBuffer(nullptr, nullptr),
|
||||
_peer(std::move(socket)),
|
||||
_keepAliveTimeout(static_cast<long>(keepAliveTimeout * 1000)),
|
||||
|
@ -93,6 +94,11 @@ SocketTask::~SocketTask() {
|
|||
if (_peer) {
|
||||
_peer->close(err);
|
||||
}
|
||||
|
||||
// delete all string buffers we have allocated
|
||||
for (auto& it : _stringBuffers) {
|
||||
delete it;
|
||||
}
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------------
|
||||
|
@ -235,13 +241,63 @@ void SocketTask::writeWriteBuffer() {
|
|||
}
|
||||
});
|
||||
}
|
||||
|
||||
StringBuffer* SocketTask::leaseStringBuffer(size_t length) {
|
||||
_lock.assertLockedByCurrentThread();
|
||||
|
||||
StringBuffer* buffer = nullptr;
|
||||
if (!_stringBuffers.empty()) {
|
||||
buffer = _stringBuffers.back();
|
||||
TRI_ASSERT(buffer != nullptr);
|
||||
TRI_ASSERT(buffer->length() == 0);
|
||||
|
||||
size_t const n = buffer->capacity();
|
||||
if (n < length) {
|
||||
if (buffer->reserve(length) != TRI_ERROR_NO_ERROR) {
|
||||
THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY);
|
||||
}
|
||||
}
|
||||
_stringBuffers.pop_back();
|
||||
} else {
|
||||
buffer = new StringBuffer(TRI_UNKNOWN_MEM_ZONE, length, false);
|
||||
}
|
||||
|
||||
TRI_ASSERT(buffer != nullptr);
|
||||
|
||||
// should not happen. but still check for safety reasons
|
||||
if (buffer->capacity() < length) {
|
||||
delete buffer;
|
||||
THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY);
|
||||
}
|
||||
|
||||
return buffer;
|
||||
}
|
||||
|
||||
void SocketTask::returnStringBuffer(StringBuffer* buffer) {
|
||||
TRI_ASSERT(buffer != nullptr);
|
||||
_lock.assertLockedByCurrentThread();
|
||||
|
||||
if (_stringBuffers.size() > 4 || buffer->capacity() >= 4 * 1024 * 1024) {
|
||||
// don't keep too many buffers around and don't hog too much memory
|
||||
delete buffer;
|
||||
return;
|
||||
}
|
||||
|
||||
try {
|
||||
buffer->reset();
|
||||
_stringBuffers.emplace_back(buffer);
|
||||
} catch (...) {
|
||||
delete buffer;
|
||||
}
|
||||
}
|
||||
|
||||
// caller must hold the _lock
|
||||
bool SocketTask::completedWriteBuffer() {
|
||||
_lock.assertLockedByCurrentThread();
|
||||
|
||||
RequestStatistics::SET_WRITE_END(_writeBuffer._statistics);
|
||||
_writeBuffer.release();
|
||||
// try to recycle the string buffer
|
||||
_writeBuffer.release(this);
|
||||
|
||||
if (_writeBuffers.empty()) {
|
||||
if (_closeRequested) {
|
||||
|
|
|
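Note: leaseStringBuffer/returnStringBuffer above keep a small per-SocketTask pool of previously used buffers, capping both the pool size and the capacity of buffers allowed back in so memory is not hogged. A standalone sketch of the same policy with std::string as the buffer type follows; the limits mirror the diff, but the BufferPool class itself is made up.

// Buffer pool sketch: reuse a few buffers instead of reallocating per request.
#include <cstddef>
#include <memory>
#include <string>
#include <vector>

class BufferPool {
 public:
  // Hands out a buffer with at least `length` bytes of capacity.
  std::unique_ptr<std::string> lease(std::size_t length) {
    if (!_pool.empty()) {
      auto buffer = std::move(_pool.back());
      _pool.pop_back();
      if (buffer->capacity() < length) {
        buffer->reserve(length);        // grow the recycled buffer if needed
      }
      return buffer;
    }
    auto buffer = std::make_unique<std::string>();
    buffer->reserve(length);
    return buffer;
  }

  // Takes a buffer back, unless the pool is full or the buffer got too big.
  void giveBack(std::unique_ptr<std::string> buffer) {
    if (_pool.size() > 4 || buffer->capacity() >= 4 * 1024 * 1024) {
      return;                           // let it be freed; don't hog memory
    }
    buffer->clear();                    // must be empty when it is leased again
    _pool.emplace_back(std::move(buffer));
  }

 private:
  std::vector<std::unique_ptr<std::string>> _pool;
};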
@ -31,6 +31,7 @@
|
|||
#include <list>
|
||||
|
||||
#include "Basics/Mutex.h"
|
||||
#include "Basics/SmallVector.h"
|
||||
#include "Basics/StringBuffer.h"
|
||||
#include "Basics/asio-helper.h"
|
||||
#include "Endpoint/ConnectionInfo.h"
|
||||
|
@ -123,6 +124,18 @@ class SocketTask : virtual public Task {
|
|||
_statistics = nullptr;
|
||||
}
|
||||
}
|
||||
|
||||
void release(SocketTask* task) {
|
||||
if (_buffer != nullptr) {
|
||||
task->returnStringBuffer(_buffer);
|
||||
_buffer = nullptr;
|
||||
}
|
||||
|
||||
if (_statistics != nullptr) {
|
||||
_statistics->release();
|
||||
_statistics = nullptr;
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// will acquire the _lock
|
||||
|
@ -139,12 +152,18 @@ class SocketTask : virtual public Task {
|
|||
|
||||
// caller must hold the _lock
|
||||
void cancelKeepAlive();
|
||||
|
||||
basics::StringBuffer* leaseStringBuffer(size_t length);
|
||||
void returnStringBuffer(basics::StringBuffer*);
|
||||
|
||||
protected:
|
||||
Mutex _lock;
|
||||
ConnectionStatistics* _connectionStatistics;
|
||||
ConnectionInfo _connectionInfo;
|
||||
basics::StringBuffer _readBuffer; // needs _lock
|
||||
|
||||
SmallVector<basics::StringBuffer*, 32>::allocator_type::arena_type _stringBuffersArena;
|
||||
SmallVector<basics::StringBuffer*, 32> _stringBuffers; // needs _lock
|
||||
|
||||
private:
|
||||
void writeWriteBuffer();
|
||||
|
|
|
@ -25,6 +25,7 @@
|
|||
|
||||
#include "Basics/StaticStrings.h"
|
||||
#include "Basics/ReadLocker.h"
|
||||
#include "Basics/StringRef.h"
|
||||
#include "Basics/VelocyPackHelper.h"
|
||||
#include "Basics/WriteLocker.h"
|
||||
#include "Basics/encoding.h"
|
||||
|
@ -105,11 +106,11 @@ void PhysicalCollection::mergeObjectsForUpdate(
|
|||
VPackSlice fromSlice;
|
||||
VPackSlice toSlice;
|
||||
|
||||
std::unordered_map<std::string, VPackSlice> newValues;
|
||||
std::unordered_map<StringRef, VPackSlice> newValues;
|
||||
{
|
||||
VPackObjectIterator it(newValue, true);
|
||||
while (it.valid()) {
|
||||
std::string key = it.key().copyString();
|
||||
StringRef key(it.key());
|
||||
if (!key.empty() && key[0] == '_' &&
|
||||
(key == StaticStrings::KeyString || key == StaticStrings::IdString ||
|
||||
key == StaticStrings::RevString ||
|
||||
|
@ -123,7 +124,7 @@ void PhysicalCollection::mergeObjectsForUpdate(
|
|||
} // else do nothing
|
||||
} else {
|
||||
// regular attribute
|
||||
newValues.emplace(std::move(key), it.value());
|
||||
newValues.emplace(key, it.value());
|
||||
}
|
||||
|
||||
it.next();
|
||||
|
@ -163,7 +164,7 @@ void PhysicalCollection::mergeObjectsForUpdate(
|
|||
{
|
||||
VPackObjectIterator it(oldValue, true);
|
||||
while (it.valid()) {
|
||||
std::string key = it.key().copyString();
|
||||
StringRef key(it.key());
|
||||
// exclude system attributes in old value now
|
||||
if (!key.empty() && key[0] == '_' &&
|
||||
(key == StaticStrings::KeyString || key == StaticStrings::IdString ||
|
||||
|
@ -178,7 +179,7 @@ void PhysicalCollection::mergeObjectsForUpdate(
|
|||
|
||||
if (found == newValues.end()) {
|
||||
// use old value
|
||||
b.add(key, it.value());
|
||||
b.add(key.data(), key.size(), it.value());
|
||||
} else if (mergeObjects && it.value().isObject() &&
|
||||
(*found).second.isObject()) {
|
||||
// merge both values
|
||||
|
@ -186,7 +187,7 @@ void PhysicalCollection::mergeObjectsForUpdate(
|
|||
if (keepNull || (!value.isNone() && !value.isNull())) {
|
||||
VPackBuilder sub =
|
||||
VPackCollection::merge(it.value(), value, true, !keepNull);
|
||||
b.add(key, sub.slice());
|
||||
b.add(key.data(), key.size(), sub.slice());
|
||||
}
|
||||
// clear the value in the map so its not added again
|
||||
(*found).second = VPackSlice();
|
||||
|
@ -194,7 +195,7 @@ void PhysicalCollection::mergeObjectsForUpdate(
|
|||
// use new value
|
||||
auto& value = (*found).second;
|
||||
if (keepNull || (!value.isNone() && !value.isNull())) {
|
||||
b.add(key, value);
|
||||
b.add(key.data(), key.size(), value);
|
||||
}
|
||||
// clear the value in the map so it's not added again
|
||||
(*found).second = VPackSlice();
|
||||
|
@ -212,7 +213,7 @@ void PhysicalCollection::mergeObjectsForUpdate(
|
|||
if (!keepNull && s.isNull()) {
|
||||
continue;
|
||||
}
|
||||
b.add(it.first, s);
|
||||
b.add(it.first.data(), it.first.size(), s);
|
||||
}
|
||||
|
||||
b.close();
|
||||
|
|
|
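Note: mergeObjectsForUpdate above switches the temporary key map from std::string to StringRef, so attribute names stay views into the VelocyPack buffer instead of being copied; this is safe because the slices outlive the map. The sketch below shows the same idea with std::string_view keys (the toy "key=value" parsing is only there to have something to put into the map; it is not related to VelocyPack).

// Sketch: build a lookup map of attribute-name views without copying the names.
#include <iostream>
#include <string>
#include <string_view>
#include <unordered_map>

int main() {
  // The backing storage must outlive the map, just like the VPackSlice does.
  std::string const doc = "name=alice;age=42";

  std::unordered_map<std::string_view, std::string_view> newValues;
  std::string_view rest = doc;
  while (!rest.empty()) {
    auto entryEnd = rest.find(';');
    std::string_view entry = rest.substr(0, entryEnd);
    auto eq = entry.find('=');
    newValues.emplace(entry.substr(0, eq), entry.substr(eq + 1));  // views only
    rest = (entryEnd == std::string_view::npos) ? std::string_view{}
                                                : rest.substr(entryEnd + 1);
  }

  std::cout << newValues["age"] << "\n";  // prints 42; no key string was copied
  return 0;
}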
@ -27,6 +27,7 @@
|
|||
#include "Basics/Common.h"
|
||||
#include "Basics/ReadWriteLock.h"
|
||||
#include "Indexes/Index.h"
|
||||
#include "Indexes/IndexIterator.h"
|
||||
#include "VocBase/voc-types.h"
|
||||
|
||||
#include <velocypack/Builder.h>
|
||||
|
@ -145,6 +146,10 @@ class PhysicalCollection {
|
|||
virtual bool readDocument(transaction::Methods* trx,
|
||||
DocumentIdentifierToken const& token,
|
||||
ManagedDocumentResult& result) = 0;
|
||||
|
||||
virtual bool readDocumentWithCallback(transaction::Methods* trx,
|
||||
DocumentIdentifierToken const& token,
|
||||
IndexIterator::DocumentCallback const& cb) = 0;
|
||||
|
||||
virtual Result insert(arangodb::transaction::Methods* trx,
|
||||
arangodb::velocypack::Slice const newSlice,
|
||||
|
|
|
@ -367,6 +367,8 @@ class StorageEngine : public application_features::ApplicationFeature {
|
|||
,uint64_t tickStart, uint64_t tickEnd
|
||||
,std::shared_ptr<VPackBuilder>& builderSPtr) = 0;
|
||||
|
||||
virtual bool useRawDocumentPointers() = 0;
|
||||
|
||||
void getCapabilities(VPackBuilder& builder) const {
|
||||
builder.openObject();
|
||||
builder.add("name", VPackValue(typeName()));
|
||||
|
|
|
@ -34,8 +34,42 @@
|
|||
|
||||
using namespace arangodb;
|
||||
|
||||
/// @brief quick access to the _key attribute in a database document
|
||||
/// the document must have at least two attributes, and _key is supposed to
|
||||
/// be the first one
|
||||
VPackSlice transaction::helpers::extractKeyFromDocument(VPackSlice slice) {
|
||||
if (slice.isExternal()) {
|
||||
slice = slice.resolveExternal();
|
||||
}
|
||||
TRI_ASSERT(slice.isObject());
|
||||
|
||||
if (slice.isEmptyObject()) {
|
||||
return VPackSlice();
|
||||
}
|
||||
// a regular document must have at least the three attributes
|
||||
// _key, _id and _rev (in this order). _key must be the first attribute
|
||||
// however this method may also be called for remove markers, which only
|
||||
// have _key and _rev. therefore the only assertion that we can make
|
||||
// here is that the document at least has two attributes
|
||||
|
||||
uint8_t const* p = slice.begin() + slice.findDataOffset(slice.head());
|
||||
|
||||
if (*p == basics::VelocyPackHelper::KeyAttribute) {
|
||||
// the + 1 is required so that we can skip over the attribute name
|
||||
// and point to the attribute value
|
||||
return VPackSlice(p + 1);
|
||||
}
|
||||
|
||||
// fall back to the regular lookup method
|
||||
return slice.get(StaticStrings::KeyString);
|
||||
}
|
||||
|
||||
/// @brief extract the _key attribute from a slice
|
||||
StringRef transaction::helpers::extractKeyPart(VPackSlice const slice) {
|
||||
StringRef transaction::helpers::extractKeyPart(VPackSlice slice) {
|
||||
if (slice.isExternal()) {
|
||||
slice = slice.resolveExternal();
|
||||
}
|
||||
|
||||
// extract _key
|
||||
if (slice.isObject()) {
|
||||
VPackSlice k = slice.get(StaticStrings::KeyString);
|
||||
|
|
|
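Note: extractKeyFromDocument above relies on ArangoDB's convention that _key is stored as the first attribute with a one-byte attribute id, so its value can be read at a fixed offset, falling back to a regular get() only when that byte does not match. The standalone illustration below shows the same "check the first attribute before doing a full lookup" idea using the public VelocyPack API; the one-byte attribute-id translation is ArangoDB-specific and is deliberately not reproduced here, so the fast path is only a best-effort check.

// Sketch: prefer the first attribute when extracting _key, fall back to get().
#include <iostream>
#include <velocypack/Builder.h>
#include <velocypack/Slice.h>
#include <velocypack/velocypack-aliases.h>

VPackSlice extractKey(VPackSlice doc) {
  if (doc.isObject() && doc.length() > 0) {
    // In ArangoDB documents _key is written first, so checking the first
    // attribute usually avoids a full attribute lookup.
    if (doc.keyAt(0).isEqualString("_key")) {
      return doc.valueAt(0);
    }
  }
  return doc.get("_key");  // generic (slower) path
}

int main() {
  VPackBuilder b;
  b.openObject();
  b.add("_key", VPackValue("abc"));
  b.add("value", VPackValue(42));
  b.close();
  std::cout << extractKey(b.slice()).copyString() << "\n";  // prints abc
  return 0;
}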
@ -49,7 +49,7 @@ class Methods;
|
|||
|
||||
namespace helpers {
|
||||
/// @brief extract the _key attribute from a slice
|
||||
StringRef extractKeyPart(VPackSlice const);
|
||||
StringRef extractKeyPart(VPackSlice);
|
||||
|
||||
std::string extractIdString(CollectionNameResolver const*,
|
||||
VPackSlice, VPackSlice const&);
|
||||
|
|
|
@ -652,36 +652,6 @@ std::string transaction::Methods::extractIdString(VPackSlice slice) {
|
|||
return transaction::helpers::extractIdString(resolver(), slice, VPackSlice());
|
||||
}
|
||||
|
||||
/// @brief quick access to the _key attribute in a database document
|
||||
/// the document must have at least two attributes, and _key is supposed to
|
||||
/// be the first one
|
||||
VPackSlice transaction::helpers::extractKeyFromDocument(VPackSlice slice) {
|
||||
if (slice.isExternal()) {
|
||||
slice = slice.resolveExternal();
|
||||
}
|
||||
TRI_ASSERT(slice.isObject());
|
||||
|
||||
if (slice.isEmptyObject()) {
|
||||
return VPackSlice();
|
||||
}
|
||||
// a regular document must have at least the three attributes
|
||||
// _key, _id and _rev (in this order). _key must be the first attribute
|
||||
// however this method may also be called for remove markers, which only
|
||||
// have _key and _rev. therefore the only assertion that we can make
|
||||
// here is that the document at least has two attributes
|
||||
|
||||
uint8_t const* p = slice.begin() + slice.findDataOffset(slice.head());
|
||||
|
||||
if (*p == basics::VelocyPackHelper::KeyAttribute) {
|
||||
// the + 1 is required so that we can skip over the attribute name
|
||||
// and point to the attribute value
|
||||
return VPackSlice(p + 1);
|
||||
}
|
||||
|
||||
// fall back to the regular lookup method
|
||||
return slice.get(StaticStrings::KeyString);
|
||||
}
|
||||
|
||||
/// @brief build a VPack object with _id, _key and _rev, the result is
|
||||
/// added to the builder in the argument as a single object.
|
||||
void transaction::Methods::buildDocumentIdentity(
|
||||
|
@ -873,8 +843,8 @@ OperationResult transaction::Methods::anyLocal(
|
|||
indexScan(collectionName, transaction::Methods::CursorType::ANY, &mmdr,
|
||||
skip, limit, 1000, false);
|
||||
|
||||
cursor->allDocuments([&resultBuilder](ManagedDocumentResult const& mdr) {
|
||||
mdr.addToBuilder(resultBuilder, false);
|
||||
cursor->allDocuments([&resultBuilder](DocumentIdentifierToken const& token, VPackSlice slice) {
|
||||
resultBuilder.add(slice);
|
||||
});
|
||||
|
||||
resultBuilder.close();
|
||||
|
@ -1481,110 +1451,108 @@ OperationResult transaction::Methods::insertLocal(
|
|||
EngineSelectorFeature::ENGINE->waitForSync(maxTick);
|
||||
}
|
||||
|
||||
// Now see whether or not we have to do synchronous replication:
|
||||
std::shared_ptr<std::vector<ServerID> const> followers;
|
||||
bool doingSynchronousReplication = false;
|
||||
if (_state->isDBServer()) {
|
||||
if (res.ok() && _state->isDBServer()) {
|
||||
// Now replicate the same operation on all followers:
|
||||
auto const& followerInfo = collection->followers();
|
||||
followers = followerInfo->get();
|
||||
doingSynchronousReplication = !isFollower && followers->size() > 0;
|
||||
}
|
||||
std::shared_ptr<std::vector<ServerID> const> followers = followerInfo->get();
|
||||
// Now see whether or not we have to do synchronous replication:
|
||||
bool doingSynchronousReplication = !isFollower && followers->size() > 0;
|
||||
|
||||
if (doingSynchronousReplication && res.ok()) {
|
||||
// In the multi babies case res is always TRI_ERROR_NO_ERROR if we
|
||||
// get here, in the single document case, we do not try to replicate
|
||||
// in case of an error.
|
||||
if (doingSynchronousReplication) {
|
||||
// In the multi babies case res is always TRI_ERROR_NO_ERROR if we
|
||||
// get here, in the single document case, we do not try to replicate
|
||||
// in case of an error.
|
||||
|
||||
// Now replicate the good operations on all followers:
|
||||
std::string path =
|
||||
// Now replicate the good operations on all followers:
|
||||
std::string path =
|
||||
"/_db/" + arangodb::basics::StringUtils::urlEncode(databaseName()) +
|
||||
"/_api/document/" +
|
||||
arangodb::basics::StringUtils::urlEncode(collection->name()) +
|
||||
"?isRestore=true&isSynchronousReplication=" +
|
||||
ServerState::instance()->getId();
|
||||
|
||||
VPackBuilder payload;
|
||||
VPackBuilder payload;
|
||||
|
||||
auto doOneDoc = [&](VPackSlice const& doc, VPackSlice result) {
|
||||
VPackObjectBuilder guard(&payload);
|
||||
VPackSlice s = result.get(StaticStrings::KeyString);
|
||||
payload.add(StaticStrings::KeyString, s);
|
||||
s = result.get(StaticStrings::RevString);
|
||||
payload.add(StaticStrings::RevString, s);
|
||||
TRI_SanitizeObject(doc, payload);
|
||||
};
|
||||
auto doOneDoc = [&](VPackSlice const& doc, VPackSlice result) {
|
||||
VPackObjectBuilder guard(&payload);
|
||||
VPackSlice s = result.get(StaticStrings::KeyString);
|
||||
payload.add(StaticStrings::KeyString, s);
|
||||
s = result.get(StaticStrings::RevString);
|
||||
payload.add(StaticStrings::RevString, s);
|
||||
TRI_SanitizeObject(doc, payload);
|
||||
};
|
||||
|
||||
VPackSlice ourResult = resultBuilder.slice();
|
||||
size_t count = 0;
|
||||
if (value.isArray()) {
|
||||
VPackArrayBuilder guard(&payload);
|
||||
VPackArrayIterator itValue(value);
|
||||
VPackArrayIterator itResult(ourResult);
|
||||
while (itValue.valid() && itResult.valid()) {
|
||||
TRI_ASSERT((*itResult).isObject());
|
||||
if (!(*itResult).hasKey("error")) {
|
||||
doOneDoc(itValue.value(), itResult.value());
|
||||
count++;
|
||||
}
|
||||
itValue.next();
|
||||
itResult.next();
|
||||
}
|
||||
} else {
|
||||
doOneDoc(value, ourResult);
|
||||
count++;
|
||||
}
|
||||
if (count > 0) {
|
||||
auto body = std::make_shared<std::string>();
|
||||
*body = payload.slice().toJson();
|
||||
|
||||
// Now prepare the requests:
|
||||
std::vector<ClusterCommRequest> requests;
|
||||
for (auto const& f : *followers) {
|
||||
requests.emplace_back("server:" + f, arangodb::rest::RequestType::POST,
|
||||
path, body);
|
||||
}
|
||||
auto cc = arangodb::ClusterComm::instance();
|
||||
if (cc != nullptr) {
|
||||
// nullptr only happens on controlled shutdown
|
||||
size_t nrDone = 0;
|
||||
size_t nrGood = cc->performRequests(requests, chooseTimeout(count),
|
||||
nrDone, Logger::REPLICATION, false);
|
||||
if (nrGood < followers->size()) {
|
||||
// If any would-be-follower refused to follow there must be a
|
||||
// new leader in the meantime, in this case we must not allow
|
||||
// this operation to succeed, we simply return with a refusal
|
||||
// error (note that we use the follower version, since we have
|
||||
// lost leadership):
|
||||
if (findRefusal(requests)) {
|
||||
return OperationResult(TRI_ERROR_CLUSTER_SHARD_LEADER_RESIGNED);
|
||||
VPackSlice ourResult = resultBuilder.slice();
|
||||
size_t count = 0;
|
||||
if (value.isArray()) {
|
||||
VPackArrayBuilder guard(&payload);
|
||||
VPackArrayIterator itValue(value);
|
||||
VPackArrayIterator itResult(ourResult);
|
||||
while (itValue.valid() && itResult.valid()) {
|
||||
TRI_ASSERT((*itResult).isObject());
|
||||
if (!(*itResult).hasKey("error")) {
|
||||
doOneDoc(itValue.value(), itResult.value());
|
||||
count++;
|
||||
}
|
||||
itValue.next();
|
||||
itResult.next();
|
||||
}
|
||||
} else {
|
||||
doOneDoc(value, ourResult);
|
||||
count++;
|
||||
}
|
||||
if (count > 0) {
|
||||
auto body = std::make_shared<std::string>();
|
||||
*body = payload.slice().toJson();
|
||||
|
||||
// Otherwise we drop all followers that were not successful:
|
||||
for (size_t i = 0; i < followers->size(); ++i) {
|
||||
bool replicationWorked =
|
||||
// Now prepare the requests:
|
||||
std::vector<ClusterCommRequest> requests;
|
||||
for (auto const& f : *followers) {
|
||||
requests.emplace_back("server:" + f, arangodb::rest::RequestType::POST,
|
||||
path, body);
|
||||
}
|
||||
auto cc = arangodb::ClusterComm::instance();
|
||||
if (cc != nullptr) {
|
||||
// nullptr only happens on controlled shutdown
|
||||
size_t nrDone = 0;
|
||||
size_t nrGood = cc->performRequests(requests, chooseTimeout(count),
|
||||
nrDone, Logger::REPLICATION, false);
|
||||
if (nrGood < followers->size()) {
|
||||
// If any would-be-follower refused to follow there must be a
|
||||
// new leader in the meantime, in this case we must not allow
|
||||
// this operation to succeed, we simply return with a refusal
|
||||
// error (note that we use the follower version, since we have
|
||||
// lost leadership):
|
||||
if (findRefusal(requests)) {
|
||||
return OperationResult(TRI_ERROR_CLUSTER_SHARD_LEADER_RESIGNED);
|
||||
}
|
||||
|
||||
// Otherwise we drop all followers that were not successful:
|
||||
for (size_t i = 0; i < followers->size(); ++i) {
|
||||
bool replicationWorked =
|
||||
requests[i].done &&
|
||||
requests[i].result.status == CL_COMM_RECEIVED &&
|
||||
(requests[i].result.answer_code ==
|
||||
rest::ResponseCode::ACCEPTED ||
|
||||
rest::ResponseCode::ACCEPTED ||
|
||||
requests[i].result.answer_code == rest::ResponseCode::CREATED);
|
||||
if (replicationWorked) {
|
||||
bool found;
|
||||
requests[i].result.answer->header(StaticStrings::ErrorCodes,
|
||||
found);
|
||||
replicationWorked = !found;
|
||||
}
|
||||
if (!replicationWorked) {
|
||||
auto const& followerInfo = collection->followers();
|
||||
if (followerInfo->remove((*followers)[i])) {
|
||||
LOG_TOPIC(WARN, Logger::REPLICATION)
|
||||
if (replicationWorked) {
|
||||
bool found;
|
||||
requests[i].result.answer->header(StaticStrings::ErrorCodes,
|
||||
found);
|
||||
replicationWorked = !found;
|
||||
}
|
||||
if (!replicationWorked) {
|
||||
auto const& followerInfo = collection->followers();
|
||||
if (followerInfo->remove((*followers)[i])) {
|
||||
LOG_TOPIC(WARN, Logger::REPLICATION)
|
||||
<< "insertLocal: dropping follower " << (*followers)[i]
|
||||
<< " for shard " << collectionName;
|
||||
} else {
|
||||
LOG_TOPIC(ERR, Logger::REPLICATION)
|
||||
} else {
|
||||
LOG_TOPIC(ERR, Logger::REPLICATION)
|
||||
<< "insertLocal: could not drop follower "
|
||||
<< (*followers)[i] << " for shard " << collectionName;
|
||||
THROW_ARANGO_EXCEPTION(TRI_ERROR_CLUSTER_COULD_NOT_DROP_FOLLOWER);
|
||||
THROW_ARANGO_EXCEPTION(TRI_ERROR_CLUSTER_COULD_NOT_DROP_FOLLOWER);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
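Note: the restructured insertLocal above only computes the follower list when the local write succeeded, replicates the accepted documents to all followers in parallel, returns TRI_ERROR_CLUSTER_SHARD_LEADER_RESIGNED if any follower refused, and otherwise drops followers whose replication failed. The sketch below shows only that bookkeeping step in a generic form; every type and the status codes used here are stand-ins, not ClusterComm or FollowerInfo.

// Sketch: evaluate follower replication results and drop failed followers.
#include <cstddef>
#include <string>
#include <vector>

struct ReplicationResult {
  bool done;          // request completed
  bool received;      // a response arrived
  int statusCode;     // HTTP status of the follower's answer
  bool refused;       // follower explicitly refused (it thinks we lost leadership)
};

enum class Outcome { Ok, LeaderResigned };

inline Outcome handleFollowerResults(std::vector<std::string>& followers,
                                     std::vector<ReplicationResult> const& results) {
  for (auto const& r : results) {
    if (r.refused) {
      // a refusing follower means there is probably a new leader: give up
      return Outcome::LeaderResigned;
    }
  }
  // Keep only followers whose replication clearly succeeded.
  std::vector<std::string> stillInSync;
  for (std::size_t i = 0; i < followers.size(); ++i) {
    bool worked = results[i].done && results[i].received &&
                  (results[i].statusCode == 201 || results[i].statusCode == 202);
    if (worked) {
      stillInSync.push_back(followers[i]);
    }
    // in ArangoDB the dropped follower is removed via FollowerInfo::remove()
  }
  followers.swap(stillInSync);
  return Outcome::Ok;
}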
@ -1829,117 +1797,115 @@ OperationResult transaction::Methods::modifyLocal(
|
|||
}
|
||||
|
||||
// Now see whether or not we have to do synchronous replication:
|
||||
std::shared_ptr<std::vector<ServerID> const> followers;
|
||||
bool doingSynchronousReplication = false;
|
||||
if (_state->isDBServer()) {
|
||||
if (res.ok() && _state->isDBServer()) {
|
||||
// Now replicate the same operation on all followers:
|
||||
auto const& followerInfo = collection->followers();
|
||||
followers = followerInfo->get();
|
||||
doingSynchronousReplication = !isFollower && followers->size() > 0;
|
||||
}
|
||||
std::shared_ptr<std::vector<ServerID> const> followers = followerInfo->get();
|
||||
bool doingSynchronousReplication = !isFollower && followers->size() > 0;
|
||||
|
||||
if (doingSynchronousReplication && res.ok()) {
|
||||
// In the multi babies case res is always TRI_ERROR_NO_ERROR if we
|
||||
// get here, in the single document case, we do not try to replicate
|
||||
// in case of an error.
|
||||
if (doingSynchronousReplication) {
|
||||
// In the multi babies case res is always TRI_ERROR_NO_ERROR if we
|
||||
// get here, in the single document case, we do not try to replicate
|
||||
// in case of an error.
|
||||
|
||||
// Now replicate the good operations on all followers:
|
||||
auto cc = arangodb::ClusterComm::instance();
|
||||
if (cc != nullptr) {
|
||||
// nullptr only happens on controlled shutdown
|
||||
std::string path =
|
||||
// Now replicate the good operations on all followers:
|
||||
auto cc = arangodb::ClusterComm::instance();
|
||||
if (cc != nullptr) {
|
||||
// nullptr only happens on controlled shutdown
|
||||
std::string path =
|
||||
"/_db/" + arangodb::basics::StringUtils::urlEncode(databaseName()) +
|
||||
"/_api/document/" +
|
||||
arangodb::basics::StringUtils::urlEncode(collection->name()) +
|
||||
"?isRestore=true&isSynchronousReplication=" +
|
||||
ServerState::instance()->getId();
|
||||
|
||||
VPackBuilder payload;
|
||||
VPackBuilder payload;
|
||||
|
||||
auto doOneDoc = [&](VPackSlice const& doc, VPackSlice result) {
|
||||
VPackObjectBuilder guard(&payload);
|
||||
VPackSlice s = result.get(StaticStrings::KeyString);
|
||||
payload.add(StaticStrings::KeyString, s);
|
||||
s = result.get(StaticStrings::RevString);
|
||||
payload.add(StaticStrings::RevString, s);
|
||||
TRI_SanitizeObject(doc, payload);
|
||||
};
|
||||
auto doOneDoc = [&](VPackSlice const& doc, VPackSlice result) {
|
||||
VPackObjectBuilder guard(&payload);
|
||||
VPackSlice s = result.get(StaticStrings::KeyString);
|
||||
payload.add(StaticStrings::KeyString, s);
|
||||
s = result.get(StaticStrings::RevString);
|
||||
payload.add(StaticStrings::RevString, s);
|
||||
TRI_SanitizeObject(doc, payload);
|
||||
};
|
||||
|
||||
VPackSlice ourResult = resultBuilder.slice();
|
||||
size_t count = 0;
|
||||
if (multiCase) {
|
||||
VPackArrayBuilder guard(&payload);
|
||||
VPackArrayIterator itValue(newValue);
|
||||
VPackArrayIterator itResult(ourResult);
|
||||
while (itValue.valid() && itResult.valid()) {
|
||||
TRI_ASSERT((*itResult).isObject());
|
||||
if (!(*itResult).hasKey("error")) {
|
||||
doOneDoc(itValue.value(), itResult.value());
|
||||
count++;
|
||||
VPackSlice ourResult = resultBuilder.slice();
|
||||
size_t count = 0;
|
||||
if (multiCase) {
|
||||
VPackArrayBuilder guard(&payload);
|
||||
VPackArrayIterator itValue(newValue);
|
||||
VPackArrayIterator itResult(ourResult);
|
||||
while (itValue.valid() && itResult.valid()) {
|
||||
TRI_ASSERT((*itResult).isObject());
|
||||
if (!(*itResult).hasKey("error")) {
|
||||
doOneDoc(itValue.value(), itResult.value());
|
||||
count++;
|
||||
}
|
||||
itValue.next();
|
||||
itResult.next();
|
||||
}
|
||||
itValue.next();
|
||||
itResult.next();
|
||||
} else {
|
||||
VPackArrayBuilder guard(&payload);
|
||||
doOneDoc(newValue, ourResult);
|
||||
count++;
|
||||
}
|
||||
} else {
|
||||
VPackArrayBuilder guard(&payload);
|
||||
doOneDoc(newValue, ourResult);
|
||||
count++;
|
||||
}
|
||||
if (count > 0) {
|
||||
auto body = std::make_shared<std::string>();
|
||||
*body = payload.slice().toJson();
|
||||
if (count > 0) {
|
||||
auto body = std::make_shared<std::string>();
|
||||
*body = payload.slice().toJson();
|
||||
|
||||
// Now prepare the requests:
|
||||
std::vector<ClusterCommRequest> requests;
|
||||
for (auto const& f : *followers) {
|
||||
requests.emplace_back("server:" + f,
|
||||
operation == TRI_VOC_DOCUMENT_OPERATION_REPLACE
|
||||
? arangodb::rest::RequestType::PUT
|
||||
: arangodb::rest::RequestType::PATCH,
|
||||
path, body);
|
||||
}
|
||||
size_t nrDone = 0;
|
||||
size_t nrGood = cc->performRequests(requests, chooseTimeout(count),
|
||||
nrDone, Logger::REPLICATION, false);
|
||||
if (nrGood < followers->size()) {
|
||||
// If any would-be-follower refused to follow there must be a
|
||||
// new leader in the meantime, in this case we must not allow
|
||||
// this operation to succeed, we simply return with a refusal
|
||||
// error (note that we use the follower version, since we have
|
||||
// lost leadership):
|
||||
if (findRefusal(requests)) {
|
||||
return OperationResult(TRI_ERROR_CLUSTER_SHARD_LEADER_RESIGNED);
|
||||
// Now prepare the requests:
|
||||
std::vector<ClusterCommRequest> requests;
|
||||
for (auto const& f : *followers) {
|
||||
requests.emplace_back("server:" + f,
|
||||
operation == TRI_VOC_DOCUMENT_OPERATION_REPLACE
|
||||
? arangodb::rest::RequestType::PUT
|
||||
: arangodb::rest::RequestType::PATCH,
|
||||
path, body);
|
||||
}
|
||||
size_t nrDone = 0;
|
||||
size_t nrGood = cc->performRequests(requests, chooseTimeout(count),
|
||||
nrDone, Logger::REPLICATION, false);
|
||||
if (nrGood < followers->size()) {
|
||||
          // If any would-be-follower refused to follow there must be a
          // new leader in the meantime, in this case we must not allow
          // this operation to succeed, we simply return with a refusal
          // error (note that we use the follower version, since we have
          // lost leadership):
          if (findRefusal(requests)) {
            return OperationResult(TRI_ERROR_CLUSTER_SHARD_LEADER_RESIGNED);
          }

          // Otherwise we drop all followers that were not successful:
          for (size_t i = 0; i < followers->size(); ++i) {
            bool replicationWorked =
                requests[i].done &&
                requests[i].result.status == CL_COMM_RECEIVED &&
                (requests[i].result.answer_code ==
                     rest::ResponseCode::ACCEPTED ||
                 requests[i].result.answer_code == rest::ResponseCode::OK);
            if (replicationWorked) {
              bool found;
              requests[i].result.answer->header(StaticStrings::ErrorCodes,
                                                found);
              replicationWorked = !found;
            }
            if (!replicationWorked) {
              auto const& followerInfo = collection->followers();
              if (followerInfo->remove((*followers)[i])) {
                LOG_TOPIC(WARN, Logger::REPLICATION)
                    << "modifyLocal: dropping follower " << (*followers)[i]
                    << " for shard " << collectionName;
              } else {
                LOG_TOPIC(ERR, Logger::REPLICATION)
                    << "modifyLocal: could not drop follower "
                    << (*followers)[i] << " for shard " << collectionName;
                THROW_ARANGO_EXCEPTION(TRI_ERROR_CLUSTER_COULD_NOT_DROP_FOLLOWER);
              }
            }
          }
        }
      }

@ -2112,112 +2078,110 @@ OperationResult transaction::Methods::removeLocal(
  }

  // Now see whether or not we have to do synchronous replication:
  std::shared_ptr<std::vector<ServerID> const> followers;
  bool doingSynchronousReplication = false;
  if (_state->isDBServer()) {
  if (res.ok() && _state->isDBServer()) {
    // Now replicate the same operation on all followers:
    auto const& followerInfo = collection->followers();
    followers = followerInfo->get();
    doingSynchronousReplication = !isFollower && followers->size() > 0;
  }
    std::shared_ptr<std::vector<ServerID> const> followers = followerInfo->get();
    bool doingSynchronousReplication = !isFollower && followers->size() > 0;

  if (doingSynchronousReplication && res.ok()) {
    if (doingSynchronousReplication) {
      // In the multi babies case res is always TRI_ERROR_NO_ERROR if we
      // get here, in the single document case, we do not try to replicate
      // in case of an error.

      // Now replicate the good operations on all followers:
      auto cc = arangodb::ClusterComm::instance();
      if (cc != nullptr) {
        // nullptr only happens on controled shutdown

        std::string path =
            "/_db/" + arangodb::basics::StringUtils::urlEncode(databaseName()) +
            "/_api/document/" +
            arangodb::basics::StringUtils::urlEncode(collection->name()) +
            "?isRestore=true&isSynchronousReplication=" +
            ServerState::instance()->getId();

        VPackBuilder payload;

        auto doOneDoc = [&](VPackSlice const& doc, VPackSlice result) {
          VPackObjectBuilder guard(&payload);
          VPackSlice s = result.get(StaticStrings::KeyString);
          payload.add(StaticStrings::KeyString, s);
          s = result.get(StaticStrings::RevString);
          payload.add(StaticStrings::RevString, s);
          TRI_SanitizeObject(doc, payload);
        };

        VPackSlice ourResult = resultBuilder.slice();
        size_t count = 0;
        if (value.isArray()) {
          VPackArrayBuilder guard(&payload);
          VPackArrayIterator itValue(value);
          VPackArrayIterator itResult(ourResult);
          while (itValue.valid() && itResult.valid()) {
            TRI_ASSERT((*itResult).isObject());
            if (!(*itResult).hasKey("error")) {
              doOneDoc(itValue.value(), itResult.value());
              count++;
            }
            itValue.next();
            itResult.next();
          }
        } else {
          VPackArrayBuilder guard(&payload);
          doOneDoc(value, ourResult);
          count++;
        }
        if (count > 0) {
          auto body = std::make_shared<std::string>();
          *body = payload.slice().toJson();

          // Now prepare the requests:
          std::vector<ClusterCommRequest> requests;
          for (auto const& f : *followers) {
            requests.emplace_back("server:" + f,
                                  arangodb::rest::RequestType::DELETE_REQ, path,
                                  body);
          }
          size_t nrDone = 0;
          size_t nrGood = cc->performRequests(requests, chooseTimeout(count),
                                              nrDone, Logger::REPLICATION, false);
          if (nrGood < followers->size()) {
            // If any would-be-follower refused to follow there must be a
            // new leader in the meantime, in this case we must not allow
            // this operation to succeed, we simply return with a refusal
            // error (note that we use the follower version, since we have
            // lost leadership):
            if (findRefusal(requests)) {
              return OperationResult(TRI_ERROR_CLUSTER_SHARD_LEADER_RESIGNED);
            }

            // we drop all followers that were not successful:
            for (size_t i = 0; i < followers->size(); ++i) {
              bool replicationWorked =
                  requests[i].done &&
                  requests[i].result.status == CL_COMM_RECEIVED &&
                  (requests[i].result.answer_code ==
                       rest::ResponseCode::ACCEPTED ||
                   requests[i].result.answer_code == rest::ResponseCode::OK);
              if (replicationWorked) {
                bool found;
                requests[i].result.answer->header(StaticStrings::ErrorCodes,
                                                  found);
                replicationWorked = !found;
              }
              if (!replicationWorked) {
                auto const& followerInfo = collection->followers();
                if (followerInfo->remove((*followers)[i])) {
                  LOG_TOPIC(WARN, Logger::REPLICATION)
                      << "removeLocal: dropping follower " << (*followers)[i]
                      << " for shard " << collectionName;
                } else {
                  LOG_TOPIC(ERR, Logger::REPLICATION)
                      << "removeLocal: could not drop follower "
                      << (*followers)[i] << " for shard " << collectionName;
                  THROW_ARANGO_EXCEPTION(TRI_ERROR_CLUSTER_COULD_NOT_DROP_FOLLOWER);
                }
              }
            }
          }
        }
      }

@ -2284,9 +2248,8 @@ OperationResult transaction::Methods::allLocal(
    return OperationResult(cursor->code);
  }

  auto cb = [&resultBuilder](ManagedDocumentResult const& mdr) {
    uint8_t const* vpack = mdr.vpack();
    resultBuilder.add(VPackSlice(vpack));
  auto cb = [&resultBuilder](DocumentIdentifierToken const& token, VPackSlice slice) {
    resultBuilder.add(slice);
  };
  cursor->allDocuments(cb);
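For illustration, a minimal sketch of the new callback style used in the allLocal hunk above; the cursor and builder names are placeholders and not taken from the diff:

  // assumes an OperationCursor* cursor and the callback signature shown above
  VPackBuilder out;
  out.openArray();
  cursor->allDocuments([&out](DocumentIdentifierToken const& /*token*/, VPackSlice slice) {
    // the document slice is added as-is, without first materializing
    // a ManagedDocumentResult
    out.add(slice);
  });
  out.close();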
@ -2373,10 +2336,9 @@ OperationResult transaction::Methods::truncateLocal(

  // Now see whether or not we have to do synchronous replication:
  if (_state->isDBServer()) {
    std::shared_ptr<std::vector<ServerID> const> followers;
    // Now replicate the same operation on all followers:
    auto const& followerInfo = collection->followers();
    followers = followerInfo->get();
    std::shared_ptr<std::vector<ServerID> const> followers = followerInfo->get();
    if (!isFollower && followers->size() > 0) {
      // Now replicate the good operations on all followers:
      auto cc = arangodb::ClusterComm::instance();

@ -95,7 +95,7 @@ bool OperationCursor::next(IndexIterator::TokenCallback const& callback, uint64_
}

bool OperationCursor::nextDocument(IndexIterator::DocumentCallback const& callback,
                                   uint64_t batchSize) {
  if (!hasMore()) {
    return false;
  }

@ -110,9 +110,9 @@ bool OperationCursor::nextDocument(IndexIterator::DocumentCallback const& callba
  // We add wrapper around Callback that validates that
  // the callback has been called at least once.
  bool called = false;
  auto cb = [&](ManagedDocumentResult const& mdr) {
  auto cb = [&](DocumentIdentifierToken const& token, VPackSlice slice) {
    called = true;
    callback(mdr);
    callback(token, slice);
  };
  _hasMore = _indexIterator->nextDocument(cb, atMost);
  if (_hasMore) {
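A rough sketch of how the batch interface above can be driven from the caller's side; the batch size and variable names are illustrative, and the loop condition relies only on hasMore() as shown in the hunk:

  // assumes an OperationCursor* cursor; pulls documents in batches of 1000
  auto handler = [](DocumentIdentifierToken const& token, VPackSlice slice) {
    // handle one document slice here
  };
  while (cursor->hasMore()) {
    cursor->nextDocument(handler, 1000);
  }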
@ -256,8 +256,8 @@ static void JS_AllQuery(v8::FunctionCallbackInfo<v8::Value> const& args) {
  VPackBuilder resultBuilder;
  resultBuilder.openArray();

  opCursor->allDocuments([&resultBuilder](ManagedDocumentResult const& mdr) {
    resultBuilder.add(VPackSlice(mdr.vpack()));
  opCursor->allDocuments([&resultBuilder](DocumentIdentifierToken const& token, VPackSlice slice) {
    resultBuilder.add(slice);
  });

  resultBuilder.close();

@ -1170,6 +1170,12 @@ bool LogicalCollection::readDocument(transaction::Methods* trx,
  return getPhysical()->readDocument(trx, token, result);
}

bool LogicalCollection::readDocumentWithCallback(transaction::Methods* trx,
                                                 DocumentIdentifierToken const& token,
                                                 IndexIterator::DocumentCallback const& cb) {
  return getPhysical()->readDocumentWithCallback(trx, token, cb);
}

/// @brief a method to skip certain documents in AQL write operations,
/// this is only used in the enterprise edition for smart graphs
#ifndef USE_ENTERPRISE

@ -26,6 +26,7 @@
#define ARANGOD_VOCBASE_LOGICAL_COLLECTION_H 1

#include "Basics/Common.h"
#include "Indexes/IndexIterator.h"
#include "VocBase/voc-types.h"
#include "VocBase/vocbase.h"

@ -278,6 +279,10 @@ class LogicalCollection {
  bool readDocument(transaction::Methods* trx,
                    DocumentIdentifierToken const& token,
                    ManagedDocumentResult& result);

  bool readDocumentWithCallback(transaction::Methods* trx,
                                DocumentIdentifierToken const& token,
                                IndexIterator::DocumentCallback const& cb);

  /// @brief Persist the connected physical collection.
  /// This should be called AFTER the collection is successfully
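A minimal sketch of calling the new callback variant declared above; trx, collection and token are assumed to come from the surrounding transaction code:

  collection->readDocumentWithCallback(
      trx, token, [](DocumentIdentifierToken const&, VPackSlice slice) {
        // read attributes straight off the slice instead of materializing
        // a ManagedDocumentResult first
        VPackSlice key = slice.get(StaticStrings::KeyString);
        (void)key;
      });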
@ -70,6 +70,15 @@ void ManagedDocumentResult::setManaged(uint8_t const* vpack, TRI_voc_rid_t revis
  _managed = true;
}

void ManagedDocumentResult::setManagedAfterStringUsage(TRI_voc_rid_t revisionId) {
  TRI_ASSERT(!_string.empty());
  TRI_ASSERT(_useString);

  _vpack = reinterpret_cast<uint8_t*>(const_cast<char*>(_string.data()));
  _lastRevisionId = revisionId;
  _useString = true;
}

void ManagedDocumentResult::setManaged(std::string&& str, TRI_voc_rid_t revisionId) {
  reset();
  _string = std::move(str);

@ -79,16 +88,16 @@ void ManagedDocumentResult::setManaged(std::string&& str, TRI_voc_rid_t revision
}

void ManagedDocumentResult::reset() noexcept {
  if(_managed) {
  if (_managed) {
    delete[] _vpack;
  }
  _managed = false;
  _length = 0;

  if(_useString){
  if (_useString) {
    _string.clear();
    _useString = false;
  }
  _useString = false;

  _lastRevisionId = 0;
  _vpack = nullptr;

@ -97,22 +106,8 @@ void ManagedDocumentResult::reset() noexcept {
void ManagedDocumentResult::addToBuilder(velocypack::Builder& builder, bool allowExternals) const {
  TRI_ASSERT(!empty());
  if (allowExternals && canUseInExternal()) {
    builder.addExternal(_vpack);
    builder.add(velocypack::Slice(_vpack));
  } else {
    builder.add(velocypack::Slice(_vpack));
  }
}

// @brief Creates an AQLValue with the content of this ManagedDocumentResult
// The caller is responsible to properly destroy() the
// returned value
AqlValue ManagedDocumentResult::createAqlValue() const {
  TRI_ASSERT(!empty());
  if (canUseInExternal()) {
    // No need to copy. Underlying structure guarantees that Slices stay
    // valid
    return AqlValue(_vpack, AqlValueFromManagedDocument());
  }
  // Do copy. Otherwise the slice may go out of scope
  return AqlValue(VPackSlice(_vpack));
}

@ -49,7 +49,7 @@ class ManagedDocumentResult {
  ManagedDocumentResult& operator=(ManagedDocumentResult const& other) = delete;

  ManagedDocumentResult& operator=(ManagedDocumentResult&& other) {
    if (other._useString){
    if (other._useString) {
      setManaged(std::move(other._string), other._lastRevisionId);
      other._managed = false;
      other.reset();

@ -81,6 +81,14 @@ class ManagedDocumentResult {
  inline TRI_voc_rid_t lastRevisionId() const { return _lastRevisionId; }

  void reset() noexcept;

  std::string* prepareStringUsage() {
    reset();
    _useString = true;
    return &_string;
  }

  void setManagedAfterStringUsage(TRI_voc_rid_t revisionId);

  inline uint8_t const* vpack() const {
    TRI_ASSERT(_vpack != nullptr);

@ -95,11 +103,6 @@ class ManagedDocumentResult {

  void addToBuilder(velocypack::Builder& builder, bool allowExternals) const;

  // @brief Creates an AQLValue with the content of this ManagedDocumentResult
  // The caller is responsible to properly destroy() the
  // returned value
  aql::AqlValue createAqlValue() const;

 private:
  uint64_t _length;
  TRI_voc_rid_t _lastRevisionId;
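The two new members are meant to be used as a pair; a sketch under the assumption that some storage-engine lookup fills the exposed string with the document's VelocyPack bytes (that lookup and the revisionId variable are hypothetical here):

  ManagedDocumentResult mdr;
  std::string* buffer = mdr.prepareStringUsage();  // reset + expose the internal string
  // ... a lookup appends the document's VelocyPack bytes to *buffer ...
  mdr.setManagedAfterStringUsage(revisionId);      // vpack() now points into that string
  VPackSlice doc(mdr.vpack());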
@ -823,6 +823,13 @@ function processQuery (query, explain) {
    return results[0];
  };

  var projection = function (node) {
    if (node.projection && node.projection.length > 0) {
      return ', projection: `' + node.projection.join('`.`') + '`';
    }
    return '';
  };

  var label = function (node) {
    var rc, v, e, edgeCols;
    var parts = [];

@ -833,7 +840,7 @@ function processQuery (query, explain) {
        return keyword('EMPTY') + ' ' + annotation('/* empty result set */');
      case 'EnumerateCollectionNode':
        collectionVariables[node.outVariable.id] = node.collection;
        return keyword('FOR') + ' ' + variableName(node.outVariable) + ' ' + keyword('IN') + ' ' + collection(node.collection) + ' ' + annotation('/* full collection scan' + (node.random ? ', random order' : '') + (node.satellite ? ', satellite' : '') + ' */');
        return keyword('FOR') + ' ' + variableName(node.outVariable) + ' ' + keyword('IN') + ' ' + collection(node.collection) + ' ' + annotation('/* full collection scan' + (node.random ? ', random order' : '') + projection(node) + (node.satellite ? ', satellite' : '') + ' */');
      case 'EnumerateListNode':
        return keyword('FOR') + ' ' + variableName(node.outVariable) + ' ' + keyword('IN') + ' ' + variableName(node.inVariable) + ' ' + annotation('/* list iteration */');
      case 'IndexNode':

@ -853,7 +860,7 @@ function processQuery (query, explain) {
        }
        indexes.push(idx);
      });
      return keyword('FOR') + ' ' + variableName(node.outVariable) + ' ' + keyword('IN') + ' ' + collection(node.collection) + ' ' + annotation('/* ' + types.join(', ') + (node.satellite ? ', satellite' : '') + ' */');
      return keyword('FOR') + ' ' + variableName(node.outVariable) + ' ' + keyword('IN') + ' ' + collection(node.collection) + ' ' + annotation('/* ' + types.join(', ') + projection(node) + (node.satellite ? ', satellite' : '') + ' */');
    case 'IndexRangeNode':
      collectionVariables[node.outVariable.id] = node.collection;
      var index = node.index;

@ -40,9 +40,9 @@ function optimizerRuleTestSuite () {
  var ruleName = "distribute-in-cluster";
  // various choices to control the optimizer:
  var rulesNone = { optimizer: { rules: [ "-all" ] } };
  var rulesAll = { optimizer: { rules: [ "+all" ] } };
  var rulesAll = { optimizer: { rules: [ "+all", "-reduce-extraction-to-projection" ] } };
  var thisRuleEnabled = { optimizer: { rules: [ "-all", "+" + ruleName ] } };
  var thisRuleDisabled = { optimizer: { rules: [ "+all", "-" + ruleName ] } };
  var thisRuleDisabled = { optimizer: { rules: [ "+all", "-reduce-extraction-to-projection", "-" + ruleName ] } };
  var maxPlans = { optimizer: { rules: [ "-all" ] }, maxNumberOfPlans: 1 };

  var cn1 = "UnitTestsAqlOptimizerRuleUndist1";
@ -0,0 +1,133 @@
/*jshint globalstrict:false, strict:false, maxlen: 500 */
/*global assertEqual, assertNotEqual, assertTrue, AQL_EXPLAIN, AQL_EXECUTE */

////////////////////////////////////////////////////////////////////////////////
/// @brief tests for optimizer rules
///
/// @file
///
/// DISCLAIMER
///
/// Copyright 2010-2012 triagens GmbH, Cologne, Germany
///
/// Licensed under the Apache License, Version 2.0 (the "License");
/// you may not use this file except in compliance with the License.
/// You may obtain a copy of the License at
///
///     http://www.apache.org/licenses/LICENSE-2.0
///
/// Unless required by applicable law or agreed to in writing, software
/// distributed under the License is distributed on an "AS IS" BASIS,
/// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
/// See the License for the specific language governing permissions and
/// limitations under the License.
///
/// Copyright holder is triAGENS GmbH, Cologne, Germany
///
/// @author Jan Steemann
/// @author Copyright 2012, triAGENS GmbH, Cologne, Germany
////////////////////////////////////////////////////////////////////////////////

var jsunity = require("jsunity");
var helper = require("@arangodb/aql-helper");
var isEqual = helper.isEqual;
var db = require("@arangodb").db;
var ruleName = "reduce-extraction-to-projection";

////////////////////////////////////////////////////////////////////////////////
/// @brief test suite
////////////////////////////////////////////////////////////////////////////////

function optimizerRuleTestSuite () {
  var c = null;
  var cn = "UnitTestsOptimizer";

  return {

////////////////////////////////////////////////////////////////////////////////
/// @brief set up
////////////////////////////////////////////////////////////////////////////////

    setUp : function () {
      db._drop(cn);
      c = db._create(cn);

      for (var i = 0; i < 1000; ++i) {
        c.insert({ value1: i, value2: "test" + i });
      }
    },

////////////////////////////////////////////////////////////////////////////////
/// @brief tear down
////////////////////////////////////////////////////////////////////////////////

    tearDown : function () {
      db._drop(cn);
      c = null;
    },

    testNotActive : function () {
      var queries = [
        "FOR doc IN @@cn FILTER doc.value1 == 1 RETURN doc",
        "FOR doc IN @@cn FILTER doc.value1 == 1 RETURN doc.value2",
        "FOR doc IN @@cn SORT doc.value1, doc.value2 RETURN doc",
        "FOR doc IN @@cn SORT doc.value1 RETURN doc.value2",
        "FOR doc IN @@cn COLLECT v = doc.value1 INTO g RETURN g",
        "FOR doc IN @@cn FILTER doc.value1 == 1 SORT doc.value2 RETURN doc.value1",
        "FOR doc IN @@cn FILTER doc.value1 == 1 RETURN doc",
        "FOR doc IN @@cn FILTER doc.value1 == 1 FILTER doc.value2 == 1 RETURN doc.value1",
        "FOR doc IN @@cn FILTER doc.value1 == 1 && doc.value2 == 1 RETURN doc.value2",
        "FOR doc IN @@cn FILTER doc.value1 >= 132 && doc.value <= 134 SORT doc.value1 RETURN doc.value1"
      ];

      queries.forEach(function(query) {
        var result = AQL_EXPLAIN(query, { "@cn" : cn });
        assertEqual(-1, result.plan.rules.indexOf(ruleName), query);
      });
    },

    testActive : function () {
      var queries = [
        "FOR doc IN @@cn FILTER doc.value1 == 1 RETURN doc.value1",
        "FOR doc IN @@cn FILTER doc.value1 == 1 RETURN 1",
        "FOR doc IN @@cn SORT doc.value1 RETURN doc.value1",
        "FOR doc IN @@cn SORT doc.value1 RETURN 1",
        "FOR doc IN @@cn COLLECT v = doc.value1 INTO g RETURN v", // g will be optimized away
        "FOR doc IN @@cn FILTER doc.value1 == 1 SORT doc.value1 RETURN doc.value1",
        "FOR doc IN @@cn FILTER doc.value1 > 1 SORT doc.value1 RETURN doc.value1",
        "FOR doc IN @@cn SORT doc.value1 RETURN doc.value1"
      ];

      queries.forEach(function(query) {
        var result = AQL_EXPLAIN(query, { "@cn" : cn });
        assertNotEqual(-1, result.plan.rules.indexOf(ruleName), query);
      });
    },

    testResults : function () {
      var queries = [
        [ "FOR doc IN @@cn FILTER doc.value1 == 1 RETURN 42", [ 42 ] ],
        [ "FOR doc IN @@cn FILTER doc.value1 == 1 RETURN doc.value1", [ 1 ] ],
        [ "FOR doc IN @@cn FILTER doc.value1 <= 1 SORT doc.value1 RETURN doc.value1", [ 0, 1 ] ],
        [ "FOR doc IN @@cn FILTER doc.value1 >= 132 && doc.value1 <= 134 SORT doc.value1 RETURN doc.value1", [ 132, 133, 134 ] ]
      ];

      queries.forEach(function(query) {
        var result = AQL_EXPLAIN(query[0], { "@cn" : cn });
        assertNotEqual(-1, result.plan.rules.indexOf(ruleName), query[0]);

        result = AQL_EXECUTE(query[0], { "@cn" : cn });
        assertEqual(query[1], result.json);
      });
    },

  };
}

////////////////////////////////////////////////////////////////////////////////
/// @brief executes the test suite
////////////////////////////////////////////////////////////////////////////////

jsunity.run(optimizerRuleTestSuite);

return jsunity.done();
@ -27,26 +27,17 @@
#include "Basics/fpconv.h"
#include "Zip/zip.h"

////////////////////////////////////////////////////////////////////////////////
/// @brief append a character without check
////////////////////////////////////////////////////////////////////////////////

static inline void AppendChar(TRI_string_buffer_t* self, char chr) {
  *self->_current++ = chr;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief how much space is presently left in buffer?
////////////////////////////////////////////////////////////////////////////////

static inline size_t Remaining(TRI_string_buffer_t* self) {
  return self->_len - static_cast<size_t>(self->_current - self->_buffer);
}

////////////////////////////////////////////////////////////////////////////////
/// @brief reserve space
////////////////////////////////////////////////////////////////////////////////

static int Reserve(TRI_string_buffer_t* self, size_t size) {
  if (size > Remaining(self)) {
    ptrdiff_t off = self->_current - self->_buffer;

@ -72,10 +63,7 @@ static int Reserve(TRI_string_buffer_t* self, size_t size) {
  return TRI_ERROR_NO_ERROR;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief append a string to a string buffer
////////////////////////////////////////////////////////////////////////////////

static int AppendString(TRI_string_buffer_t* self, char const* str,
                        size_t const len) {
  if (0 < len) {

@ -92,10 +80,7 @@ static int AppendString(TRI_string_buffer_t* self, char const* str,
  return TRI_ERROR_NO_ERROR;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief create a new string buffer and initialize it
////////////////////////////////////////////////////////////////////////////////

TRI_string_buffer_t* TRI_CreateStringBuffer(TRI_memory_zone_t* zone) {
  auto self = static_cast<TRI_string_buffer_t*>(TRI_Allocate(
      zone, sizeof(TRI_string_buffer_t), false));

@ -109,10 +94,7 @@ TRI_string_buffer_t* TRI_CreateStringBuffer(TRI_memory_zone_t* zone) {
  return self;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief create a new string buffer and initialize it with a specific size
////////////////////////////////////////////////////////////////////////////////

TRI_string_buffer_t* TRI_CreateSizedStringBuffer(TRI_memory_zone_t* zone,
                                                 size_t size) {
  auto self = static_cast<TRI_string_buffer_t*>(TRI_Allocate(

@ -127,12 +109,9 @@ TRI_string_buffer_t* TRI_CreateSizedStringBuffer(TRI_memory_zone_t* zone,
  return self;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief initializes the string buffer
///
/// @warning You must call initialize before using the string buffer.
////////////////////////////////////////////////////////////////////////////////

void TRI_InitStringBuffer(TRI_string_buffer_t* self, TRI_memory_zone_t* zone,
                          bool initializeMemory) {
  self->_memoryZone = zone;

@ -144,12 +123,9 @@ void TRI_InitStringBuffer(TRI_string_buffer_t* self, TRI_memory_zone_t* zone,
  Reserve(self, 120);
}

////////////////////////////////////////////////////////////////////////////////
/// @brief initializes the string buffer with a specific size
///
/// @warning You must call initialize before using the string buffer.
////////////////////////////////////////////////////////////////////////////////

void TRI_InitSizedStringBuffer(TRI_string_buffer_t* self,
                               TRI_memory_zone_t* zone, size_t const length,
                               bool initializeMemory) {

@ -164,24 +140,18 @@ void TRI_InitSizedStringBuffer(TRI_string_buffer_t* self,
  }
}

////////////////////////////////////////////////////////////////////////////////
/// @brief frees the string buffer
///
/// @warning You must call free or destroy after using the string buffer.
////////////////////////////////////////////////////////////////////////////////

void TRI_DestroyStringBuffer(TRI_string_buffer_t* self) {
  if (self->_buffer != nullptr) {
    TRI_Free(self->_memoryZone, self->_buffer);
  }
}

////////////////////////////////////////////////////////////////////////////////
/// @brief frees the string buffer and cleans the buffer
///
/// @warning You must call free or destroy after using the string buffer.
////////////////////////////////////////////////////////////////////////////////

void TRI_AnnihilateStringBuffer(TRI_string_buffer_t* self) {
  if (self->_buffer != nullptr) {
    // somewhat paranoid? don't ask me

@ -191,19 +161,13 @@ void TRI_AnnihilateStringBuffer(TRI_string_buffer_t* self) {
  }
}

////////////////////////////////////////////////////////////////////////////////
/// @brief frees the string buffer and the pointer
////////////////////////////////////////////////////////////////////////////////

void TRI_FreeStringBuffer(TRI_memory_zone_t* zone, TRI_string_buffer_t* self) {
  TRI_DestroyStringBuffer(self);
  TRI_Free(zone, self);
}

////////////////////////////////////////////////////////////////////////////////
/// @brief compress the string buffer using deflate
////////////////////////////////////////////////////////////////////////////////

int TRI_DeflateStringBuffer(TRI_string_buffer_t* self, size_t bufferSize) {
  TRI_string_buffer_t deflated;
  char const* ptr;

@ -287,10 +251,7 @@ int TRI_DeflateStringBuffer(TRI_string_buffer_t* self, size_t bufferSize) {
  return TRI_ERROR_NO_ERROR;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief ensure the string buffer has a specific capacity
////////////////////////////////////////////////////////////////////////////////

int TRI_ReserveStringBuffer(TRI_string_buffer_t* self, size_t const length) {
  if (length > 0) {
    return Reserve(self, length);

@ -298,10 +259,7 @@ int TRI_ReserveStringBuffer(TRI_string_buffer_t* self, size_t const length) {
  return TRI_ERROR_NO_ERROR;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief swaps content with another string buffer
////////////////////////////////////////////////////////////////////////////////

void TRI_SwapStringBuffer(TRI_string_buffer_t* self,
                          TRI_string_buffer_t* other) {
  char* otherBuffer = other->_buffer;

@ -320,51 +278,38 @@ void TRI_SwapStringBuffer(TRI_string_buffer_t* self,
  self->_memoryZone = otherZone;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief returns pointer to the beginning of the character buffer
////////////////////////////////////////////////////////////////////////////////

char const* TRI_BeginStringBuffer(TRI_string_buffer_t const* self) {
  return self->_buffer;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief returns pointer to the end of the character buffer
////////////////////////////////////////////////////////////////////////////////

char const* TRI_EndStringBuffer(TRI_string_buffer_t const* self) {
  return self->_current;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief returns length of the character buffer
////////////////////////////////////////////////////////////////////////////////

size_t TRI_LengthStringBuffer(TRI_string_buffer_t const* self) {
  return (size_t)(self->_current - self->_buffer);
}

////////////////////////////////////////////////////////////////////////////////
/// @brief increases length of the character buffer
////////////////////////////////////////////////////////////////////////////////
/// @brief returns capacity of the character buffer
size_t TRI_CapacityStringBuffer(TRI_string_buffer_t const* self) {
  return self->_len;
}

/// @brief increases length of the character buffer
void TRI_IncreaseLengthStringBuffer(TRI_string_buffer_t* self, size_t n) {
  self->_current += n;
  *self->_current = '\0';
}

////////////////////////////////////////////////////////////////////////////////
/// @brief returns true if buffer is empty
////////////////////////////////////////////////////////////////////////////////

bool TRI_EmptyStringBuffer(TRI_string_buffer_t const* self) {
  return self->_buffer == self->_current;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief clears the buffer
////////////////////////////////////////////////////////////////////////////////

void TRI_ClearStringBuffer(TRI_string_buffer_t* self) {
  if (self->_buffer != nullptr) {
    if (self->_len > 0 && self->_current == self->_buffer) {
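For reference, a small sketch of how the existing length accessor and the new capacity accessor differ; TRI_UNKNOWN_MEM_ZONE is assumed to be the memory zone normally used for such buffers:

  TRI_string_buffer_t* buf = TRI_CreateSizedStringBuffer(TRI_UNKNOWN_MEM_ZONE, 64);
  TRI_AppendStringStringBuffer(buf, "hello");
  size_t len = TRI_LengthStringBuffer(buf);    // bytes written so far: 5
  size_t cap = TRI_CapacityStringBuffer(buf);  // allocated size of the buffer (_len)
  TRI_FreeStringBuffer(TRI_UNKNOWN_MEM_ZONE, buf);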
@ -383,10 +328,7 @@ void TRI_ClearStringBuffer(TRI_string_buffer_t* self) {
  }
}

////////////////////////////////////////////////////////////////////////////////
/// @brief resets the buffer (without clearing)
////////////////////////////////////////////////////////////////////////////////

void TRI_ResetStringBuffer(TRI_string_buffer_t* self) {
  if (self->_buffer != nullptr) {
    self->_current = self->_buffer;

@ -397,10 +339,7 @@ void TRI_ResetStringBuffer(TRI_string_buffer_t* self) {
  }
}

////////////////////////////////////////////////////////////////////////////////
/// @brief steals the buffer of a string buffer
////////////////////////////////////////////////////////////////////////////////

char* TRI_StealStringBuffer(TRI_string_buffer_t* self) {
  char* result = self->_buffer;

@ -413,20 +352,14 @@ char* TRI_StealStringBuffer(TRI_string_buffer_t* self) {
  return result;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief copies the string buffer
////////////////////////////////////////////////////////////////////////////////

int TRI_CopyStringBuffer(TRI_string_buffer_t* self,
                         TRI_string_buffer_t const* source) {
  return TRI_ReplaceStringStringBuffer(
      self, source->_buffer, (size_t)(source->_current - source->_buffer));
}

////////////////////////////////////////////////////////////////////////////////
/// @brief removes the first characters
////////////////////////////////////////////////////////////////////////////////

void TRI_EraseFrontStringBuffer(TRI_string_buffer_t* self, size_t len) {
  size_t off = (size_t)(self->_current - self->_buffer);

@ -439,11 +372,8 @@ void TRI_EraseFrontStringBuffer(TRI_string_buffer_t* self, size_t len) {
  }
}

////////////////////////////////////////////////////////////////////////////////
/// @brief removes the first characters but does not clear the remaining
/// buffer space
////////////////////////////////////////////////////////////////////////////////

void TRI_MoveFrontStringBuffer(TRI_string_buffer_t* self, size_t len) {
  size_t off = (size_t)(self->_current - self->_buffer);

@ -456,10 +386,7 @@ void TRI_MoveFrontStringBuffer(TRI_string_buffer_t* self, size_t len) {
  }
}

////////////////////////////////////////////////////////////////////////////////
/// @brief replaces characters
////////////////////////////////////////////////////////////////////////////////

int TRI_ReplaceStringStringBuffer(TRI_string_buffer_t* self, char const* str,
                                  size_t len) {
  self->_current = self->_buffer;

@ -467,10 +394,7 @@ int TRI_ReplaceStringStringBuffer(TRI_string_buffer_t* self, char const* str,
  return TRI_AppendString2StringBuffer(self, str, len);
}

////////////////////////////////////////////////////////////////////////////////
/// @brief appends character
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendCharStringBuffer(TRI_string_buffer_t* self, char chr) {
  int res = Reserve(self, 1);

@ -482,18 +406,12 @@ int TRI_AppendCharStringBuffer(TRI_string_buffer_t* self, char chr) {
  return TRI_ERROR_NO_ERROR;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief appends characters
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendStringStringBuffer(TRI_string_buffer_t* self, char const* str) {
  return AppendString(self, str, strlen(str));
}

////////////////////////////////////////////////////////////////////////////////
/// @brief appends characters
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendString2StringBuffer(TRI_string_buffer_t* self, char const* str,
                                  size_t len) {
  return AppendString(self, str, len);

@ -607,20 +525,14 @@ int AppendJsonEncoded(TRI_string_buffer_t* self, char const* src,
  return TRI_ERROR_NO_ERROR;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief appends characters but json-encode the string
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendJsonEncodedStringStringBuffer(TRI_string_buffer_t* self,
                                            char const* src, size_t length,
                                            bool escapeSlash) {
  return AppendJsonEncoded(self, src, length, escapeSlash);
}

////////////////////////////////////////////////////////////////////////////////
/// @brief appends integer with two digits
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendInteger2StringBuffer(TRI_string_buffer_t* self, uint32_t attr) {
  int res = Reserve(self, 2);

@ -634,10 +546,7 @@ int TRI_AppendInteger2StringBuffer(TRI_string_buffer_t* self, uint32_t attr) {
  return TRI_ERROR_NO_ERROR;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief appends integer with three digits
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendInteger3StringBuffer(TRI_string_buffer_t* self, uint32_t attr) {
  int res = Reserve(self, 3);

@ -652,10 +561,7 @@ int TRI_AppendInteger3StringBuffer(TRI_string_buffer_t* self, uint32_t attr) {
  return TRI_ERROR_NO_ERROR;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief appends integer with four digits
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendInteger4StringBuffer(TRI_string_buffer_t* self, uint32_t attr) {
  int res = Reserve(self, 4);

@ -671,10 +577,7 @@ int TRI_AppendInteger4StringBuffer(TRI_string_buffer_t* self, uint32_t attr) {
  return TRI_ERROR_NO_ERROR;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief appends integer with 8 bits
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendInt8StringBuffer(TRI_string_buffer_t* self, int8_t attr) {
  int res = Reserve(self, 4);

@ -688,10 +591,7 @@ int TRI_AppendInt8StringBuffer(TRI_string_buffer_t* self, int8_t attr) {
  return TRI_ERROR_NO_ERROR;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief appends unsigned integer with 8 bits
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendUInt8StringBuffer(TRI_string_buffer_t* self, uint8_t attr) {
  int res = Reserve(self, 3);

@ -705,10 +605,7 @@ int TRI_AppendUInt8StringBuffer(TRI_string_buffer_t* self, uint8_t attr) {
  return TRI_ERROR_NO_ERROR;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief appends integer with 16 bits
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendInt16StringBuffer(TRI_string_buffer_t* self, int16_t attr) {
  int res = Reserve(self, 6);

@ -722,10 +619,7 @@ int TRI_AppendInt16StringBuffer(TRI_string_buffer_t* self, int16_t attr) {
  return TRI_ERROR_NO_ERROR;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief appends unsigned integer with 32 bits
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendUInt16StringBuffer(TRI_string_buffer_t* self, uint16_t attr) {
  int res = Reserve(self, 5);

@ -739,10 +633,7 @@ int TRI_AppendUInt16StringBuffer(TRI_string_buffer_t* self, uint16_t attr) {
  return TRI_ERROR_NO_ERROR;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief appends integer with 32 bits
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendInt32StringBuffer(TRI_string_buffer_t* self, int32_t attr) {
  int res = Reserve(self, 11);

@ -756,10 +647,7 @@ int TRI_AppendInt32StringBuffer(TRI_string_buffer_t* self, int32_t attr) {
  return TRI_ERROR_NO_ERROR;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief appends unsigned integer with 32 bits
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendUInt32StringBuffer(TRI_string_buffer_t* self, uint32_t attr) {
  int res = Reserve(self, 10);

@ -773,10 +661,7 @@ int TRI_AppendUInt32StringBuffer(TRI_string_buffer_t* self, uint32_t attr) {
  return TRI_ERROR_NO_ERROR;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief appends integer with 64 bits
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendInt64StringBuffer(TRI_string_buffer_t* self, int64_t attr) {
  int res = Reserve(self, 20);

@ -790,10 +675,7 @@ int TRI_AppendInt64StringBuffer(TRI_string_buffer_t* self, int64_t attr) {
  return TRI_ERROR_NO_ERROR;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief appends unsigned integer with 64 bits
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendUInt64StringBuffer(TRI_string_buffer_t* self, uint64_t attr) {
  int res = Reserve(self, 21);

@ -807,10 +689,7 @@ int TRI_AppendUInt64StringBuffer(TRI_string_buffer_t* self, uint64_t attr) {
  return TRI_ERROR_NO_ERROR;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief appends unsigned integer with 32 bits in hex
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendUInt32HexStringBuffer(TRI_string_buffer_t* self, uint32_t attr) {
  int res = Reserve(self, 5);

@ -824,10 +703,7 @@ int TRI_AppendUInt32HexStringBuffer(TRI_string_buffer_t* self, uint32_t attr) {
  return TRI_ERROR_NO_ERROR;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief appends unsigned integer with 64 bits in hex
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendUInt64HexStringBuffer(TRI_string_buffer_t* self, uint64_t attr) {
  int res = Reserve(self, 9);

@ -841,10 +717,7 @@ int TRI_AppendUInt64HexStringBuffer(TRI_string_buffer_t* self, uint64_t attr) {
  return TRI_ERROR_NO_ERROR;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief appends floating point number
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendDoubleStringBuffer(TRI_string_buffer_t* self, double attr) {
  if (std::isnan(attr)) {
    return TRI_AppendStringStringBuffer(self, "NaN");

@ -869,10 +742,7 @@ int TRI_AppendDoubleStringBuffer(TRI_string_buffer_t* self, double attr) {
  return TRI_ERROR_NO_ERROR;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief appends time in standard format
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendTimeStringBuffer(TRI_string_buffer_t* self, int32_t attr) {
  int hour;
  int minute;

@ -898,10 +768,7 @@ int TRI_AppendTimeStringBuffer(TRI_string_buffer_t* self, int32_t attr) {
  return TRI_ERROR_NO_ERROR;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief appends csv 32-bit integer
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendCsvInt32StringBuffer(TRI_string_buffer_t* self, int32_t i) {
  int res;

@ -921,10 +788,7 @@ int TRI_AppendCsvInt32StringBuffer(TRI_string_buffer_t* self, int32_t i) {
  return TRI_ERROR_NO_ERROR;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief appends csv unisgned 32-bit integer
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendCsvUInt32StringBuffer(TRI_string_buffer_t* self, uint32_t i) {
  int res;

@ -944,10 +808,7 @@ int TRI_AppendCsvUInt32StringBuffer(TRI_string_buffer_t* self, uint32_t i) {
  return TRI_ERROR_NO_ERROR;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief appends csv 64-bit integer
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendCsvInt64StringBuffer(TRI_string_buffer_t* self, int64_t i) {
  int res;

@ -967,10 +828,7 @@ int TRI_AppendCsvInt64StringBuffer(TRI_string_buffer_t* self, int64_t i) {
  return TRI_ERROR_NO_ERROR;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief appends csv unsigned 64-bit integer
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendCsvUInt64StringBuffer(TRI_string_buffer_t* self, uint64_t i) {
  int res;

@ -990,10 +848,7 @@ int TRI_AppendCsvUInt64StringBuffer(TRI_string_buffer_t* self, uint64_t i) {
  return TRI_ERROR_NO_ERROR;
}

////////////////////////////////////////////////////////////////////////////////
/// @brief appends csv double
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendCsvDoubleStringBuffer(TRI_string_buffer_t* self, double d) {
  int res;
@ -30,10 +30,7 @@
|
|||
|
||||
#include <sstream>
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief string buffer with formatting routines
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
struct TRI_string_buffer_t {
|
||||
TRI_memory_zone_t* _memoryZone;
|
||||
char* _buffer;
|
||||
|
@ -42,172 +39,97 @@ struct TRI_string_buffer_t {
|
|||
bool _initializeMemory;
|
||||
};
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief create a new string buffer and initialize it
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
TRI_string_buffer_t* TRI_CreateStringBuffer(TRI_memory_zone_t*);
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief create a new string buffer and initialize it with a specific size
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
TRI_string_buffer_t* TRI_CreateSizedStringBuffer(TRI_memory_zone_t*, size_t);
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief initializes the string buffer
|
||||
///
|
||||
/// @warning You must call initialize before using the string buffer.
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
void TRI_InitStringBuffer(TRI_string_buffer_t*, TRI_memory_zone_t*,
|
||||
bool initializeMemory = true);
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief initializes the string buffer with a specific size
|
||||
///
|
||||
/// @warning You must call initialize before using the string buffer.
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
void TRI_InitSizedStringBuffer(TRI_string_buffer_t*, TRI_memory_zone_t*,
|
||||
size_t const, bool initializeMemory = true);
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief frees the string buffer
|
||||
///
|
||||
/// @warning You must call free or destroy after using the string buffer.
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
void TRI_DestroyStringBuffer(TRI_string_buffer_t*);
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief frees the string buffer and cleans the buffer
|
||||
///
|
||||
/// @warning You must call free after or destroy using the string buffer.
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
void TRI_AnnihilateStringBuffer(TRI_string_buffer_t*);
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief frees the string buffer and the pointer
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
void TRI_FreeStringBuffer(TRI_memory_zone_t*, TRI_string_buffer_t*);
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief compress the string buffer using deflate
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
int TRI_DeflateStringBuffer(TRI_string_buffer_t*, size_t);
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief ensure the string buffer has a specific capacity
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
int TRI_ReserveStringBuffer(TRI_string_buffer_t*, size_t const);
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief swaps content with another string buffer
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
void TRI_SwapStringBuffer(TRI_string_buffer_t*, TRI_string_buffer_t*);
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief returns pointer to the beginning of the character buffer
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
char const* TRI_BeginStringBuffer(TRI_string_buffer_t const*);
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief returns pointer to the end of the character buffer
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
char const* TRI_EndStringBuffer(TRI_string_buffer_t const*);
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief returns length of the character buffer
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
size_t TRI_LengthStringBuffer(TRI_string_buffer_t const*);
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief increases length of the character buffer
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
|
||||
void TRI_IncreaseLengthStringBuffer(TRI_string_buffer_t*, size_t);
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief returns true if buffer is empty
|
||||
////////////////////////////////////////////////////////////////////////////////
|
||||
/// @brief returns capacity of the character buffer
|
||||
size_t TRI_CapacityStringBuffer(TRI_string_buffer_t const*);
|
||||
|
||||
/// @brief returns true if buffer is empty

bool TRI_EmptyStringBuffer(TRI_string_buffer_t const*);

////////////////////////////////////////////////////////////////////////////////
/// @brief clears the buffer
////////////////////////////////////////////////////////////////////////////////

void TRI_ClearStringBuffer(TRI_string_buffer_t*);

////////////////////////////////////////////////////////////////////////////////
/// @brief resets the buffer (without clearing)
////////////////////////////////////////////////////////////////////////////////

void TRI_ResetStringBuffer(TRI_string_buffer_t*);

////////////////////////////////////////////////////////////////////////////////
/// @brief steals the buffer of a string buffer
////////////////////////////////////////////////////////////////////////////////

char* TRI_StealStringBuffer(TRI_string_buffer_t*);

////////////////////////////////////////////////////////////////////////////////
/// @brief copies the string buffer
////////////////////////////////////////////////////////////////////////////////

int TRI_CopyStringBuffer(TRI_string_buffer_t*, TRI_string_buffer_t const*);

////////////////////////////////////////////////////////////////////////////////
/// @brief removes the first characters
////////////////////////////////////////////////////////////////////////////////

void TRI_EraseFrontStringBuffer(TRI_string_buffer_t*, size_t);

////////////////////////////////////////////////////////////////////////////////
/// @brief removes the first characters but does not clear the remaining
/// buffer space
////////////////////////////////////////////////////////////////////////////////

void TRI_MoveFrontStringBuffer(TRI_string_buffer_t*, size_t);

////////////////////////////////////////////////////////////////////////////////
/// @brief replaces characters
////////////////////////////////////////////////////////////////////////////////

int TRI_ReplaceStringStringBuffer(TRI_string_buffer_t*, char const*, size_t);

////////////////////////////////////////////////////////////////////////////////
/// @brief appends character
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendCharStringBuffer(TRI_string_buffer_t* self, char chr);

////////////////////////////////////////////////////////////////////////////////
/// @brief appends characters
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendStringStringBuffer(TRI_string_buffer_t* self, char const* str);

////////////////////////////////////////////////////////////////////////////////
/// @brief appends characters
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendString2StringBuffer(TRI_string_buffer_t* self, char const* str,
                                  size_t len);

////////////////////////////////////////////////////////////////////////////////
/// @brief appends characters but does not check buffer bounds
////////////////////////////////////////////////////////////////////////////////

static inline void TRI_AppendCharUnsafeStringBuffer(TRI_string_buffer_t* self, char chr) {
#ifdef ARANGODB_ENABLE_MAINTAINER_MODE
  TRI_ASSERT(self->_len - static_cast<size_t>(self->_current - self->_buffer) > 0);

@@ -241,131 +163,68 @@ static inline void TRI_AppendStringUnsafeStringBuffer(TRI_string_buffer_t* self,
  self->_current += str.size();
}

////////////////////////////////////////////////////////////////////////////////
/// @brief appends characters but json-encodes the string
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendJsonEncodedStringStringBuffer(TRI_string_buffer_t* self,
                                            char const* str, size_t, bool);

////////////////////////////////////////////////////////////////////////////////
/// @brief appends integer with two digits
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendInteger2StringBuffer(TRI_string_buffer_t* self, uint32_t attr);

////////////////////////////////////////////////////////////////////////////////
/// @brief appends integer with three digits
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendInteger3StringBuffer(TRI_string_buffer_t* self, uint32_t attr);

////////////////////////////////////////////////////////////////////////////////
/// @brief appends integer with four digits
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendInteger4StringBuffer(TRI_string_buffer_t* self, uint32_t attr);

////////////////////////////////////////////////////////////////////////////////
/// @brief appends integer with 8 bits
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendInt8StringBuffer(TRI_string_buffer_t* self, int8_t attr);

////////////////////////////////////////////////////////////////////////////////
/// @brief appends unsigned integer with 8 bits
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendUInt8StringBuffer(TRI_string_buffer_t* self, uint8_t attr);

////////////////////////////////////////////////////////////////////////////////
/// @brief appends integer with 16 bits
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendInt16StringBuffer(TRI_string_buffer_t* self, int16_t attr);

////////////////////////////////////////////////////////////////////////////////
/// @brief appends unsigned integer with 16 bits
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendUInt16StringBuffer(TRI_string_buffer_t* self, uint16_t attr);

////////////////////////////////////////////////////////////////////////////////
/// @brief appends integer with 32 bits
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendInt32StringBuffer(TRI_string_buffer_t* self, int32_t attr);

////////////////////////////////////////////////////////////////////////////////
/// @brief appends unsigned integer with 32 bits
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendUInt32StringBuffer(TRI_string_buffer_t* self, uint32_t attr);

////////////////////////////////////////////////////////////////////////////////
/// @brief appends integer with 64 bits
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendInt64StringBuffer(TRI_string_buffer_t* self, int64_t attr);

////////////////////////////////////////////////////////////////////////////////
/// @brief appends unsigned integer with 64 bits
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendUInt64StringBuffer(TRI_string_buffer_t* self, uint64_t attr);

////////////////////////////////////////////////////////////////////////////////
/// @brief appends unsigned integer with 32 bits in hex
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendUInt32HexStringBuffer(TRI_string_buffer_t* self, uint32_t attr);

////////////////////////////////////////////////////////////////////////////////
/// @brief appends unsigned integer with 64 bits in hex
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendUInt64HexStringBuffer(TRI_string_buffer_t* self, uint64_t attr);

////////////////////////////////////////////////////////////////////////////////
/// @brief appends a floating point number (8-byte double)
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendDoubleStringBuffer(TRI_string_buffer_t* self, double attr);

////////////////////////////////////////////////////////////////////////////////
/// @brief appends time in standard format
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendTimeStringBuffer(TRI_string_buffer_t* self, int32_t attr);

////////////////////////////////////////////////////////////////////////////////
/// @brief appends csv 32-bit integer
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendCsvInt32StringBuffer(TRI_string_buffer_t* self, int32_t i);

////////////////////////////////////////////////////////////////////////////////
/// @brief appends csv unsigned 32-bit integer
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendCsvUInt32StringBuffer(TRI_string_buffer_t* self, uint32_t i);

////////////////////////////////////////////////////////////////////////////////
/// @brief appends csv 64-bit integer
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendCsvInt64StringBuffer(TRI_string_buffer_t* self, int64_t i);

////////////////////////////////////////////////////////////////////////////////
/// @brief appends csv unsigned 64-bit integer
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendCsvUInt64StringBuffer(TRI_string_buffer_t* self, uint64_t i);

////////////////////////////////////////////////////////////////////////////////
/// @brief appends csv double
////////////////////////////////////////////////////////////////////////////////

int TRI_AppendCsvDoubleStringBuffer(TRI_string_buffer_t* self, double d);
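
////////////////////////////////////////////////////////////////////////////////
/// @brief usage sketch for the C-level API above (illustrative only, not part
/// of the original header). It assumes TRI_UNKNOWN_MEM_ZONE is available from
/// the Basics memory header and that the init/append/read/destroy functions
/// declared elsewhere in this file behave as their briefs describe. The
/// function name is purely hypothetical; the example builds "result: 42".
////////////////////////////////////////////////////////////////////////////////

static inline void ExampleStringBufferCApiUsage() {
  TRI_string_buffer_t buf;
  TRI_InitStringBuffer(&buf, TRI_UNKNOWN_MEM_ZONE, true);  // zero-initialize memory
  TRI_AppendStringStringBuffer(&buf, "result: ");          // append a C string
  TRI_AppendUInt64StringBuffer(&buf, 42);                  // render the integer as text
  char const* data = TRI_BeginStringBuffer(&buf);          // contiguous character data
  size_t length = TRI_LengthStringBuffer(&buf);            // number of bytes used
  (void)data;
  (void)length;
  TRI_DestroyStringBuffer(&buf);                           // release the heap allocation
}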

// -----------------------------------------------------------------------------

@@ -381,20 +240,14 @@ int TRI_AppendCsvDoubleStringBuffer(TRI_string_buffer_t* self, double d);
namespace arangodb {
namespace basics {

////////////////////////////////////////////////////////////////////////////////
/// @brief string buffer with formatting routines
////////////////////////////////////////////////////////////////////////////////

class StringBuffer {
  StringBuffer() = delete;
  StringBuffer(StringBuffer const&) = delete;
  StringBuffer& operator=(StringBuffer const&) = delete;

 public:
  //////////////////////////////////////////////////////////////////////////////
  /// @brief initializes the string buffer
  //////////////////////////////////////////////////////////////////////////////

  explicit StringBuffer(TRI_memory_zone_t* zone, bool initializeMemory = true) {
    TRI_InitStringBuffer(&_buffer, zone, initializeMemory);

@@ -403,10 +256,7 @@ class StringBuffer {
    }
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief initializes the string buffer
  //////////////////////////////////////////////////////////////////////////////

  StringBuffer(TRI_memory_zone_t* zone, size_t initialSize, bool initializeMemory = true) {
    TRI_InitSizedStringBuffer(&_buffer, zone, initialSize, initializeMemory);

@@ -415,38 +265,23 @@ class StringBuffer {
    }
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief frees the string buffer
  //////////////////////////////////////////////////////////////////////////////

  ~StringBuffer() { TRI_DestroyStringBuffer(&_buffer); }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief frees the string buffer and cleans the buffer
  //////////////////////////////////////////////////////////////////////////////

  void annihilate() { TRI_AnnihilateStringBuffer(&_buffer); }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief ensure the string buffer has a specific capacity
  //////////////////////////////////////////////////////////////////////////////

  int reserve(size_t length) {
    return TRI_ReserveStringBuffer(&_buffer, length);
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief compress the buffer using deflate
  //////////////////////////////////////////////////////////////////////////////

  int deflate(size_t bufferSize) {
    return TRI_DeflateStringBuffer(&_buffer, bufferSize);
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief uncompress the buffer into stringstream out, using zlib-inflate
  //////////////////////////////////////////////////////////////////////////////

  int inflate(std::stringstream& out, size_t bufferSize = 16384,
              size_t skip = 0) {
    z_stream strm;

@@ -519,10 +354,7 @@ class StringBuffer {
    return TRI_ERROR_INTERNAL;
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief uncompress the buffer into StringBuffer out, using zlib-inflate
  //////////////////////////////////////////////////////////////////////////////

  int inflate(arangodb::basics::StringBuffer& out, size_t bufferSize = 16384,
              size_t skip = 0) {
    z_stream strm;

@@ -609,152 +441,95 @@ class StringBuffer {
    return TRI_ERROR_INTERNAL;
  }
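
  //////////////////////////////////////////////////////////////////////////////
  /// @brief illustrative note (not part of the original header): based on the
  /// briefs above, deflate() is assumed to replace the buffer contents with the
  /// zlib-compressed data, while inflate() writes the decompressed data into
  /// the given output. A round trip could then look like this:
  ///
  ///   arangodb::basics::StringBuffer data(TRI_UNKNOWN_MEM_ZONE);
  ///   data.appendText("some payload");
  ///   int res = data.deflate(16384);               // compress in place
  ///
  ///   arangodb::basics::StringBuffer restored(TRI_UNKNOWN_MEM_ZONE);
  ///   if (res == TRI_ERROR_NO_ERROR) {
  ///     res = data.inflate(restored, 16384);       // decompress into 'restored'
  ///   }
  ///
  /// TRI_UNKNOWN_MEM_ZONE and TRI_ERROR_NO_ERROR are assumed to be available
  /// from the Basics headers.
  //////////////////////////////////////////////////////////////////////////////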

  //////////////////////////////////////////////////////////////////////////////
  /// @brief returns the low level buffer
  //////////////////////////////////////////////////////////////////////////////

  TRI_string_buffer_t* stringBuffer() { return &_buffer; }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief swaps content with another string buffer
  //////////////////////////////////////////////////////////////////////////////

  StringBuffer& swap(StringBuffer* other) {
    TRI_SwapStringBuffer(&_buffer, &other->_buffer);
    return *this;
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief returns pointer to the character buffer
  //////////////////////////////////////////////////////////////////////////////

  char const* c_str() const { return TRI_BeginStringBuffer(&_buffer); }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief returns pointer to the beginning of the character buffer
  //////////////////////////////////////////////////////////////////////////////

  char const* begin() const { return TRI_BeginStringBuffer(&_buffer); }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief returns pointer to the beginning of the character buffer
  //////////////////////////////////////////////////////////////////////////////

  char* begin() { return const_cast<char*>(TRI_BeginStringBuffer(&_buffer)); }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief returns pointer to the end of the character buffer
  //////////////////////////////////////////////////////////////////////////////

  char const* end() const { return TRI_EndStringBuffer(&_buffer); }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief returns pointer to the end of the character buffer
  //////////////////////////////////////////////////////////////////////////////

  char* end() { return const_cast<char*>(TRI_EndStringBuffer(&_buffer)); }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief returns length of the character buffer
  //////////////////////////////////////////////////////////////////////////////

  size_t length() const { return TRI_LengthStringBuffer(&_buffer); }

  /// @brief returns capacity of the character buffer
  size_t capacity() const { return TRI_CapacityStringBuffer(&_buffer); }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief increases length of the character buffer
  //////////////////////////////////////////////////////////////////////////////

  void increaseLength(size_t n) { TRI_IncreaseLengthStringBuffer(&_buffer, n); }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief returns true if buffer is empty
  //////////////////////////////////////////////////////////////////////////////

  bool empty() const { return TRI_EmptyStringBuffer(&_buffer); }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief steals the pointer from the string buffer
  //////////////////////////////////////////////////////////////////////////////

  char* steal() { return TRI_StealStringBuffer(&_buffer); }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief resets the buffer
  //////////////////////////////////////////////////////////////////////////////

  StringBuffer& reset() {
    TRI_ResetStringBuffer(&_buffer);
    return *this;
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief clears the buffer
  //////////////////////////////////////////////////////////////////////////////

  StringBuffer& clear() {
    TRI_ClearStringBuffer(&_buffer);
    return *this;
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief assigns text from a string
  //////////////////////////////////////////////////////////////////////////////

  StringBuffer& operator=(std::string const& str) {
    TRI_ReplaceStringStringBuffer(&_buffer, str.c_str(), str.length());
    return *this;
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief copies the string buffer
  //////////////////////////////////////////////////////////////////////////////

  StringBuffer& copy(StringBuffer const& source) {
    TRI_CopyStringBuffer(&_buffer, &source._buffer);
    return *this;
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief removes the first characters and clears the remaining buffer space
  //////////////////////////////////////////////////////////////////////////////

  StringBuffer& erase_front(size_t len) {
    TRI_EraseFrontStringBuffer(&_buffer, len);
    return *this;
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief removes the first characters but does not clear the remaining
  /// buffer space
  //////////////////////////////////////////////////////////////////////////////

  StringBuffer& move_front(size_t len) {
    TRI_MoveFrontStringBuffer(&_buffer, len);
    return *this;
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief replaces characters
  //////////////////////////////////////////////////////////////////////////////

  StringBuffer& replaceText(char const* str, size_t len) {
    TRI_ReplaceStringStringBuffer(&_buffer, str, len);
    return *this;
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief replaces characters
  //////////////////////////////////////////////////////////////////////////////

  StringBuffer& replaceText(StringBuffer const& text) {
    TRI_ReplaceStringStringBuffer(&_buffer, text.c_str(), text.length());
    return *this;
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief set the buffer content
  //////////////////////////////////////////////////////////////////////////////

  void set(TRI_string_buffer_t const* other) {
    if (_buffer._buffer != nullptr) {
      TRI_Free(_buffer._memoryZone, _buffer._buffer);

@@ -766,19 +541,13 @@ class StringBuffer {
    _buffer._len = other->_len;
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief make sure the buffer is null-terminated
  //////////////////////////////////////////////////////////////////////////////

  void ensureNullTerminated() {
    TRI_AppendCharStringBuffer(&_buffer, '\0');
    --_buffer._current;
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief appends character
  //////////////////////////////////////////////////////////////////////////////

  StringBuffer& appendChar(char chr) {
    TRI_AppendCharStringBuffer(&_buffer, chr);
    return *this;

@@ -788,19 +557,13 @@ class StringBuffer {
    TRI_AppendCharUnsafeStringBuffer(&_buffer, chr);
  }
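
  //////////////////////////////////////////////////////////////////////////////
  /// @brief illustrative note (not part of the original header): the unsafe
  /// append path above skips the bounds check, so the caller is expected to
  /// make room first, e.g. via reserve(). A sketch in terms of the C-level
  /// functions declared in this file (the member names wrapping them are
  /// elided in this diff):
  ///
  ///   TRI_string_buffer_t* raw = buffer.stringBuffer();
  ///   TRI_ReserveStringBuffer(raw, 1 + len);              // ensure capacity once
  ///   TRI_AppendCharUnsafeStringBuffer(raw, '/');         // no bounds check here
  ///   TRI_AppendStringUnsafeStringBuffer(raw, str, len);  // caller guarantees room
  //////////////////////////////////////////////////////////////////////////////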

  //////////////////////////////////////////////////////////////////////////////
  /// @brief appends as json-encoded
  //////////////////////////////////////////////////////////////////////////////

  StringBuffer& appendJsonEncoded(char const* str, size_t length) {
    TRI_AppendJsonEncodedStringStringBuffer(&_buffer, str, length, true);
    return *this;
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief appends characters
  //////////////////////////////////////////////////////////////////////////////

  StringBuffer& appendText(char const* str, size_t len) {
    TRI_AppendString2StringBuffer(&_buffer, str, len);
    return *this;

@@ -810,19 +573,13 @@ class StringBuffer {
    TRI_AppendStringUnsafeStringBuffer(&_buffer, str, len);
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief appends characters
  //////////////////////////////////////////////////////////////////////////////

  StringBuffer& appendText(char const* str) {
    TRI_AppendString2StringBuffer(&_buffer, str, strlen(str));
    return *this;
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief appends string
  //////////////////////////////////////////////////////////////////////////////

  StringBuffer& appendText(std::string const& str) {
    TRI_AppendString2StringBuffer(&_buffer, str.c_str(), str.length());
    return *this;

@@ -832,136 +589,97 @@ class StringBuffer {
    TRI_AppendStringUnsafeStringBuffer(&_buffer, str.c_str(), str.length());
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief appends a string buffer
  //////////////////////////////////////////////////////////////////////////////

  StringBuffer& appendText(StringBuffer const& text) {
    TRI_AppendString2StringBuffer(&_buffer, text.c_str(), text.length());
    return *this;
  }
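
  //////////////////////////////////////////////////////////////////////////////
  /// @brief illustrative note (not part of the original header): the append
  /// members return *this, so calls can be chained fluently, e.g.:
  ///
  ///   arangodb::basics::StringBuffer line(TRI_UNKNOWN_MEM_ZONE);
  ///   line.appendText("collection/").appendText(key).appendChar('\n');
  ///
  /// 'key' stands for any std::string in scope; the result can afterwards be
  /// read via c_str() and length().
  //////////////////////////////////////////////////////////////////////////////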

  //////////////////////////////////////////////////////////////////////////////
  /// @brief appends integer with two digits
  //////////////////////////////////////////////////////////////////////////////

  StringBuffer& appendInteger2(uint32_t attr) {
    TRI_AppendInteger2StringBuffer(&_buffer, attr);
    return *this;
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief appends integer with three digits
  //////////////////////////////////////////////////////////////////////////////

  StringBuffer& appendInteger3(uint32_t attr) {
    TRI_AppendInteger3StringBuffer(&_buffer, attr);
    return *this;
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief appends integer with four digits
  //////////////////////////////////////////////////////////////////////////////

  StringBuffer& appendInteger4(uint32_t attr) {
    TRI_AppendInteger4StringBuffer(&_buffer, attr);
    return *this;
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief appends integer with 8 bits
  ///
  /// This method is implemented here in order to allow inlining.
  //////////////////////////////////////////////////////////////////////////////

  StringBuffer& appendInteger(int8_t attr) {
    TRI_AppendInt8StringBuffer(&_buffer, attr);
    return *this;
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief appends unsigned integer with 8 bits
  ///
  /// This method is implemented here in order to allow inlining.
  //////////////////////////////////////////////////////////////////////////////

  StringBuffer& appendInteger(uint8_t attr) {
    TRI_AppendUInt8StringBuffer(&_buffer, attr);
    return *this;
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief appends integer with 16 bits
  ///
  /// This method is implemented here in order to allow inlining.
  //////////////////////////////////////////////////////////////////////////////

  StringBuffer& appendInteger(int16_t attr) {
    TRI_AppendInt16StringBuffer(&_buffer, attr);
    return *this;
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief appends unsigned integer with 16 bits
  ///
  /// This method is implemented here in order to allow inlining.
  //////////////////////////////////////////////////////////////////////////////

  StringBuffer& appendInteger(uint16_t attr) {
    TRI_AppendUInt16StringBuffer(&_buffer, attr);
    return *this;
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief appends integer with 32 bits
  ///
  /// This method is implemented here in order to allow inlining.
  //////////////////////////////////////////////////////////////////////////////

  StringBuffer& appendInteger(int32_t attr) {
    TRI_AppendInt32StringBuffer(&_buffer, attr);
    return *this;
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief appends unsigned integer with 32 bits
  ///
  /// This method is implemented here in order to allow inlining.
  //////////////////////////////////////////////////////////////////////////////

  StringBuffer& appendInteger(uint32_t attr) {
    TRI_AppendUInt32StringBuffer(&_buffer, attr);
    return *this;
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief appends integer with 64 bits
  ///
  /// This method is implemented here in order to allow inlining.
  //////////////////////////////////////////////////////////////////////////////

  StringBuffer& appendInteger(int64_t attr) {
    TRI_AppendInt64StringBuffer(&_buffer, attr);
    return *this;
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief appends unsigned integer with 64 bits
  ///
  /// This method is implemented here in order to allow inlining.
  //////////////////////////////////////////////////////////////////////////////

  StringBuffer& appendInteger(uint64_t attr) {
    TRI_AppendUInt64StringBuffer(&_buffer, attr);
    return *this;
  }
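
  //////////////////////////////////////////////////////////////////////////////
  /// @brief illustrative note (not part of the original header): based on their
  /// briefs, appendInteger2/3/4 are assumed to write zero-padded, fixed-width
  /// numbers, which is convenient for timestamps, e.g.:
  ///
  ///   buffer.appendInteger4(2017).appendChar('-')
  ///         .appendInteger2(7).appendChar('-')
  ///         .appendInteger2(3);                  // would yield "2017-07-03"
  ///
  /// The exact padding behaviour is defined in StringBuffer.cpp and is not
  /// visible in this header.
  //////////////////////////////////////////////////////////////////////////////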

  ////////////////////////////////////////////////////////////////////////////////
  /// @brief appends size_t
  ///
  /// This method is implemented here in order to allow inlining.
  ////////////////////////////////////////////////////////////////////////////////

#ifdef TRI_OVERLOAD_FUNCS_SIZE_T

  StringBuffer& appendInteger(size_t attr) {

@@ -970,58 +688,40 @@ class StringBuffer {

#endif

  //////////////////////////////////////////////////////////////////////////////
  /// @brief appends unsigned integer with 32 bits in hex
  //////////////////////////////////////////////////////////////////////////////

  StringBuffer& appendHex(uint32_t attr) {
    TRI_AppendUInt32HexStringBuffer(&_buffer, attr);
    return *this;
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief appends unsigned integer with 64 bits in hex
  //////////////////////////////////////////////////////////////////////////////

  StringBuffer& appendHex(uint64_t attr) {
    TRI_AppendUInt64HexStringBuffer(&_buffer, attr);
    return *this;
  }

  ////////////////////////////////////////////////////////////////////////////////
  /// @brief appends size_t in hex
  ////////////////////////////////////////////////////////////////////////////////

#ifdef TRI_OVERLOAD_FUNCS_SIZE_T

  StringBuffer& appendHex(size_t attr) { return appendHex(sizetint_t(attr)); }

#endif

  //////////////////////////////////////////////////////////////////////////////
  /// @brief appends a floating point number (8-byte double)
  //////////////////////////////////////////////////////////////////////////////

  StringBuffer& appendDecimal(double attr) {
    TRI_AppendDoubleStringBuffer(&_buffer, attr);
    return *this;
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief appends time in standard format
  ///
  /// This method is implemented here in order to allow inlining.
  //////////////////////////////////////////////////////////////////////////////

  StringBuffer& appendTime(int32_t attr) {
    TRI_AppendTimeStringBuffer(&_buffer, attr);
    return *this;
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief appends csv string
  //////////////////////////////////////////////////////////////////////////////

  StringBuffer& appendCsvString(std::string const& text) {
    // do not escape here, because some strings - i.e. lists of identifiers -
    // have no special characters

@@ -1031,55 +731,37 @@ class StringBuffer {
    return *this;
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief appends csv 32-bit integer
  //////////////////////////////////////////////////////////////////////////////

  StringBuffer& appendCsvInteger(int32_t i) {
    TRI_AppendCsvInt32StringBuffer(&_buffer, i);
    return *this;
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief appends csv unsigned 32-bit integer
  //////////////////////////////////////////////////////////////////////////////

  StringBuffer& appendCsvInteger(uint32_t i) {
    TRI_AppendCsvUInt32StringBuffer(&_buffer, i);
    return *this;
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief appends csv 64-bit integer
  //////////////////////////////////////////////////////////////////////////////

  StringBuffer& appendCsvInteger(int64_t i) {
    TRI_AppendCsvInt64StringBuffer(&_buffer, i);
    return *this;
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief appends csv unsigned 64-bit integer
  //////////////////////////////////////////////////////////////////////////////

  StringBuffer& appendCsvInteger(uint64_t i) {
    TRI_AppendCsvUInt64StringBuffer(&_buffer, i);
    return *this;
  }

  //////////////////////////////////////////////////////////////////////////////
  /// @brief appends csv double
  //////////////////////////////////////////////////////////////////////////////

  StringBuffer& appendCsvDouble(double d) {
    TRI_AppendCsvDoubleStringBuffer(&_buffer, d);
    return *this;
  }
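
  //////////////////////////////////////////////////////////////////////////////
  /// @brief illustrative note (not part of the original header): the appendCsv*
  /// members build delimiter-separated output; how the field separator and any
  /// line terminator are emitted is defined by the TRI_AppendCsv*StringBuffer
  /// implementations and is not visible in this header. A hedged sketch of a
  /// row consisting of an id, a name and a score:
  ///
  ///   row.appendCsvInteger(static_cast<uint64_t>(id));
  ///   row.appendCsvString(name);
  ///   row.appendCsvDouble(score);
  //////////////////////////////////////////////////////////////////////////////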

  //////////////////////////////////////////////////////////////////////////////
  /// @brief underlying C string buffer
  //////////////////////////////////////////////////////////////////////////////

  TRI_string_buffer_t _buffer;
};
}
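
////////////////////////////////////////////////////////////////////////////////
/// @brief usage sketch for the StringBuffer wrapper above (illustrative only,
/// not part of the original header). TRI_UNKNOWN_MEM_ZONE is assumed to be the
/// default memory zone from the Basics headers; the wrapper releases its
/// memory in the destructor, so no manual cleanup is needed. The function name
/// is purely hypothetical.
////////////////////////////////////////////////////////////////////////////////

static inline std::string ExampleStringBufferUsage() {
  arangodb::basics::StringBuffer buffer(TRI_UNKNOWN_MEM_ZONE);
  buffer.reserve(64);                              // optional pre-allocation
  buffer.appendText("documents: ")                 // append members are chainable
      .appendInteger(static_cast<uint64_t>(1024))
      .appendChar('\n');
  return std::string(buffer.c_str(), buffer.length());
}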