rest handler now times out when no leadership is established and reports 503
commit fb8173b1ec

CHANGELOG | 13

@@ -1,7 +1,7 @@
 devel
 -----

-* added a memory expection of V8 memory gets to low
+* added a memory exception in case V8 memory gets too low

 * fixed epoch computation in hybrid logical clock

@@ -43,7 +43,16 @@ devel
 * added module.context.createDocumentationRouter to replace module.context.apiDocumentation


-v3.0.5 (XXXX-XX-XX)
+v3.0.6 (XXXX-XX-XX)
 -------------------

+* fix execution of AQL traversal expressions when there are multiple
+  conditions that refer to variables set outside the traversal
+
+* properly return HTTP 503 in JS actions when backend is gone
+
 v3.0.5 (2016-08-18)
 -------------------

 * execute AQL ternary operator via C++ if possible

@@ -24,15 +24,15 @@ a *_key* attribute. If no *_key* attribute is provided, ArangoDB will auto-gener
a value for the *_key* attribute. Inserting a document will also auto-generate a document
revision number for the document.

-```
+```js
FOR i IN 1..100
  INSERT { value: i } IN numbers
```

-When inserting into an [edge collection](../../Manual/Appendix/Glossary.html#edge-collection), it is mandatory to specify the attributes
-*_from* and *_to* in document:
+When inserting into an [edge collection](../../Manual/Appendix/Glossary.html#edge-collection),
+it is mandatory to specify the attributes *_from* and *_to* in the document:

-```
+```js
FOR u IN users
  FOR p IN products
    FILTER u._key == p.recommendedBy
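
The hunk above is cut off mid-query. For orientation only (not part of this diff), an edge insert of this shape would continue roughly like this, with `recommendations` as a hypothetical edge collection:

```js
FOR u IN users
  FOR p IN products
    FILTER u._key == p.recommendedBy
    INSERT { _from: u._id, _to: p._id } INTO recommendations
```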

@@ -44,17 +44,25 @@ FOR u IN users
*options* can be used to suppress query errors that may occur when violating unique
key constraints:

-```
+```js
FOR i IN 1..1000
-  INSERT { _key: CONCAT('test', i), name: "test" } WITH { foobar: true } IN users OPTIONS { ignoreErrors: true }
+  INSERT {
+    _key: CONCAT('test', i),
+    name: "test",
+    foobar: true
+  } INTO users OPTIONS { ignoreErrors: true }
```

To make sure data are durable when an insert query returns, there is the *waitForSync*
query option:

-```
+```js
FOR i IN 1..1000
-  INSERT { _key: CONCAT('test', i), name: "test" } WITH { foobar: true } IN users OPTIONS { waitForSync: true }
+  INSERT {
+    _key: CONCAT('test', i),
+    name: "test",
+    foobar: true
+  } INTO users OPTIONS { waitForSync: true }
```

!SUBSECTION Returning the inserted documents

@@ -68,18 +76,16 @@ The documents contained in `NEW` will contain all attributes, even those auto-ge
the database (e.g. `_id`, `_key`, `_rev`).

-
-```
+```js
INSERT document IN collection options RETURN NEW
```

Following is an example using a variable named `inserted` to return the inserted
documents. For each inserted document, the document key is returned:

-```
+```js
FOR i IN 1..100
  INSERT { value: i }
  LET inserted = NEW
  RETURN inserted._key
```

@@ -29,6 +29,80 @@ scope the *RETURN* is placed in can be used for the computations.
Note: *RETURN* will close the current scope and eliminate all local variables in
it.

+[Dynamic attribute names](../Fundamentals/DataTypes.md#objects--documents) are
+supported as well:
+
+```js
+FOR u IN users
+  RETURN { [ u._id ]: u.age }
+```
+
+The document *_id* of every user is used as an expression to compute the
+attribute key in this example:
+
+```json
+[
+  {
+    "users/9883": 32
+  },
+  {
+    "users/9915": 27
+  },
+  {
+    "users/10074": 69
+  }
+]
+```
+
+The result contains one object per user with a single key/value pair each.
+This is usually not desired. For a single object that maps user IDs to ages,
+the individual results need to be merged and returned with another `RETURN`:
+
+```js
+RETURN MERGE(
+  FOR u IN users
+    RETURN { [ u._id ]: u.age }
+)
+```
+
+```json
+[
+  {
+    "users/10074": 69,
+    "users/9883": 32,
+    "users/9915": 27
+  }
+]
+```
+
+Keep in mind that if the key expression evaluates to the same value multiple
+times, only one of the key/value pairs with the duplicate name will survive
+[MERGE()](../Functions/Document.md#merge). To avoid this, you can go without
+dynamic attribute names, use static names instead and return all document
+properties as attribute values:
+
+```js
+FOR u IN users
+  RETURN { name: u.name, age: u.age }
+```
+
+```json
+[
+  {
+    "name": "John Smith",
+    "age": 32
+  },
+  {
+    "name": "James Hendrix",
+    "age": 69
+  },
+  {
+    "name": "Katie Foster",
+    "age": 27
+  }
+]
+```
+
!SUBSECTION RETURN DISTINCT

Since ArangoDB 2.7, *RETURN* can optionally be followed by the *DISTINCT* keyword.
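
The hunk ends just as *RETURN DISTINCT* is introduced. As a quick illustration (an aside, not part of this diff), it deduplicates the values produced by the enclosing *FOR* loop:

```js
FOR u IN users
  RETURN DISTINCT u.age
```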

@@ -23,17 +23,17 @@ be updated. *document* must be a document that contains the attributes and value
to be updated. When using the first syntax, *document* must also contain the *_key*
attribute to identify the document to be updated.

-```
+```js
FOR u IN users
-  UPDATE { _key: u._key, name: CONCAT(u.firstName, u.lastName) } IN users
+  UPDATE { _key: u._key, name: CONCAT(u.firstName, " ", u.lastName) } IN users
```

The following query is invalid because it does not contain a *_key* attribute and
thus it is not possible to determine the documents to be updated:

-```
+```js
FOR u IN users
-  UPDATE { name: CONCAT(u.firstName, u.lastName) } IN users
+  UPDATE { name: CONCAT(u.firstName, " ", u.lastName) } IN users
```

When using the second syntax, *keyExpression* provides the document identification.

@@ -42,21 +42,21 @@ document, which must contain a *_key* attribute.

The following queries are equivalent:

-```
+```js
FOR u IN users
-  UPDATE u._key WITH { name: CONCAT(u.firstName, u.lastName) } IN users
+  UPDATE u._key WITH { name: CONCAT(u.firstName, " ", u.lastName) } IN users

FOR u IN users
-  UPDATE { _key: u._key } WITH { name: CONCAT(u.firstName, u.lastName) } IN users
+  UPDATE { _key: u._key } WITH { name: CONCAT(u.firstName, " ", u.lastName) } IN users

FOR u IN users
-  UPDATE u WITH { name: CONCAT(u.firstName, u.lastName) } IN users
+  UPDATE u WITH { name: CONCAT(u.firstName, " ", u.lastName) } IN users
```

An update operation may update arbitrary documents which do not need to be identical
to the ones produced by a preceding *FOR* statement:

-```
+```js
FOR i IN 1..1000
  UPDATE CONCAT('test', i) WITH { foobar: true } IN users

@@ -65,14 +65,74 @@ FOR u IN users
  UPDATE u WITH { status: 'inactive' } IN backup
```

+!SUBSECTION Using the current value of a document attribute
+
+The pseudo-variable `OLD` is not supported inside of `WITH` clauses (it is
+available after `UPDATE`). To access the current attribute value, you can
+usually refer to a document via the variable of the `FOR` loop, which is used
+to iterate over a collection:
+
+```js
+FOR doc IN users
+  UPDATE doc WITH {
+    fullName: CONCAT(doc.firstName, " ", doc.lastName)
+  } IN users
+```
+
+If there is no loop, because only a single document is updated, then there
+might not be a variable like above (`doc`), which would let you refer to the
+document being updated:
+
+```js
+UPDATE "users/john" WITH { ... } IN users
+```
+
+To access the current value in this situation, the document has to be retrieved
+and stored in a variable first:
+
+```js
+LET doc = DOCUMENT("users/john")
+UPDATE doc WITH {
+  fullName: CONCAT(doc.firstName, " ", doc.lastName)
+} IN users
+```
+
+An existing attribute can be modified based on its current value this way,
+for instance to increment a counter:
+
+```js
+UPDATE doc WITH {
+  karma: doc.karma + 1
+} IN users
+```
+
+If the attribute `karma` doesn't exist yet, `doc.karma` evaluates to *null*.
+The expression `null + 1` results in the new attribute `karma` being set to *1*.
+If the attribute does exist, then it is increased by *1*.
+
+Arrays can be mutated too, of course:
+
+```js
+UPDATE doc WITH {
+  hobbies: PUSH(doc.hobbies, "swimming")
+} IN users
+```
+
+If the attribute `hobbies` doesn't exist yet, it is conveniently initialized
+as `[ "swimming" ]` and otherwise extended.
+
!SUBSECTION Setting query options

*options* can be used to suppress query errors that may occur when trying to
update non-existing documents or violating unique key constraints:

-```
+```js
FOR i IN 1..1000
-  UPDATE { _key: CONCAT('test', i) } WITH { foobar: true } IN users OPTIONS { ignoreErrors: true }
+  UPDATE {
+    _key: CONCAT('test', i)
+  } WITH {
+    foobar: true
+  } IN users OPTIONS { ignoreErrors: true }
```

An update operation will only update the attributes specified in *document* and

@@ -84,9 +144,12 @@ When updating an attribute with a null value, ArangoDB will not remove the attri
from the document but store a null value for it. To get rid of attributes in an update
operation, set them to null and provide the *keepNull* option:

-```
+```js
FOR u IN users
-  UPDATE u WITH { foobar: true, notNeeded: null } IN users OPTIONS { keepNull: false }
+  UPDATE u WITH {
+    foobar: true,
+    notNeeded: null
+  } IN users OPTIONS { keepNull: false }
```

The above query will remove the *notNeeded* attribute from the documents and update

@@ -100,17 +163,21 @@ The following query will set the updated document's *name* attribute to the exac
same value that is specified in the query. This is due to the *mergeObjects* option
being set to *false*:

-```
+```js
FOR u IN users
-  UPDATE u WITH { name: { first: "foo", middle: "b.", last: "baz" } } IN users OPTIONS { mergeObjects: false }
+  UPDATE u WITH {
+    name: { first: "foo", middle: "b.", last: "baz" }
+  } IN users OPTIONS { mergeObjects: false }
```

By contrast, the following query will merge the contents of the *name* attribute in the
original document with the value specified in the query:

-```
+```js
FOR u IN users
-  UPDATE u WITH { name: { first: "foo", middle: "b.", last: "baz" } } IN users OPTIONS { mergeObjects: true }
+  UPDATE u WITH {
+    name: { first: "foo", middle: "b.", last: "baz" }
+  } IN users OPTIONS { mergeObjects: true }
```

Attributes in *name* that are present in the to-be-updated document but not in the

@@ -123,9 +190,11 @@ explicitly.
To make sure data are durable when an update query returns, there is the *waitForSync*
query option:

-```
+```js
FOR u IN users
-  UPDATE u WITH { foobar: true } IN users OPTIONS { waitForSync: true }
+  UPDATE u WITH {
+    foobar: true
+  } IN users OPTIONS { waitForSync: true }
```

!SUBSECTION Returning the modified documents

@@ -149,7 +218,7 @@ UPDATE keyExpression WITH document IN collection options RETURN NEW
Following is an example using a variable named `previous` to capture the original
documents before modification. For each modified document, the document key is returned.

-```
+```js
FOR u IN users
  UPDATE u WITH { value: "test" }
  LET previous = OLD

@@ -159,7 +228,7 @@ FOR u IN users
The following query uses the `NEW` pseudo-value to return the updated documents,
without some of the system attributes:

-```
+```js
FOR u IN users
  UPDATE u WITH { value: "test" }
  LET updated = NEW

@@ -168,9 +237,8 @@ FOR u IN users

It is also possible to return both `OLD` and `NEW`:

-```
+```js
FOR u IN users
  UPDATE u WITH { value: "test" }
  RETURN { before: OLD, after: NEW }
```

@@ -11,8 +11,8 @@ the server on port 8529 on the localhost. For more information see the
unix> ./arangosh --server.endpoint tcp://127.0.0.1:8529 --server.username root
```

-The shell will print its own version number and – if successfully connected
-to a server – the version number of the ArangoDB server.
+The shell will print its own version number and, if successfully connected
+to a server, the version number of the ArangoDB server.

!SECTION Command-Line Options

@@ -73,3 +73,22 @@ for (i = 0; i < 100000; i++) {

This is because the *arangosh* version will be doing around 100k HTTP requests, while the
*arangod* version will write directly to the database.
+
+!SECTION Using `arangosh` via unix shebang mechanisms
+
+In unix operating systems you can start scripts by specifying the interpreter in the first line of the script.
+This is commonly called `shebang` or `hash bang`. You can also do that with `arangosh`, i.e. create `~/test.js`:
+
+    #!/usr/bin/arangosh --javascript.execute
+    require("internal").print("hello world")
+    db._query("FOR x IN test RETURN x").toArray()
+
+Note that the first line has to end with a blank in order to make it work.
+Mark it executable to the OS:
+
+    #> chmod a+x ~/test.js
+
+and finally try it out:
+
+    #> ~/test.js

@@ -87,6 +87,25 @@ So, if your edges have about a dozen different types, it's okay to choose the co
the `FILTER` approach is preferred. You can still use `FILTER` operations on edges of course. You can get rid
of a `FILTER` on the `type` with the former approach, everything else can stay the same.

+!SUBSECTION Which part of my data is an Edge and which a Vertex?
+
+Vertex documents would be the main objects in your data model. Edges connect and classify relations between these objects.
+
+!SUBSUBSECTION Vertices
+
+So let's say we have two vertex collections, `Users` and `Groups`. Documents in the `Groups` collection contain the attributes
+of the Group, i.e. when it was founded, its subject, an icon URL and so on. `Users` documents contain the data specific to a
+user - like all names, birthdays, Avatar URLs, hobbies...
+
+!SUBSUBSECTION Edges
+
+Now we use one edge collection to create relations between users and groups. Since multiple users may be in an arbitrary number of
+groups, this is an **M:N** relation (see above). So, we have an edge collection `UsersInGroups` with e.g. one edge
+with `_from` pointing to `Users/John` and `_to` pointing to `Groups/BowlingGroupHappyPin`.
+This makes the user **John** a member of the group **Bowling Group Happy Pin**.
+Attributes of this relation may contain qualifiers to this relation, like the permissions of **John** in this group, the date when he joined the group etc.
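
As an aside (mine, not part of this diff), such a membership edge could be created from arangosh roughly like this, assuming the collections above exist:

```js
db.UsersInGroups.save({
  _from: "Users/John",
  _to: "Groups/BowlingGroupHappyPin",
  permissions: "member",   // example qualifier attributes on the relation
  joined: "2016-08-18"
});
```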
+
+!SUBSUBSECTION Advantages of this approach
+
+Graphs give you the advantage of not just being able to have a fixed number of **m:n** relations in a row, but an arbitrary number.
+
!SUBSECTION Backup and restore

You certainly want backups of your graph data; you can use [Arangodump](../Administration/Arangodump.md) to create the backup,

@@ -138,6 +138,7 @@ MAKE_CMD_PREFIX=""
CONFIGURE_OPTIONS="-DCMAKE_INSTALL_PREFIX=/ $CMAKE_OPENSSL"
MAINTAINER_MODE="-DUSE_MAINTAINER_MODE=off"

+TAR_SUFFIX=""
TARGET_DIR=""
CLANG36=0
CLANG=0

@@ -211,11 +212,13 @@ while [ $# -gt 0 ]; do
        ;;

    --sanitize)
+       TAR_SUFFIX="-sanitize"
        SANITIZE=1
        shift
        ;;

    --coverage)
+       TAR_SUFFIX="-coverage"
        COVERAGE=1
        shift
        ;;

@@ -419,7 +422,7 @@ if test -n "${TARGET_DIR}"; then
    echo "building distribution tarball"
    mkdir -p "${TARGET_DIR}"
    dir="${TARGET_DIR}"
-   TARFILE=arangodb.tar.gz
+   TARFILE=arangodb-`uname`${TAR_SUFFIX}.tar.gz
    TARFILE_TMP=`pwd`/arangodb.tar.$$

    mkdir -p ${dir}

@@ -348,7 +348,7 @@ priv_rpc_ret_t Agent::sendAppendEntriesRPC(std::string const& follower_id) {
      arangodb::GeneralRequest::RequestType::POST, path.str(),
      std::make_shared<std::string>(builder.toJson()), headerFields,
      std::make_shared<AgentCallback>(this, follower_id, highest),
-     0.5*_config.minPing(), true, 0.75*_config.minPing());
+     0.1*_config.minPing(), true, 0.05*_config.minPing());

  _lastSent[follower_id] = std::chrono::system_clock::now();
  _lastHighest[follower_id] = highest;

@@ -415,7 +415,7 @@ void Constituent::beginShutdown() {
/// Start operation
bool Constituent::start(TRI_vocbase_t* vocbase,
                        aql::QueryRegistry* queryRegistry) {

  TRI_ASSERT(vocbase != nullptr);
  _vocbase = vocbase;
  _queryRegistry = queryRegistry;

@@ -429,6 +429,7 @@ void Constituent::run() {

  LOG_TOPIC(DEBUG, Logger::AGENCY)
    << "Pool complete. Starting constituent personality";

+ _id = _agent->config().id();
+
  TRI_ASSERT(_vocbase != nullptr);

@@ -133,15 +133,26 @@ RestHandler::status RestAgencyHandler::handleWrite() {
  if (query->slice().length() == 0) {
    Builder body;
    body.openObject();
-   body.add(
-     "message", VPackValue("Empty request."));
+   body.add("message", VPackValue("Empty request."));
    body.close();
    generateResult(GeneralResponse::ResponseCode::BAD, body.slice());
    return status::DONE;
  }

+ auto s = std::chrono::system_clock::now();  // Leadership established?
+ std::chrono::seconds timeout(1);
+ while (_agent->size() > 1 && _agent->leaderID() == "") {
+   std::this_thread::sleep_for(duration_t(100));
+   if ((std::chrono::system_clock::now() - s) > timeout) {
+     Builder body;
+     body.openObject();
+     body.add("message", VPackValue("No leader"));
+     body.close();
+     generateResult(GeneralResponse::ResponseCode::SERVICE_UNAVAILABLE,
+                    body.slice());
+     LOG_TOPIC(ERR, Logger::AGENCY) << "We don't know who the leader is";
+     return status::DONE;
+   }
+ }
+
  write_ret_t ret = _agent->write(query);

@@ -215,8 +226,20 @@ inline RestHandler::status RestAgencyHandler::handleRead() {
    return status::DONE;
  }

+ auto s = std::chrono::system_clock::now();  // Leadership established?
+ std::chrono::seconds timeout(1);
+ while (_agent->size() > 1 && _agent->leaderID() == "") {
+   std::this_thread::sleep_for(duration_t(100));
+   if ((std::chrono::system_clock::now() - s) > timeout) {
+     Builder body;
+     body.openObject();
+     body.add("message", VPackValue("No leader"));
+     body.close();
+     generateResult(GeneralResponse::ResponseCode::SERVICE_UNAVAILABLE,
+                    body.slice());
+     LOG_TOPIC(ERR, Logger::AGENCY) << "We don't know who the leader is";
+     return status::DONE;
+   }
+ }
+
  read_ret_t ret = _agent->read(query);
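
Seen from a client, the new behavior means an agency request during leader election now fails fast with HTTP 503 instead of hanging. A minimal sketch (hypothetical endpoint and payload, using the `@arangodb/request` module that the test suite below also uses):

```js
var request = require("@arangodb/request");

// Retry an agency read while the agency reports "No leader" via HTTP 503.
function readWithRetry(url, query, tries) {
  for (var i = 0; i < tries; i++) {
    var res = request({ url: url, method: "POST", body: JSON.stringify(query) });
    if (res.statusCode !== 503) {
      return res;                  // leader known: a real answer (or a real error)
    }
    require("internal").wait(0.1); // back off, mirroring the handler's 100ms poll
  }
  throw new Error("agency still has no leader");
}
```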

@@ -132,7 +132,7 @@ std::vector<check_t> Supervision::checkDBServers() {
      report->add("LastHeartbeatAcked",
                  VPackValue(
                    timepointToString(std::chrono::system_clock::now())));
-     report->add("Status", VPackValue("GOOD"));
+     report->add("Status", VPackValue(Supervision::HEALTH_STATUS_GOOD));
    } else {
      std::chrono::seconds t{0};
      t = std::chrono::duration_cast<std::chrono::seconds>(

@@ -254,17 +254,17 @@ std::vector<check_t> Supervision::checkCoordinators() {
      report->add("LastHeartbeatAcked",
                  VPackValue(
                    timepointToString(std::chrono::system_clock::now())));
-     report->add("Status", VPackValue("GOOD"));
+     report->add("Status", VPackValue(Supervision::HEALTH_STATUS_GOOD));
    } else {
      std::chrono::seconds t{0};
      t = std::chrono::duration_cast<std::chrono::seconds>(
        std::chrono::system_clock::now()-stringToTimepoint(lastHeartbeatAcked));
      if (t.count() > _gracePeriod) {  // Failure
-       if (lastStatus == "BAD") {
-         report->add("Status", VPackValue("FAILED"));
+       if (lastStatus == Supervision::HEALTH_STATUS_BAD) {
+         report->add("Status", VPackValue(Supervision::HEALTH_STATUS_FAILED));
        }
      } else {
-       report->add("Status", VPackValue("BAD"));
+       report->add("Status", VPackValue(Supervision::HEALTH_STATUS_BAD));
      }
    }

@@ -346,6 +346,11 @@ void Supervision::run() {

  while (!this->isStopping()) {
    updateSnapshot();
+   // mop: always do health checks so shutdown is able to detect if a server failed otherwise
+   if (_agent->leading()) {
+     doChecks();
+   }
+
    if (isShuttingDown()) {
      handleShutdown();
    } else if (_agent->leading()) {

@@ -365,16 +370,33 @@ bool Supervision::isShuttingDown() {
  }
}

+bool Supervision::serverGood(const std::string& serverName) {
+  try {
+    const std::string serverStatus(healthPrefix + serverName + "/Status");
+    const std::string status = _snapshot(serverStatus).getString();
+    return status == Supervision::HEALTH_STATUS_GOOD;
+  } catch (...) {
+    return false;
+  }
+}
+
void Supervision::handleShutdown() {
- LOG_TOPIC(DEBUG, Logger::AGENCY) << "Initiating shutdown";
+ LOG_TOPIC(DEBUG, Logger::AGENCY) << "Waiting for clients to shut down";
  Node::Children const& serversRegistered = _snapshot(currentServersRegisteredPrefix).children();
  bool serversCleared = true;
  for (auto const& server : serversRegistered) {
    if (server.first == "Version") {
      continue;
    }

+   LOG_TOPIC(DEBUG, Logger::AGENCY)
+     << "Waiting for " << server.first << " to shutdown";
+
+   if (!serverGood(server.first)) {
+     LOG_TOPIC(WARN, Logger::AGENCY)
+       << "Server " << server.first << " did not shutdown properly it seems!";
+     continue;
+   }
    serversCleared = false;
  }

@@ -390,7 +412,6 @@ bool Supervision::handleJobs() {
  }

  // Do supervision
- doChecks();
  shrinkCluster();
  workJobs();

@@ -398,7 +419,6 @@ bool Supervision::handleJobs() {
}

void Supervision::workJobs() {
-
  Node::Children const& todos = _snapshot(toDoPrefix).children();
  Node::Children const& pends = _snapshot(pendingPrefix).children();

@@ -108,6 +108,9 @@ class Supervision : public arangodb::Thread {
  void wakeUp();

 private:
+ static constexpr const char* HEALTH_STATUS_GOOD = "GOOD";
+ static constexpr const char* HEALTH_STATUS_BAD = "BAD";
+ static constexpr const char* HEALTH_STATUS_FAILED = "FAILED";
+
  /// @brief Update agency prefix from agency itself
  bool updateAgencyPrefix (size_t nTries = 10, int intervalSec = 1);

@@ -165,6 +168,8 @@ class Supervision : public arangodb::Thread {
  uint64_t _jobId;
  uint64_t _jobIdMax;

+ bool serverGood(const std::string&);
+
  static std::string _agencyPrefix;
};

@@ -801,7 +801,7 @@ bool AstNode::isOnlyEqualityMatch() const {
  }

  for (size_t i = 0; i < numMembers(); ++i) {
-   auto op = getMember(i);
+   auto op = getMemberUnchecked(i);
    if (op->type != arangodb::aql::NODE_TYPE_OPERATOR_BINARY_EQ) {
      return false;
    }

@@ -1160,7 +1160,7 @@ void ExecutionNode::RegisterPlan::after(ExecutionNode* en) {

      if (it2 == varInfo.end()) {
        // report an error here to prevent crashing
-       THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "missing variable #" + std::to_string(v->id) + " while planning registers");
+       THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "missing variable #" + std::to_string(v->id) + " for node " + en->getTypeString() + " while planning registers");
      }

      // finally adjust the variable inside the IN calculation

@@ -844,6 +844,7 @@ AqlValue Expression::executeSimpleExpressionReference(
    std::string msg("variable not found '");
    msg.append(v->name);
    msg.append("' in executeSimpleExpression()");
+
    THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, msg.c_str());
  }

@@ -1798,7 +1798,7 @@ struct SortToIndexNode final : public WalkerWorker<ExecutionNode> {
    auto const& indexes = indexNode->getIndexes();
    auto cond = indexNode->condition();
    TRI_ASSERT(cond != nullptr);

    Variable const* outVariable = indexNode->outVariable();
    TRI_ASSERT(outVariable != nullptr);

@@ -1817,7 +1817,7 @@ struct SortToIndexNode final : public WalkerWorker<ExecutionNode> {
      return true;
    }

-   if (!isSparse) {
+   if (isSparse) {
      return true;
    }

@@ -1831,6 +1831,8 @@ struct SortToIndexNode final : public WalkerWorker<ExecutionNode> {
    // all indexes use the same attributes and index conditions guarantee
    // sorted output
  }

+ TRI_ASSERT(indexes.size() == 1 || cond->isSorted());
+
  // if we get here, we either have one index or multiple indexes on the same
  // attributes

@@ -1878,11 +1880,16 @@ struct SortToIndexNode final : public WalkerWorker<ExecutionNode> {

    if (numCovered == sortCondition.numAttributes() &&
        sortCondition.isUnidirectional() &&
-       (isSorted || fields.size() == sortCondition.numAttributes())) {
+       (isSorted || fields.size() >= sortCondition.numAttributes())) {
      // no need to sort
      _plan->unlinkNode(_plan->getNodeById(_sortNode->id()));
      indexNode->reverse(sortCondition.isDescending());
      _modified = true;
+   } else if (numCovered > 0 && sortCondition.isUnidirectional()) {
+     // remove the first few attributes if they are constant
+     SortNode* sortNode = static_cast<SortNode*>(_plan->getNodeById(_sortNode->id()));
+     sortNode->removeConditions(numCovered);
+     _modified = true;
    }
  }
}

@@ -429,6 +429,6 @@ double ShortestPathNode::estimateCost(size_t& nrItems) const {

    edgesCount += edges;
  }
- nrItems = edgesCount + static_cast<size_t>(log(nodesEstimate) * nodesEstimate);
+ nrItems = edgesCount + static_cast<size_t>(std::log2(nodesEstimate) * nodesEstimate);
  return depCost + nrItems;
}
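
For a feel of what the switch from the natural log to log2 does to the estimate (illustrative arithmetic with made-up counts, not from the diff): the node term grows by a factor of about 1.44.

```js
// Hypothetical numbers to compare the old and new cost estimates.
var nodesEstimate = 1024, edgesCount = 5000;
var oldItems = edgesCount + Math.floor(Math.log(nodesEstimate) * nodesEstimate);  // 5000 + 7097  = 12097
var newItems = edgesCount + Math.floor(Math.log2(nodesEstimate) * nodesEstimate); // 5000 + 10240 = 15240
```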

@@ -160,21 +160,21 @@ size_t SortCondition::coveredAttributes(
      // no match
      bool isConstant = false;

-     if (IsContained(_constAttributes, indexAttributes[i])) {
+     if (IsContained(indexAttributes, field.second) &&
+         IsContained(_constAttributes, field.second)) {
        // no field match, but a constant attribute
        isConstant = true;
+       ++fieldsPosition;
+       ++numCovered;
+     }
+
+     if (!isConstant &&
+         IsContained(_constAttributes, indexAttributes[i])) {
+       // no field match, but a constant attribute
+       isConstant = true;
        ++i; // next index field
      }

-     if (!isConstant) {
-       if (IsContained(indexAttributes, field.second) &&
-           IsContained(_constAttributes, field.second)) {
-         // no field match, but a constant attribute
-         isConstant = true;
-         ++fieldsPosition;
-         ++numCovered;
-       }
-     }
-
      if (!isConstant) {
        break;

@@ -121,6 +121,12 @@ bool SortNode::simplify(ExecutionPlan* plan) {

  return _elements.empty();
}

+void SortNode::removeConditions(size_t count) {
+  TRI_ASSERT(_elements.size() > count);
+  TRI_ASSERT(count > 0);
+  _elements.erase(_elements.begin(), _elements.begin() + count);
+}
+
/// @brief returns all sort information
SortInformation SortNode::getSortInformation(

@@ -178,5 +184,5 @@ double SortNode::estimateCost(size_t& nrItems) const {
  if (nrItems <= 3.0) {
    return depCost + nrItems;
  }
- return depCost + nrItems * log(static_cast<double>(nrItems));
+ return depCost + nrItems * std::log2(static_cast<double>(nrItems));
}

@@ -113,6 +113,11 @@ class SortNode : public ExecutionNode {
  /// simplification, and false otherwise
  bool simplify(ExecutionPlan*);

+ /// @brief removes the first count conditions from the sort condition
+ /// this can be used if the first conditions of the condition are constant
+ /// values (e.g. when a FILTER condition exists that guarantees this)
+ void removeConditions(size_t count);
+
 private:
  /// @brief pairs, consisting of variable and sort direction
  /// (true = ascending | false = descending)
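
What `removeConditions` enables, sketched as an AQL query (illustrative, not taken from this diff): when a FILTER pins the leading SORT attribute to a constant, the optimizer can drop that constant prefix and sort on the remainder only.

```js
FOR i IN collection
  FILTER i.value2 == 2        // value2 is constant for all matching documents
  SORT i.value2, i.value3     // the constant i.value2 prefix can be removed
  RETURN i.value3
```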

@@ -84,6 +84,14 @@ TraversalBlock::TraversalBlock(ExecutionEngine* engine, TraversalNode const* ep)
      TRI_ASSERT(it->second.registerId < ExecutionNode::MaxRegisterId);
      inRegsCur.emplace_back(it->second.registerId);
    }

+   for (auto const& v : ep->_conditionVariables) {
+     inVarsCur.emplace_back(v);
+     auto it = ep->getRegisterPlan()->varInfo.find(v->id);
+     TRI_ASSERT(it != ep->getRegisterPlan()->varInfo.end());
+     TRI_ASSERT(it->second.registerId < ExecutionNode::MaxRegisterId);
+     inRegsCur.emplace_back(it->second.registerId);
+   }
  }
}

@@ -170,6 +178,7 @@ int TraversalBlock::initialize() {
void TraversalBlock::executeExpressions() {
  DEBUG_BEGIN_BLOCK();
  AqlItemBlock* cur = _buffer.front();
+ size_t index = 0;
  for (auto& map : *_expressions) {
    for (size_t i = 0; i < map.second.size(); ++i) {
      // Right now no inVars are allowed.

@@ -178,8 +187,8 @@ void TraversalBlock::executeExpressions() {
      if (it != nullptr && it->expression != nullptr) {
        // inVars and inRegs needs fix
        bool mustDestroy;
-       AqlValue a = it->expression->execute(_trx, cur, _pos, _inVars[i],
-                                            _inRegs[i], mustDestroy);
+       AqlValue a = it->expression->execute(_trx, cur, _pos, _inVars[index],
+                                            _inRegs[index], mustDestroy);

        AqlValueGuard guard(a, mustDestroy);

@@ -196,6 +205,7 @@ void TraversalBlock::executeExpressions() {

        it->compareTo.reset(builder);
      }
+     ++index;
    }
  }
  throwIfKilled();  // check if we were aborted

@@ -702,11 +702,10 @@ void TraversalNode::setCondition(arangodb::aql::Condition* condition) {
  Ast::getReferencedVariables(condition->root(), varsUsedByCondition);

  for (auto const& oneVar : varsUsedByCondition) {
-   if ((_vertexOutVariable != nullptr &&
-        oneVar->id != _vertexOutVariable->id) &&
-       (_edgeOutVariable != nullptr && oneVar->id != _edgeOutVariable->id) &&
-       (_pathOutVariable != nullptr && oneVar->id != _pathOutVariable->id) &&
-       (_inVariable != nullptr && oneVar->id != _inVariable->id)) {
+   if ((_vertexOutVariable == nullptr || oneVar->id != _vertexOutVariable->id) &&
+       (_edgeOutVariable == nullptr || oneVar->id != _edgeOutVariable->id) &&
+       (_pathOutVariable == nullptr || oneVar->id != _pathOutVariable->id) &&
+       (_inVariable == nullptr || oneVar->id != _inVariable->id)) {
      _conditionVariables.emplace_back(oneVar);
    }
  }

@@ -142,7 +142,7 @@ void ImportFeature::collectOptions(
                     "action to perform when a unique key constraint "
                     "violation occurs. Possible values: " +
                         actionsJoined,
-                    new DiscreteValuesParameter<StringParameter>(&_typeImport, actions));
+                    new DiscreteValuesParameter<StringParameter>(&_onDuplicateAction, actions));
}

void ImportFeature::validateOptions(

@@ -645,6 +645,10 @@ function runThere (options, instanceInfo, file) {
      'return runTest(' + JSON.stringify(file) + ', true);';
  }

+ if (options.propagateInstanceInfo) {
+   testCode = 'global.instanceInfo = ' + JSON.stringify(instanceInfo) + ';\n' + testCode;
+ }
+
  let httpOptions = makeAuthorizationHeaders(options);
  httpOptions.method = 'POST';
  httpOptions.timeout = 3600;

@@ -3370,6 +3374,7 @@ testFuncs.replication_sync = function (options) {
testFuncs.resilience = function (options) {
  findTests();
  options.cluster = true;
+ options.propagateInstanceInfo = true;
  if (options.dbServers < 5) {
    options.dbServers = 5;
  }

@@ -3431,6 +3436,7 @@ testFuncs.server_http = function (options) {

testFuncs.shell_server = function (options) {
  findTests();
+ options.propagateInstanceInfo = true;

  return performTests(options, testsCases.server, 'shell_server', runThere);
};

@@ -43,42 +43,57 @@ var lessons = [
      ' number = 123;\n' +
      ' number = number * 10;'
  },
+ {
+   title: 'Shell History',
+   text: 'You can access previously run commands using the up and down keys.\n' +
+     "It saves you from retyping 'tutorial' every time for instance."
+ },
  {
    title: 'Running Complex Instructions',
    text: 'You can also run more complex instructions, such as for loops:\n\n' +
-     ' for (i = 0; i < 10; i++) { number = number + 1; }'
+     ' for (var i = 0; i < 10; i++) { number = number + 1; }'
  },
  {
    title: 'Printing Results',
-   text: 'As you can see, the result of the last command executed is printed automatically. ' +
+   text: 'As you see, the result of the last command executed is printed automatically.\n' +
      'To explicitly print a value at any other time, there is the print function:\n\n' +
-     ' for (i = 0; i < 5; ++i) { print("I am a JavaScript shell"); }'
+     ' for (var i = 0; i < 5; ++i) { print("I am a JavaScript shell"); }'
  },
  {
    title: 'Creating Collections',
-   text: 'ArangoDB is a document database. This means that we store data as documents ' +
-     "(which are similar to JavaScript objects) in so-called 'collections'. " +
+   text: 'ArangoDB is primarily a document database. This means that we store data as\n' +
+     "documents (which are similar to JavaScript objects) in so-called 'collections'.\n" +
      "Let's create a collection named 'places' now:\n\n" +
      " db._create('places');\n\n" +
-     'Note: each collection is identified by a unique name. Trying to create a ' +
+     'Note: each collection is identified by a unique name. Trying to create a\n' +
      'collection that already exists will produce an error.'
  },
  {
    title: 'Displaying Collections',
    text: 'Now you can take a look at the collection(s) you just created:\n\n' +
      ' db._collections();\n\n' +
-     "Please note that all collections will be returned, including ArangoDB's pre-defined " +
-     'system collections.'
+     "Please note that all collections will be returned, including ArangoDB's\n" +
+     'pre-defined system collections.'
  },
+ {
+   title: 'Accessing a single collection',
+   text: 'If you want to access a particular collection, you can either write:\n\n' +
+     ' db.places;\n\n' +
+     'or the more elaborate alternative:\n\n' +
+     " db._collection('places');\n\n" +
+     'Both return a collection object (if the specified collection exists).'
+ },
  {
    title: 'Creating Documents',
-   text: "Now we have a collection, but it is empty. So let's create some documents!\n\n" +
+   text: "We have a collection, but it is empty. So let's create some documents!\n\n" +
      ' db.places.save({ _key : "foo", city : "foo-city" });\n' +
-     ' for (i = 0; i <= 10; i++) { db.places.save({ _key: "example" + i, zipcode: i }) };'
+     ' for (var i = 0; i <= 10; i++) {\n' +
+     '   db.places.save({ _key: "example" + i, zipcode: i })\n' +
+     ' };'
  },
  {
    title: 'Displaying All Documents',
-   text: 'You want to take a look at your docs? No problem:\n\n' +
+   text: 'You want to take a look at your documents? No problem:\n\n' +
      ' db.places.toArray();'
  },
  {

@@ -89,37 +104,40 @@ var lessons = [
  {
    title: 'Retrieving Single Documents',
    text: "As you can see, each document has some meta attributes '_id', '_key' and '_rev'.\n" +
-     "The '_key' attribute can be used to quickly retrieve a single document from " +
+     "The '_key' attribute can be used to quickly retrieve a single document from\n" +
      'a collection:\n\n' +
      ' db.places.document("foo");\n' +
      ' db.places.document("example5");'
  },
  {
    title: 'Retrieving Single Documents',
-   text: "The '_id' attribute can also be used to retrieve documents using the 'db' object:\n\n" +
+   text: "The '_id' attribute can also be used to retrieve documents using the\n" +
+     "'db' object:\n\n" +
      ' db._document("places/foo");\n' +
      ' db._document("places/example5");'
  },
  {
    title: 'Modifying Documents',
-   text: 'You can modify existing documents. Try to add a new attribute to a document and ' +
+   text: 'You can modify existing documents. Try to add a new attribute to a document and\n' +
      'verify whether it has been added:\n\n' +
      ' db._update("places/foo", { zipcode: 39535 });\n' +
      ' db._document("places/foo");'
  },
  {
    title: 'Document Revisions',
-   text: "Note that after updating the document, its '_rev' attribute changed automatically.\n" +
-     "The '_rev' attribute contains a document revision number, and it can be used for " +
-     "conditional modifications. Here's an example of how to avoid lost updates in case " +
-     'multiple clients are accessing the documents in parallel:\n\n' +
+   text: "Note that after updating the document, its '_rev' attribute changed\n" +
+     'automatically.\n\n' +
+     "The '_rev' attribute contains a document revision number, and it can be used\n" +
+     "for conditional modifications. Here's an example of how to avoid lost updates\n" +
+     'in case multiple clients are accessing the documents in parallel:\n\n' +
      ' doc = db._document("places/example1");\n' +
      ' db._update("places/example1", { someValue: 23 });\n' +
      ' db._update(doc, { someValue: 42 });\n\n' +
-     'Note that the first update will succeed because it was unconditional. The second ' +
-     "update however is conditional because we're also passing the document's revision " +
-     "id in the first parameter to _update. As the revision id we're passing to update " +
-     "does not match the document's current revision anymore, the update is rejected."
+     'Note that the first update will succeed because it was unconditional.\n\n' +
+     "The second update however is conditional because we're also passing the\n" +
+     "document's revision id in the first parameter to _update. As the revision id\n" +
+     "we're passing to update does not match the document's current revision anymore,\n" +
+     'the update is rejected.'
  },
  {
    title: 'Removing Documents',

@@ -130,28 +148,41 @@ var lessons = [
  },
  {
    title: 'Searching Documents',
-   text: 'Searching for documents with specific attributes can be done by using the ' +
-     'byExample method:\n\n' +
+   text: 'Searching for documents with specific attributes can be done by using the\n' +
+     "'byExample' method:\n\n" +
      ' db._create("users");\n' +
-     ' for (i = 0; i < 10; ++i) { ' +
-     'db.users.save({ name: "username" + i, active: (i % 3 == 0), age: 30 + i }); }\n' +
+     ' for (var i = 0; i < 10; ++i) {\n' +
+     '   db.users.save({ name: "username" + i, active: (i % 3 == 0), age: 30 + i });\n' +
+     ' }\n' +
      ' db.users.byExample({ active: false }).toArray();\n' +
-     ' db.users.byExample({ name: "username3", active: true }).toArray();\n'
+     ' db.users.byExample({ name: "username3", active: true }).toArray();'
  },
  {
    title: 'Running AQL Queries',
-   text: 'ArangoDB also provides a query language for more complex matching:\n\n' +
-     ' db._query("FOR u IN users FILTER u.active == true && u.age >= 33 ' +
-     'RETURN { username: u.name, age: u.age }").toArray();'
+   text: 'ArangoDB also provides a query language (AQL) for more complex matching:\n\n' +
+     ' db._query(`\n' +
+     '   FOR u IN users\n' +
+     '     FILTER u.active == true && u.age >= 33\n' +
+     '     RETURN { username: u.name, age: u.age }\n' +
+     ' `).toArray();\n\n' +
+     'Wrapping multi-line queries in backticks is the most convenient way in\n' +
+     "today's JavaScript.\n\n" +
+     'See our online documentation for more details on AQL:\n' +
+     'https://docs.arangodb.com/'
  },
  {
    title: 'Using Databases',
-   text: 'By default, the ArangoShell connects to the default database. The default database ' +
-     "is named '_system'. To create another database, use the '_createDatabase' method of the " +
-     "'db' object. To switch into an existing database, use '_useDatabase'. To get rid of a " +
-     "database and all of its collections, use '_dropDatabase':\n\n" +
+   text: 'By default, the ArangoShell connects to the default database.\n' +
+     "The default database is named '_system'. To create another database, use the\n" +
+     "'_createDatabase' method of the 'db' object. To switch to an existing database,\n" +
+     "use '_useDatabase':\n\n" +
      ' db._createDatabase("mydb");\n' +
-     ' db._useDatabase("mydb");\n' +
+     ' db._useDatabase("mydb");'
+ },
+ {
+   title: 'Removing Databases',
+   text: "To get rid of a database and all of its collections, use '_dropDatabase'.\n" +
+     "It needs to be called from within the '_system' database:\n\n" +
+     ' db._useDatabase("_system");\n' +
+     ' db._dropDatabase("mydb");'
  }

@@ -49,6 +49,8 @@ function agencyTestSuite () {
  var whoseTurn = 0;
  var request = require("@arangodb/request");

+ wait(2);
+
  function readAgency(list) {
    // We simply try all agency servers in turn until one gives us an HTTP
    // response:

@@ -97,14 +99,6 @@ function agencyTestSuite () {
    tearDown : function () {
    },

-   ////////////////////////////////////////////////////////////////////////////////
-   /// @brief startup timing
-   ////////////////////////////////////////////////////////////////////////////////
-
-   testStartup : function () {
-     assertEqual(readAndCheck([["/x"]]), [{}]);
-   },
-
    ////////////////////////////////////////////////////////////////////////////////
    /// @brief test to write a single top level key
    ////////////////////////////////////////////////////////////////////////////////

@@ -1832,6 +1832,8 @@ function arangoErrorToHttpCode (num) {
    case arangodb.ERROR_ARANGO_COLLECTION_NOT_LOADED:
    case arangodb.ERROR_ARANGO_DOCUMENT_REV_BAD:
      return exports.HTTP_BAD;
+   case arangodb.ERROR_CLUSTER_BACKEND_UNAVAILABLE:
+     return exports.HTTP_SERVICE_UNAVAILABLE;
  }

  return exports.HTTP_BAD;
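
In effect, a JS action that throws the cluster "backend unavailable" error is now answered with HTTP 503 instead of the generic 400. A minimal sketch (hypothetical action body; the error number is the one from the case above):

```js
var arangodb = require("@arangodb");

// Somewhere inside an action/route handler:
throw new arangodb.ArangoError({
  errorNum: arangodb.ERROR_CLUSTER_BACKEND_UNAVAILABLE,
  errorMessage: "backend is gone"   // mapped to HTTP 503 by arangoErrorToHttpCode
});
```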

@@ -322,7 +322,7 @@ function createLocalDatabases (plannedDatabases, currentDatabases, writeLocked)

  // TODO: handle options and user information

- console.info("creating local database '%s'", payload.name);
+ console.debug("creating local database '%s'", payload.name);

  try {
    db._createDatabase(payload.name);

@@ -528,7 +528,7 @@ function createLocalCollections (plannedCollections, planVersion,

  if (!localCollections.hasOwnProperty(shard)) {
    // must create this shard
-   console.info("creating local shard '%s/%s' for central '%s/%s'",
+   console.debug("creating local shard '%s/%s' for central '%s/%s'",
                 database,
                 shard,
                 database,

@@ -642,7 +642,7 @@ function createLocalCollections (plannedCollections, planVersion,

  if (index.type !== 'primary' && index.type !== 'edge' &&
      !indexes.hasOwnProperty(index.id)) {
-   console.info("creating index '%s/%s': %s",
+   console.debug("creating index '%s/%s': %s",
                 database,
                 shard,
                 JSON.stringify(index));

@@ -982,7 +982,7 @@ function tryLaunchJob () {
    return;
  }
  global.KEY_SET('shardSynchronization', 'running', jobInfo);
- console.info('scheduleOneShardSynchronization: have launched job', jobInfo);
+ console.debug('scheduleOneShardSynchronization: have launched job', jobInfo);
  delete jobs.scheduled[shards[0]];
  global.KEY_SET('shardSynchronization', 'scheduled', jobs.scheduled);
}

@@ -1062,7 +1062,7 @@ function synchronizeOneShard (database, shard, planId, leader) {
  var ok = false;
  const rep = require('@arangodb/replication');

- console.info("synchronizeOneShard: trying to synchronize local shard '%s/%s' for central '%s/%s'", database, shard, database, planId);
+ console.debug("synchronizeOneShard: trying to synchronize local shard '%s/%s' for central '%s/%s'", database, shard, database, planId);
  try {
    var ep = ArangoClusterInfo.getServerEndpoint(leader);
    // First once without a read transaction:

@@ -1123,7 +1123,7 @@ function synchronizeOneShard (database, shard, planId, leader) {
                 shard);
  }
  if (ok) {
-   console.info('synchronizeOneShard: synchronization worked for shard',
+   console.debug('synchronizeOneShard: synchronization worked for shard',
                 shard);
  } else {
    throw 'Did not work for shard ' + shard + '.';

@@ -1139,7 +1139,7 @@ function synchronizeOneShard (database, shard, planId, leader) {
  }
  // Tell others that we are done:
  terminateAndStartOther();
- console.info('synchronizeOneShard: done, %s/%s, %s/%s',
+ console.debug('synchronizeOneShard: done, %s/%s, %s/%s',
               database, shard, database, planId);
}

@@ -1627,7 +1627,7 @@ var handlePlanChange = function (plan, current) {
  try {
    versions.success = handleChanges(plan, current, writeLocked);

-   console.info('plan change handling successful');
+   console.debug('plan change handling successful');
  } catch (err) {
    console.error('error details: %s', JSON.stringify(err));
    console.error('error stack: %s', err.stack);

@@ -1829,7 +1829,8 @@ function rebalanceShards () {
      db._useDatabase('_system');
    }
  }

  console.info("Rebalancing shards");
  console.info(shardMap);
  console.info(dbTab);

@@ -257,12 +257,13 @@ function optimizerIndexesSortTestSuite () {
    c.ensureHashIndex("value2", "value3");

    var queries = [
-     [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value2 ASC RETURN i.value2", true ],
-     [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value2 DESC RETURN i.value2", true ],
+     [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value2 ASC RETURN i.value2", false ],
+     [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value2 DESC RETURN i.value2", false ],
      [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value2, i.value3 RETURN i.value2", false ],
      [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value2 ASC, i.value3 DESC RETURN i.value2", false ],
      [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value2 DESC, i.value3 DESC RETURN i.value2", false ],
-     [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && PASSTHRU(1) SORT i.value2 RETURN i.value2", true ]
+     [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && PASSTHRU(1) SORT i.value2 RETURN i.value2", false ],
+     [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value2, i.value4 RETURN i.value2", true ]
    ];

    queries.forEach(function(query) {

@@ -313,10 +314,12 @@ function optimizerIndexesSortTestSuite () {
    var queries = [
      [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value2 ASC, i.value3 ASC RETURN i.value2", false ],
      [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value2 DESC, i.value3 DESC RETURN i.value2", false ],
-     [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value3 ASC RETURN i.value2", true ],
-     [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value2 ASC RETURN i.value2", true ],
-     [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value3 DESC RETURN i.value2", true ],
-     [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value2 DESC RETURN i.value2", true ]
+     [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value3 ASC RETURN i.value2", false ],
+     [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value2 ASC RETURN i.value2", false ],
+     [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value3 DESC RETURN i.value2", false ],
+     [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value2 DESC RETURN i.value2", false ],
+     [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value2, i.value4 DESC RETURN i.value2", true ],
+     [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value4, i.value2 DESC RETURN i.value2", true ]
    ];

    queries.forEach(function(query) {

@@ -356,12 +359,12 @@ function optimizerIndexesSortTestSuite () {
      [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value3 ASC, i.value4 ASC RETURN i.value2", false ],
      [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value3 DESC, i.value4 DESC RETURN i.value2" ,false ],

-     [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value3 ASC RETURN i.value2", false ],
-     [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value3 DESC RETURN i.value2", false ],
-     [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value3 ASC, i.value4 ASC RETURN i.value2", false ],
-     [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value3 DESC, i.value4 DESC RETURN i.value2", false ],
-     [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value4 ASC RETURN i.value2", false ],
-     [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value4 DESC RETURN i.value2", false ],
+     [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value3 ASC RETURN i.value2", true ],
+     [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value3 DESC RETURN i.value2", true ],
+     [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value3 ASC, i.value4 ASC RETURN i.value2", true ],
+     [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value3 DESC, i.value4 DESC RETURN i.value2", true ],
+     [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value4 ASC RETURN i.value2", true ],
+     [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value4 DESC RETURN i.value2", true ],
      [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value2 ASC, i.value3 ASC, i.value4 ASC RETURN i.value2", true ],
      [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value2 DESC, i.value3 DESC, i.value4 DESC RETURN i.value2", true ],
      [ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value2 ASC, i.value3 ASC, i.value4 DESC RETURN i.value2", true ]

@@ -256,7 +256,56 @@ function optimizerRuleTestSuite () {
      assertNotEqual(-1, result.plan.rules.indexOf(ruleName), query);
      assertEqual(simplePlan[2].type, "NoResultsNode");
    });
  },

+ ////////////////////////////////////////////////////////////////////////////////
+ /// @brief test multiple conditions
+ ////////////////////////////////////////////////////////////////////////////////
+
+ testCondVars1 : function () {
+   var queries = [
+     "LET data = (FOR i IN 1..1 RETURN i) FOR v, e, p IN 1..10 OUTBOUND data GRAPH '" + graphName + "' FILTER p.vertices[0]._id == '123' FILTER p.vertices[1]._id != null FILTER p.edges[0]._id IN data[*].foo.bar RETURN 1"
+   ];
+
+   queries.forEach(function(query) {
+     var result = AQL_EXPLAIN(query);
+     assertNotEqual(-1, result.plan.rules.indexOf(ruleName), query);
+     assertEqual(0, AQL_EXECUTE(query).json.length);
+   });
+ },
+
+ ////////////////////////////////////////////////////////////////////////////////
+ /// @brief test multiple conditions
+ ////////////////////////////////////////////////////////////////////////////////
+
+ testCondVars2 : function () {
+   var queries = [
+     "LET data = (FOR i IN 1..1 RETURN i) FOR v, e, p IN 1..10 OUTBOUND 'circles/A' GRAPH '" + graphName + "' FILTER p.vertices[0]._id == '123' FILTER p.vertices[1]._id != null FILTER p.edges[0]._id IN data[*].foo.bar RETURN 1"
+   ];
+
+   queries.forEach(function(query) {
+     var result = AQL_EXPLAIN(query);
+     assertNotEqual(-1, result.plan.rules.indexOf(ruleName), query);
+     assertEqual(0, AQL_EXECUTE(query).json.length);
+   });
+ },
+
+ ////////////////////////////////////////////////////////////////////////////////
+ /// @brief test multiple conditions
+ ////////////////////////////////////////////////////////////////////////////////
+
+ testCondVars3 : function () {
+   var queries = [
+     "LET data = (FOR i IN 1..1 RETURN i) FOR v, e, p IN 1..10 OUTBOUND 'circles/A' GRAPH '" + graphName + "' FILTER p.vertices[0]._id == '123' FILTER p.vertices[1]._id != null FILTER p.edges[0]._id IN data[*].foo.bar FILTER p.edges[1]._key IN data[*].bar.baz._id RETURN 1"
+   ];
+
+   queries.forEach(function(query) {
+     var result = AQL_EXPLAIN(query);
+     assertNotEqual(-1, result.plan.rules.indexOf(ruleName), query);
+     assertEqual(0, AQL_EXECUTE(query).json.length);
+   });
+ }

  };
}
@ -329,8 +329,9 @@ function optimizerRuleTestSuite() {
|
|||
      hasIndexNodeWithRanges(result);

      result = AQL_EXPLAIN(query, { }, paramIndexRangeSortFilter);
      assertEqual([ IndexesRule, FilterRemoveRule ],
                  removeAlwaysOnClusterRules(result.plan.rules), query);
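      // the plan may contain additional rules, so check membership rather than
      // exact equality of the rules array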
      var rules = removeAlwaysOnClusterRules(result.plan.rules);
      assertNotEqual(-1, rules.indexOf(IndexesRule));
      assertNotEqual(-1, rules.indexOf(FilterRemoveRule));
      hasNoFilterNode(result);
      hasIndexNodeWithRanges(result);

@@ -158,45 +158,49 @@ function optimizerRuleTestSuite() {
      skiplist.ensureIndex({ type: "hash", fields: [ "y", "z" ], unique: false });

      var queries = [
        [ "FOR v IN " + colName + " FILTER v.u == 1 SORT v.u RETURN 1", false ],
        [ "FOR v IN " + colName + " FILTER v.c == 1 SORT v.c RETURN 1", true ],
        [ "FOR v IN " + colName + " FILTER v.c == 1 SORT v.z RETURN 1", false ],
        [ "FOR v IN " + colName + " FILTER v.c == 1 SORT v.f RETURN 1", false ],
        [ "FOR v IN " + colName + " FILTER v.y == 1 SORT v.z RETURN 1", false ],
        [ "FOR v IN " + colName + " FILTER v.y == 1 SORT v.y RETURN 1", false ],
        [ "FOR v IN " + colName + " FILTER v.z == 1 SORT v.y RETURN 1", false ],
        [ "FOR v IN " + colName + " FILTER v.z == 1 SORT v.z RETURN 1", false ],
        [ "FOR v IN " + colName + " FILTER v.y == 1 && v.z == 1 SORT v.y RETURN 1", false ],
        [ "FOR v IN " + colName + " FILTER v.y == 1 && v.z == 1 SORT v.z RETURN 1", false ],
        [ "FOR v IN " + colName + " FILTER v.y == 1 && v.z == 1 SORT v.y, v.z RETURN 1", true ],
        [ "FOR v IN " + colName + " FILTER v.y == 1 && v.z == 1 SORT v.z, v.y RETURN 1", false ], // not supported yet
        [ "FOR v IN " + colName + " FILTER v.d == 1 SORT v.d RETURN 1", true ],
        [ "FOR v IN " + colName + " FILTER v.d == 1 && v.e == 1 SORT v.d RETURN 1", true ],
        [ "FOR v IN " + colName + " FILTER v.d == 1 SORT v.e RETURN 1", false ],
        [ "FOR v IN " + colName + " FILTER v.a == 1 SORT v.a, v.b RETURN 1", true ],
        [ "FOR v IN " + colName + " FILTER v.a == 1 && v.b == 1 SORT v.a, v.b RETURN 1", true ],
        [ "FOR v IN " + colName + " FILTER v.a == 1 SORT v.a RETURN 1", true ],
        [ "FOR v IN " + colName + " FILTER v.a == 1 SORT v.a, v.b RETURN 1", true ],
        [ "FOR v IN " + colName + " FILTER v.a == 1 SORT v.b RETURN 1", true ],
        [ "FOR v IN " + colName + " FILTER v.a == 1 && v.b == 1 SORT v.b RETURN 1", true ],
        [ "FOR v IN " + colName + " FILTER v.b == 1 SORT v.a, v.b RETURN 1", true ],
        [ "FOR v IN " + colName + " FILTER v.b == 1 SORT v.b RETURN 1", false ],
        [ "FOR v IN " + colName + " FILTER v.b == 1 SORT v.b, v.a RETURN 1", false ],
        [ "FOR v IN " + colName + " FILTER v.a == 1 && v.b == 1 SORT v.b, v.a RETURN 1", false ],
        [ "FOR v IN " + colName + " FILTER v.a == 1 && v.b == 1 SORT v.a, v.b RETURN 1", true ],
        [ "FOR v IN " + colName + " FILTER v.a == 1 && v.b == 1 SORT v.a, v.c RETURN 1", false ],
        [ "FOR v IN " + colName + " FILTER v.a == 1 && v.b == 1 SORT v.b, v.a RETURN 1", false ]
        [ "FOR v IN " + colName + " FILTER v.u == 1 SORT v.u RETURN 1", false, true ],
        [ "FOR v IN " + colName + " FILTER v.c == 1 SORT v.c RETURN 1", true, false ],
        [ "FOR v IN " + colName + " FILTER v.c == 1 SORT v.z RETURN 1", false, true ],
        [ "FOR v IN " + colName + " FILTER v.c == 1 SORT v.f RETURN 1", false, true ],
        [ "FOR v IN " + colName + " FILTER v.y == 1 SORT v.z RETURN 1", false, true ],
        [ "FOR v IN " + colName + " FILTER v.y == 1 SORT v.y RETURN 1", false, true ],
        [ "FOR v IN " + colName + " FILTER v.z == 1 SORT v.y RETURN 1", false, true ],
        [ "FOR v IN " + colName + " FILTER v.z == 1 SORT v.z RETURN 1", false, true ],
        [ "FOR v IN " + colName + " FILTER v.y == 1 && v.z == 1 SORT v.y RETURN 1", true, false ],
        [ "FOR v IN " + colName + " FILTER v.y == 1 && v.z == 1 SORT v.z RETURN 1", true, false ],
        [ "FOR v IN " + colName + " FILTER v.y == 1 && v.z == 1 SORT v.y, v.z RETURN 1", true, false ],
        [ "FOR v IN " + colName + " FILTER v.y == 1 && v.z == 1 SORT v.z, v.y RETURN 1", true, false ],
        [ "FOR v IN " + colName + " FILTER v.d == 1 SORT v.d RETURN 1", true, false ],
        [ "FOR v IN " + colName + " FILTER v.d == 1 && v.e == 1 SORT v.d RETURN 1", true, false ],
        [ "FOR v IN " + colName + " FILTER v.d == 1 SORT v.e RETURN 1", false, true ],
        [ "FOR v IN " + colName + " FILTER v.a == 1 SORT v.a, v.b RETURN 1", true, false ],
        [ "FOR v IN " + colName + " FILTER v.a == 1 && v.b == 1 SORT v.a, v.b RETURN 1", true, false ],
        [ "FOR v IN " + colName + " FILTER v.a == 1 SORT v.a RETURN 1", true, false ],
        [ "FOR v IN " + colName + " FILTER v.a == 1 SORT v.a, v.b RETURN 1", true, false ],
        [ "FOR v IN " + colName + " FILTER v.a == 1 SORT v.b RETURN 1", true, false ],
        [ "FOR v IN " + colName + " FILTER v.a == 1 && v.b == 1 SORT v.b RETURN 1", true, false ],
        [ "FOR v IN " + colName + " FILTER v.b == 1 SORT v.a, v.b RETURN 1", true, false ],
        [ "FOR v IN " + colName + " FILTER v.b == 1 SORT v.b RETURN 1", false, true ],
        [ "FOR v IN " + colName + " FILTER v.b == 1 SORT v.b, v.a RETURN 1", false, true ],
        [ "FOR v IN " + colName + " FILTER v.a == 1 && v.b == 1 SORT v.b, v.a RETURN 1", true, false ],
        [ "FOR v IN " + colName + " FILTER v.a == 1 && v.b == 1 SORT v.a, v.b RETURN 1", true, false ],
        [ "FOR v IN " + colName + " FILTER v.a == 1 && v.b == 1 SORT v.a, v.c RETURN 1", true, true ],
        [ "FOR v IN " + colName + " FILTER v.a == 1 && v.b == 1 SORT v.b, v.a RETURN 1", true, false ],
        [ "FOR v IN " + colName + " FILTER v.a == 1 && v.b == 1 SORT v.a, v.b, v.c RETURN 1", true, true ]
      ];

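      // each entry is [ query, rule expected to fire, a SortNode expected to remain ]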
      queries.forEach(function(query) {
        var result = AQL_EXPLAIN(query[0]);
        if (query[1]) {
          assertNotEqual(-1, removeAlwaysOnClusterRules(result.plan.rules).indexOf(ruleName), query[0]);
          hasNoSortNode(result);
        }
        else {
          assertEqual(-1, removeAlwaysOnClusterRules(result.plan.rules).indexOf(ruleName), query[0]);
        }
        if (query[2]) {
          hasSortNode(result);
        } else {
          hasNoSortNode(result);
        }
      });
    },

@@ -419,7 +423,7 @@ function optimizerRuleTestSuite() {
      QResults[2] = AQL_EXECUTE(query, { }, paramIndexFromSort_IndexRange).json;
      XPresult = AQL_EXPLAIN(query, { }, paramIndexFromSort_IndexRange);

      assertEqual([ secondRuleName ], removeAlwaysOnClusterRules(XPresult.plan.rules).sort());
      assertEqual([ ruleName, secondRuleName ], removeAlwaysOnClusterRules(XPresult.plan.rules).sort());
      // The sortnode and its calculation node should not have been removed.
      hasSortNode(XPresult);
      hasCalculationNodes(XPresult, 4);

@@ -1069,7 +1073,30 @@ function optimizerRuleTestSuite() {
        }
      });
      assertTrue(seen);
    },

    testSortModifyFilterCondition : function () {
      var query = "FOR v IN " + colName + " FILTER v.a == 123 SORT v.a, v.xxx RETURN v";
      var rules = AQL_EXPLAIN(query).plan.rules;
      assertNotEqual(-1, rules.indexOf(ruleName));
      assertNotEqual(-1, rules.indexOf(secondRuleName));
      assertNotEqual(-1, rules.indexOf("remove-filter-covered-by-index"));

      var nodes = AQL_EXPLAIN(query).plan.nodes;
      var seen = 0;
      nodes.forEach(function(node) {
        if (node.type === "IndexNode") {
          ++seen;
          assertFalse(node.reverse);
        } else if (node.type === "SortNode") {
          // first sort condition (v.a) should have been removed because it is const
          ++seen;
          assertEqual(1, node.elements.length);
        }
      });
      assertEqual(2, seen);
    }

  };
}

@@ -25,6 +25,7 @@
#define ARANGODB_BASICS_STRING_BUFFER_H 1

#include "Basics/Common.h"
#include "Basics/Exceptions.h"
#include "Logger/Logger.h"
#include "Zip/zip.h"

@@ -397,6 +398,10 @@ class StringBuffer {

  explicit StringBuffer(TRI_memory_zone_t* zone, bool initializeMemory = true) {
    TRI_InitStringBuffer(&_buffer, zone, initializeMemory);

    if (_buffer._buffer == nullptr) {
      THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY);
    }
  }

  //////////////////////////////////////////////////////////////////////////////
@@ -405,6 +410,10 @@ class StringBuffer {

  StringBuffer(TRI_memory_zone_t* zone, size_t initialSize, bool initializeMemory = true) {
    TRI_InitSizedStringBuffer(&_buffer, zone, initialSize, initializeMemory);

    if (_buffer._buffer == nullptr) {
      THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY);
    }
  }

  //////////////////////////////////////////////////////////////////////////////

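Both constructors now fail fast: if the underlying `TRI_InitStringBuffer`/`TRI_InitSizedStringBuffer` call leaves the buffer null, they throw `TRI_ERROR_OUT_OF_MEMORY` instead of handing back a half-initialized object. A minimal sketch of the same pattern with illustrative names (`Buffer` is a stand-in, not the ArangoDB class):

```cpp
// Sketch: throw from the constructor on allocation failure so no
// unusable object can escape; the destructor handles the success path.
#include <cstdlib>
#include <new>

class Buffer {
 public:
  explicit Buffer(size_t initialSize) {
    _data = static_cast<char*>(std::malloc(initialSize));
    if (_data == nullptr) {
      // plays the role of THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY)
      throw std::bad_alloc();
    }
  }
  ~Buffer() { std::free(_data); }

 private:
  char* _data;
};
```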
@@ -26,6 +26,7 @@
#include "Basics/Exceptions.h"
|
||||
#include "Logger/Logger.h"
|
||||
#include "Basics/StaticStrings.h"
|
||||
#include "Basics/StringBuffer.h"
|
||||
#include "Basics/StringUtils.h"
|
||||
#include "Basics/Utf8Helper.h"
|
||||
#include "Basics/VPackStringBufferAdapter.h"
|
||||
|
@@ -431,19 +432,17 @@ static bool PrintVelocyPack(int fd, VPackSlice const& slice,
    return false;
  }

  TRI_string_buffer_t buffer;
  TRI_InitStringBuffer(&buffer, TRI_UNKNOWN_MEM_ZONE);
  arangodb::basics::VPackStringBufferAdapter bufferAdapter(&buffer);
  arangodb::basics::StringBuffer buffer(TRI_UNKNOWN_MEM_ZONE);
  arangodb::basics::VPackStringBufferAdapter bufferAdapter(buffer.stringBuffer());
  try {
    VPackDumper dumper(&bufferAdapter);
    dumper.dump(slice);
  } catch (...) {
    // Writing failed
    TRI_AnnihilateStringBuffer(&buffer);
    return false;
  }

  if (TRI_LengthStringBuffer(&buffer) == 0) {
  if (buffer.length() == 0) {
    // should not happen
    return false;
  }
@@ -451,17 +450,16 @@ static bool PrintVelocyPack(int fd, VPackSlice const& slice,
  if (appendNewline) {
    // add the newline here so we only need one write operation in the ideal
    // case
    TRI_AppendCharStringBuffer(&buffer, '\n');
    buffer.appendChar('\n');
  }

  char const* p = TRI_BeginStringBuffer(&buffer);
  size_t n = TRI_LengthStringBuffer(&buffer);
  char const* p = buffer.begin();
  size_t n = buffer.length();

  while (0 < n) {
    ssize_t m = TRI_WRITE(fd, p, (TRI_write_t)n);

    if (m <= 0) {
      TRI_AnnihilateStringBuffer(&buffer);
      return false;
    }

@@ -469,7 +467,6 @@ static bool PrintVelocyPack(int fd, VPackSlice const& slice,
    p += m;
  }

  TRI_AnnihilateStringBuffer(&buffer);
  return true;
}

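The rewrite replaces the C-style `TRI_string_buffer_t`, which had to be released with `TRI_AnnihilateStringBuffer` on every exit path, with the RAII `StringBuffer` class whose destructor frees the memory; the explicit cleanup calls on the error paths therefore disappear. A minimal sketch of the idea, using `std::string` as an illustrative RAII buffer rather than the ArangoDB class:

```cpp
// Sketch only: std::string stands in for an RAII-managed buffer, so
// every early return releases the memory without manual cleanup.
#include <cstdio>
#include <string>

static bool writeWithNewline(FILE* fd, std::string const& payload) {
  std::string buffer(payload);  // freed on every return path
  buffer.push_back('\n');       // one write suffices in the ideal case

  char const* p = buffer.data();
  size_t n = buffer.size();

  while (n > 0) {
    size_t m = std::fwrite(p, 1, n, fd);
    if (m == 0) {
      return false;  // no TRI_AnnihilateStringBuffer equivalent needed
    }
    n -= m;
    p += m;
  }
  return true;
}
```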
@@ -24,6 +24,7 @@
#define ARANGODB_PROGRAM_OPTIONS_PARAMETERS_H 1

#include "Basics/Common.h"
#include "Basics/Exceptions.h"
#include "Basics/fpconv.h"

#include <velocypack/Builder.h>

@@ -305,7 +306,25 @@ template <typename T>
struct DiscreteValuesParameter : public T {
  DiscreteValuesParameter(typename T::ValueType* ptr,
                          std::unordered_set<typename T::ValueType> const& allowed)
      : T(ptr), allowed(allowed) {}
      : T(ptr), allowed(allowed) {

    if (allowed.find(*ptr) == allowed.end()) {
      // default value is not in list of allowed values
      std::string msg("invalid default value for DiscreteValues parameter: ");
      msg.append(stringifyValue(*ptr));
      msg.append(". allowed values: ");
      size_t i = 0;
      for (auto const& it : allowed) {
        if (i > 0) {
          msg.append(" or ");
        }
        msg.append(stringifyValue(it));
        ++i;
      }

      THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, msg.c_str());
    }
  }

  std::string set(std::string const& value) override {
    auto it = allowed.find(fromString<typename T::ValueType>(value));

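With this change the constructor validates the registered default value against the allowed set up front, so a misconfigured option aborts at startup instead of silently carrying an unacceptable default that `set()` would reject. A stand-alone sketch of the same fail-fast idea (illustrative types, not the ArangoDB ProgramOptions API):

```cpp
// Sketch: reject a default that is not one of the allowed discrete values.
#include <cstdint>
#include <stdexcept>
#include <unordered_set>

struct DiscreteUInt64 {
  uint64_t* ptr;
  std::unordered_set<uint64_t> allowed;

  DiscreteUInt64(uint64_t* p, std::unordered_set<uint64_t> a)
      : ptr(p), allowed(std::move(a)) {
    if (allowed.find(*ptr) == allowed.end()) {
      // fail fast, mirroring THROW_ARANGO_EXCEPTION_MESSAGE above
      throw std::invalid_argument("invalid default value for discrete parameter");
    }
  }
};

int main() {
  uint64_t value = 3;
  DiscreteUInt64 ok(&value, {1, 2, 3});    // fine: 3 is allowed
  try {
    uint64_t bad = 7;
    DiscreteUInt64 nope(&bad, {1, 2, 3});  // throws: 7 is not allowed
  } catch (std::invalid_argument const&) {
  }
  return 0;
}
```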
@@ -85,7 +85,7 @@ void SslServerFeature::collectOptions(std::shared_ptr<ProgramOptions> options) {

  options->addHiddenOption(
      "--ssl.options", "ssl connection options, see OpenSSL documentation",
      new DiscreteValuesParameter<UInt64Parameter>(&_sslOptions, sslProtocols));
      new UInt64Parameter(&_sslOptions));

  options->addOption(
      "--ssl.ecdh-curve",
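This presumably pairs with the fail-fast constructor above: `--ssl.options` holds a bitmask of OpenSSL option flags, not one value from a fixed list, and the discrete set passed here was `sslProtocols` (protocol versions), so the default bitmask would not be found in it and construction would now throw at startup. A plain `UInt64Parameter` accepts the bitmask. A sketch (not ArangoDB code) of why a bitmask resists a discrete allowed set, using OpenSSL's real `SSL_OP_*` macros:

```cpp
// Sketch: SSL "options" compose as a bitmask, so the set of valid
// values is combinatorial rather than a short discrete list.
#include <openssl/ssl.h>
#include <cstdint>

uint64_t exampleSslOptions() {
  // any OR-combination of SSL_OP_* flags is a legal value
  return SSL_OP_NO_SSLv2 | SSL_OP_NO_SSLv3 | SSL_OP_CIPHER_SERVER_PREFERENCE;
}
```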