1
0
Fork 0

Merge branch 'devel' of github.com:arangodb/ArangoDB into pipeline

This commit is contained in:
Wilfried Goesgens 2016-08-24 14:46:48 +02:00
commit 1688655509
62 changed files with 1065 additions and 396 deletions

View File

@ -1,7 +1,7 @@
devel
-----
* added a memory expection of V8 memory gets to low
* added a memory exception in case V8 memory gets too low
* fixed epoch computation in hybrid logical clock
@ -43,7 +43,16 @@ devel
* added module.context.createDocumentationRouter to replace module.context.apiDocumentation
v3.0.5 (XXXX-XX-XX)
v3.0.6 (XXXX-XX-XX)
-------------------
* fix execution of AQL traversal expressions when there are multiple
conditions that refer to variables set outside the traversal
* properly return HTTP 503 in JS actions when backend is gone
v3.0.5 (2016-08-18)
-------------------
* execute AQL ternary operator via C++ if possible

View File

@ -24,15 +24,15 @@ a *_key* attribute. If no *_key* attribute is provided, ArangoDB will auto-gener
a value for *_key* value. Inserting a document will also auto-generate a document
revision number for the document.
```
```js
FOR i IN 1..100
INSERT { value: i } IN numbers
```
When inserting into an [edge collection](../../Manual/Appendix/Glossary.html#edge-collection), it is mandatory to specify the attributes
*_from* and *_to* in document:
When inserting into an [edge collection](../../Manual/Appendix/Glossary.html#edge-collection),
it is mandatory to specify the attributes *_from* and *_to* in the document:
```
```js
FOR u IN users
FOR p IN products
FILTER u._key == p.recommendedBy
@ -44,17 +44,25 @@ FOR u IN users
*options* can be used to suppress query errors that may occur when violating unique
key constraints:
```
```js
FOR i IN 1..1000
INSERT { _key: CONCAT('test', i), name: "test" } WITH { foobar: true } IN users OPTIONS { ignoreErrors: true }
INSERT {
_key: CONCAT('test', i),
name: "test",
foobar: true
} INTO users OPTIONS { ignoreErrors: true }
```
To make sure data are durable when an insert query returns, there is the *waitForSync*
query option:
```
```js
FOR i IN 1..1000
INSERT { _key: CONCAT('test', i), name: "test" } WITH { foobar: true } IN users OPTIONS { waitForSync: true }
INSERT {
_key: CONCAT('test', i),
name: "test",
foobar: true
} INTO users OPTIONS { waitForSync: true }
```
!SUBSECTION Returning the inserted documents
@ -68,18 +76,16 @@ The documents contained in `NEW` will contain all attributes, even those auto-ge
the database (e.g. `_id`, `_key`, `_rev`).
```
```js
INSERT document IN collection options RETURN NEW
```
Following is an example using a variable named `inserted` to return the inserted
documents. For each inserted document, the document key is returned:
```
```js
FOR i IN 1..100
INSERT { value: i }
LET inserted = NEW
RETURN inserted._key
```

View File

@ -29,6 +29,80 @@ scope the *RETURN* is placed in can be used for the computations.
Note: *RETURN* will close the current scope and eliminate all local variables in
it.
[Dynamic attribute names](../Fundamentals/DataTypes.md#objects--documents) are
supported as well:
```js
FOR u IN users
RETURN { [ u._id ]: u.age }
```
The document *_id* of every user is used as expression to compute the
attribute key in this example:
```json
[
{
"users/9883": 32
},
{
"users/9915": 27
},
{
"users/10074": 69
}
]
```
The result contains one object per user with a single key/value pair each.
This is usually not desired. For a single object, that maps user IDs to ages,
the individual results need to be merged and returned with another `RETURN`:
```js
RETURN MERGE(
FOR u IN users
RETURN { [ u._id ]: u.age }
)
```
```json
[
{
"users/10074": 69,
"users/9883": 32,
"users/9915": 27
}
]
```
Keep in mind that if the key expression evaluates to the same value multiple
times, only one of the key/value pairs with the duplicate name will survive
[MERGE()](../Functions/Document.md#merge). To avoid this, you can go without
dynamic attribute names, use static names instead and return all document
properties as attribute values:
```js
FOR u IN users
RETURN { name: u.name, age: u.age }
```
```json
[
{
"name": "John Smith",
"age": 32
},
{
"name": "James Hendrix",
"age": 69
},
{
"name": "Katie Foster",
"age": 27
}
]
```
!SUBSECTION RETURN DISTINCT
Since ArangoDB 2.7, *RETURN* can optionally be followed by the *DISTINCT* keyword.

View File

@ -23,17 +23,17 @@ be updated. *document* must be a document that contains the attributes and value
to be updated. When using the first syntax, *document* must also contain the *_key*
attribute to identify the document to be updated.
```
```js
FOR u IN users
UPDATE { _key: u._key, name: CONCAT(u.firstName, u.lastName) } IN users
UPDATE { _key: u._key, name: CONCAT(u.firstName, " ", u.lastName) } IN users
```
The following query is invalid because it does not contain a *_key* attribute and
thus it is not possible to determine the documents to be updated:
```
```js
FOR u IN users
UPDATE { name: CONCAT(u.firstName, u.lastName) } IN users
UPDATE { name: CONCAT(u.firstName, " ", u.lastName) } IN users
```
When using the second syntax, *keyExpression* provides the document identification.
@ -42,21 +42,21 @@ document, which must contain a *_key* attribute.
The following queries are equivalent:
```
```js
FOR u IN users
UPDATE u._key WITH { name: CONCAT(u.firstName, u.lastName) } IN users
UPDATE u._key WITH { name: CONCAT(u.firstName, " ", u.lastName) } IN users
FOR u IN users
UPDATE { _key: u._key } WITH { name: CONCAT(u.firstName, u.lastName) } IN users
UPDATE { _key: u._key } WITH { name: CONCAT(u.firstName, " ", u.lastName) } IN users
FOR u IN users
UPDATE u WITH { name: CONCAT(u.firstName, u.lastName) } IN users
UPDATE u WITH { name: CONCAT(u.firstName, " ", u.lastName) } IN users
```
An update operation may update arbitrary documents which do not need to be identical
to the ones produced by a preceding *FOR* statement:
```
```js
FOR i IN 1..1000
UPDATE CONCAT('test', i) WITH { foobar: true } IN users
@ -65,14 +65,74 @@ FOR u IN users
UPDATE u WITH { status: 'inactive' } IN backup
```
!SUBSECTION Using the current value of a document attribute
The pseudo-variable `OLD` is not supported inside of `WITH` clauses (it is
available after `UPDATE`). To access the current attribute value, you can
usually refer to a document via the variable of the `FOR` loop, which is used
to iterate over a collection:
```js
FOR doc IN users
UPDATE doc WITH {
fullName: CONCAT(doc.firstName, " ", doc.lastName)
} IN users
```
If there is no loop, because a single document is updated only, then there
might not be a variable like above (`doc`), which would let you refer to the
document which is being updated:
```js
UPDATE "users/john" WITH { ... } IN users
```
To access the current value in this situation, the document has to be retrieved
and stored in a variable first:
```js
LET doc = DOCUMENT("users/john")
UPDATE doc WITH {
fullName: CONCAT(doc.firstName, " ", doc.lastName)
} IN users
```
An existing attribute can be modified based on its current value this way,
to increment a counter for instance:
```js
UPDATE doc WITH {
karma: doc.karma + 1
} IN users
```
If the attribute `karma` doesn't exist yet, `doc.karma` is evaluated to *null*.
The expression `null + 1` results in the new attribute `karma` being set to *1*.
If the attribute does exist, then it is increased by *1*.
Arrays can be mutated too of course:
```js
UPDATE doc WITH {
hobbies: PUSH(doc.hobbies, "swimming")
} IN users
```
If the attribute `hobbies` doesn't exist yet, it is conveniently initialized
as `[ "swimming" ]` and otherwise extended.
!SUBSECTION Setting query options
*options* can be used to suppress query errors that may occur when trying to
update non-existing documents or violating unique key constraints:
```
```js
FOR i IN 1..1000
UPDATE { _key: CONCAT('test', i) } WITH { foobar: true } IN users OPTIONS { ignoreErrors: true }
UPDATE {
_key: CONCAT('test', i)
} WITH {
foobar: true
} IN users OPTIONS { ignoreErrors: true }
```
An update operation will only update the attributes specified in *document* and
@ -84,9 +144,12 @@ When updating an attribute with a null value, ArangoDB will not remove the attri
from the document but store a null value for it. To get rid of attributes in an update
operation, set them to null and provide the *keepNull* option:
```
```js
FOR u IN users
UPDATE u WITH { foobar: true, notNeeded: null } IN users OPTIONS { keepNull: false }
UPDATE u WITH {
foobar: true,
notNeeded: null
} IN users OPTIONS { keepNull: false }
```
The above query will remove the *notNeeded* attribute from the documents and update
@ -100,17 +163,21 @@ The following query will set the updated document's *name* attribute to the exac
same value that is specified in the query. This is due to the *mergeObjects* option
being set to *false*:
```
```js
FOR u IN users
UPDATE u WITH { name: { first: "foo", middle: "b.", last: "baz" } } IN users OPTIONS { mergeObjects: false }
UPDATE u WITH {
name: { first: "foo", middle: "b.", last: "baz" }
} IN users OPTIONS { mergeObjects: false }
```
Contrary, the following query will merge the contents of the *name* attribute in the
original document with the value specified in the query:
```
```js
FOR u IN users
UPDATE u WITH { name: { first: "foo", middle: "b.", last: "baz" } } IN users OPTIONS { mergeObjects: true }
UPDATE u WITH {
name: { first: "foo", middle: "b.", last: "baz" }
} IN users OPTIONS { mergeObjects: true }
```
Attributes in *name* that are present in the to-be-updated document but not in the
@ -123,9 +190,11 @@ explicitly.
To make sure data are durable when an update query returns, there is the *waitForSync*
query option:
```
```js
FOR u IN users
UPDATE u WITH { foobar: true } IN users OPTIONS { waitForSync: true }
UPDATE u WITH {
foobar: true
} IN users OPTIONS { waitForSync: true }
```
!SUBSECTION Returning the modified documents
@ -149,7 +218,7 @@ UPDATE keyExpression WITH document IN collection options RETURN NEW
Following is an example using a variable named `previous` to capture the original
documents before modification. For each modified document, the document key is returned.
```
```js
FOR u IN users
UPDATE u WITH { value: "test" }
LET previous = OLD
@ -159,7 +228,7 @@ FOR u IN users
The following query uses the `NEW` pseudo-value to return the updated documents,
without some of the system attributes:
```
```js
FOR u IN users
UPDATE u WITH { value: "test" }
LET updated = NEW
@ -168,9 +237,8 @@ FOR u IN users
It is also possible to return both `OLD` and `NEW`:
```
```js
FOR u IN users
UPDATE u WITH { value: "test" }
RETURN { before: OLD, after: NEW }
```

View File

@ -11,8 +11,8 @@ the server on port 8529 on the localhost. For more information see the
unix> ./arangosh --server.endpoint tcp://127.0.0.1:8529 --server.username root
```
The shell will print its own version number and if successfully connected
to a server the version number of the ArangoDB server.
The shell will print its own version number and if successfully connected
to a server the version number of the ArangoDB server.
!SECTION Command-Line Options
@ -73,3 +73,22 @@ for (i = 0; i < 100000; i++) {
Since the *arangosh* version will be doing around 100k HTTP requests, and the
*arangod* version will directly write to the database.
!SECTION Using `arangosh` via unix shebang mechanisms
In unix operating systems you can start scripts by specifying the interpreter in the first line of the script.
This is commonly called `shebang` or `hash bang`. You can also do that with `arangosh`, i.e. create `~/test.js`:
#!/usr/bin/arangosh --javascript.execute
require("internal").print("hello world")
db._query("FOR x IN test RETURN x").toArray()
Note that the first line has to end with a blank in order to make it work.
Mark it executable to the OS:
#> chmod a+x ~/test.js
and finally try it out:
#> ~/test.js

View File

@ -25,12 +25,12 @@ Named graphs are completely managed by arangodb, and thus also [visible in the w
They use the full spectrum of ArangoDB's graph features. You may access them via several interfaces.
- [AQL Graph Operations](../../AQL/Graphs/index.html) with several flavors:
- [AQL Traversals](../../AQL/Graphs/Traversals.html) on both named and anonymous graphs
- [AQL Shortest Path](../../AQL/Graphs/ShortestPath.html) on both named and anonymous graph
- [The javascript General Graph implementation, as you may use it in Foxx Services](GeneralGraphs/README.md)
- [Graph Management](GeneralGraphs/Management.md); creating & manipualating graph definitions; inserting, updating and deleting vertices and edges into graphs
- [Graph Functions](GeneralGraphs/Functions.md) for working with edges and vertices, to analyze them and their relations
- [the RESTful General Graph interface](../../HTTP/Gharial/index.html) used to implement graph management in client drivers
- [AQL Traversals](../../AQL/Graphs/Traversals.html) on both named and anonymous graphs
- [AQL Shortest Path](../../AQL/Graphs/ShortestPath.html) on both named and anonymous graph
- [JavaScript General Graph implementation, as you may use it in Foxx Services](GeneralGraphs/README.md)
- [Graph Management](GeneralGraphs/Management.md); creating & manipulating graph definitions; inserting, updating and deleting vertices and edges into graphs
- [Graph Functions](GeneralGraphs/Functions.md) for working with edges and vertices, to analyze them and their relations
- [RESTful General Graph interface](../../HTTP/Gharial/index.html) used to implement graph management in client drivers
!SUBSUBSECTION Manipulating collections of named graphs with regular document functions
@ -87,10 +87,49 @@ So, if your edges have about a dozen different types, it's okay to choose the co
the `FILTER` approach is preferred. You can still use `FILTER` operations on edges of course. You can get rid
of a `FILTER` on the `type` with the former approach, everything else can stay the same.
!SUBSECTION Which part of my data is an Edge and which a Vertex?
The main objects in your data model, such as users, groups or articles, are usually considered to be vertices.
For each type of object, a document collection (also called vertex collection) should store the individual entities.
Entities can be connected by edges to express and classify relations between vertices. It often makes sense to have
an edge collection per relation type.
ArangoDB does not require you to store your data in graph structures with edges and vertices, you can also decide
to embed attributes such as which groups a user is part of, or `_id`s of documents in another document instead of
connecting the documents with edges. It can be a meaningful performance optimization for *1:n* relationships, if
your data is not very focused on relations and you don't need graph traversal with varying depth. It usually means
to introduce some redundancy and possibly inconsistencies if you embed data, but it can be an acceptable tradeoff.
!SUBSUBSECTION Vertices
Let's say we have two vertex collections, `Users` and `Groups`. Documents in the `Groups` collection contain the attributes
of the Group, i.e. when it was founded, its subject, an icon URL and so on. `Users` documents contain the data specific to a
user - like all names, birthdays, Avatar URLs, hobbies...
!SUBSUBSECTION Edges
We can use an edge collection to store relations between users and groups. Since multiple users may be in an arbitrary
number of groups, this is an **m:n** relation. The edge collection can be called `UsersInGroups` with e.g. one edge
with `_from` pointing to `Users/John` and `_to` pointing to `Groups/BowlingGroupHappyPin`. This makes the user **John**
a member of the group **Bowling Group Happy Pin**. Attributes of this relation may contain qualifiers to this relation,
like the permissions of **John** in this group, the date when he joined the group etc.
![User in group example](graph_user_in_group.png)
!SUBSUBSECTION Advantages of this approach
Graphs give you the advantage of not just being able to have a fixed number of **m:n** relations in a row, but an
arbitrary number. Edges can be traversed in both directions, so it's very easy to determine all groups a user is in,
but also to find out which members a certain group has. Users could also be interconnected to create a social network.
Using the graph data model, dealing with data that has lots of relations stays manageable and can be queried in very
flexible ways, whereas it would cause headache to handle it in a relational database system.
!SUBSECTION Backup and restore
You will certainly want to have backups of your graph data. You can use [Arangodump](../Administration/Arangodump.md) to create the backup,
and [Arangorestore](../Administration/Arangorestore.md) to restore a backup into a new ArangoDB. You should however note that:
- you need the system collection `_graphs` if you backup named graphs.
- you need to backup the complete set of all edge and vertex collections your graph consists of. Partial dump/restore may not work.

View File

@ -0,0 +1,6 @@
Users UsersInGroups Groups
+----------+ +----------------------+
| John +---------------->| BowlingGroupHappyPin |
+----------+ +----------------------+

View File

@ -1,5 +1,7 @@
!CHAPTER Limitations
!SECTION In General
Transactions in ArangoDB have been designed with particular use cases
in mind. They will be mainly useful for short and small data retrieval
and/or modification operations.
@ -49,3 +51,14 @@ It is legal to not declare read-only collections, but this should be avoided if
possible to reduce the probability of deadlocks and non-repeatable reads.
Please refer to [Locking and Isolation](LockingAndIsolation.md) for more details.
!SECTION In Clusters
Using a single instance of ArangoDB, multi-document / multi-collection queries
are guaranteed to be fully ACID. This is more than many other NoSQL database
systems support. In cluster mode, single-document operations are also fully ACID.
Multi-document / multi-collection queries in a cluster are not ACID, which is
equally the case with competing database systems. Transactions in a cluster
will be supported in a future version of ArangoDB and make these operations
fully ACID as well. Note that for non-sharded collections in a cluster, the
transactional properties of a single server apply (fully ACID).

View File

@ -138,6 +138,7 @@ MAKE_CMD_PREFIX=""
CONFIGURE_OPTIONS="-DCMAKE_INSTALL_PREFIX=/ $CMAKE_OPENSSL"
MAINTAINER_MODE="-DUSE_MAINTAINER_MODE=off"
TAR_SUFFIX=""
TARGET_DIR=""
CLANG36=0
CLANG=0
@ -211,11 +212,13 @@ while [ $# -gt 0 ]; do
;;
--sanitize)
TAR_SUFFIX="-sanitize"
SANITIZE=1
shift
;;
--coverage)
TAR_SUFFIX="-coverage"
COVERAGE=1
shift
;;
@ -419,7 +422,7 @@ if test -n "${TARGET_DIR}"; then
echo "building distribution tarball"
mkdir -p "${TARGET_DIR}"
dir="${TARGET_DIR}"
TARFILE=arangodb.tar.gz
TARFILE=arangodb-`uname`${TAR_SUFFIX}.tar.gz
TARFILE_TMP=`pwd`/arangodb.tar.$$
mkdir -p ${dir}

View File

@ -413,3 +413,8 @@
* GITHUB: https://github.com/jacomyal/sigma.js
* License: [MIT License](https://github.com/jacomyal/sigma.js/blob/master/LICENSE.txt)
#### wheelnav.js
* GITHUB: https://github.com/softwaretailoring/wheelnav
* License: [MIT License](https://github.com/softwaretailoring/wheelnav/blob/master/LICENSE)

View File

@ -348,7 +348,7 @@ priv_rpc_ret_t Agent::sendAppendEntriesRPC(std::string const& follower_id) {
arangodb::GeneralRequest::RequestType::POST, path.str(),
std::make_shared<std::string>(builder.toJson()), headerFields,
std::make_shared<AgentCallback>(this, follower_id, highest),
0.5*_config.minPing(), true, 0.75*_config.minPing());
0.1*_config.minPing(), true, 0.05*_config.minPing());
_lastSent[follower_id] = std::chrono::system_clock::now();
_lastHighest[follower_id] = highest;
@ -418,7 +418,6 @@ bool Agent::load() {
_spearhead.start();
_readDB.start();
LOG_TOPIC(DEBUG, Logger::AGENCY) << "Starting constituent personality.";
TRI_ASSERT(queryRegistry != nullptr);
if (size() == 1) {
activateAgency();
@ -554,10 +553,84 @@ bool Agent::lead() {
// Wake up supervision
_supervision.wakeUp();
// Notify inactive pool
notifyInactive();
return true;
}
// Notify inactive pool members of configuration change()
// Notify pool members that are not part of the active agency of the
// current configuration (term, own id, active list and pool map).
void Agent::notifyInactive() const {
  // Only relevant when the configured pool is larger than the active
  // agency; otherwise every pool member is already active.
  if (_config.poolSize() > _config.size()) {
    size_t size = _config.size(),
    counter = 0;
    std::map<std::string,std::string> pool = _config.pool();
    std::string path = "/_api/agency_priv/inform";
    // Build the inform message once; it is shared by all requests below.
    Builder out;
    out.openObject();
    out.add("term", VPackValue(term()));
    out.add("id", VPackValue(id()));
    out.add("active", _config.activeToBuilder()->slice());
    out.add("pool", _config.poolToBuilder()->slice());
    out.close();
    // Skip the first `size` entries of the (key-ordered) pool map and
    // inform only the remainder.  NOTE(review): this assumes the first
    // `size` map entries correspond to the active agents -- confirm that
    // config_t guarantees this ordering.
    for (auto const& p : pool) {
      ++counter;
      if (counter > size) {
        auto headerFields =
          std::make_unique<std::unordered_map<std::string, std::string>>();
        // Fire-and-forget async POST (no callback), 1 second timeout.
        arangodb::ClusterComm::instance()->asyncRequest(
          "1", 1, p.second, arangodb::GeneralRequest::RequestType::POST,
          path, std::make_shared<std::string>(out.toJson()), headerFields,
          nullptr, 1.0, true);
      }
    }
  }
}
// Handle an "inform" message received as an inactive pool member:
// validate its shape, adopt the sender's id/term, update the agency
// configuration and persist the new active/pool lists.
// Throws (codes 20011-20015) on any malformed message part.
void Agent::notify(query_t const& message) {
  VPackSlice slice = message->slice();

  if (!slice.isObject()) {
    THROW_ARANGO_EXCEPTION_MESSAGE(
      20011, std::string("Inform message must be an object. Incoming type is ")
      + slice.typeName());
  }

  if (!slice.hasKey("id") || !slice.get("id").isString()) {
    THROW_ARANGO_EXCEPTION_MESSAGE(
      20013, "Inform message must contain string parameter 'id'");
  }
  if (!slice.hasKey("term")) {
    THROW_ARANGO_EXCEPTION_MESSAGE(
      20012, "Inform message must contain uint parameter 'term'");
  }
  // NOTE(review): the constituent is updated before 'active'/'pool' are
  // validated below, so a message failing those later checks still
  // changes leader id and term while leaving the config untouched --
  // confirm this partial update is intended.
  _constituent.update(slice.get("id").copyString(), slice.get("term").getUInt());

  if (!slice.hasKey("active") || !slice.get("active").isArray()) {
    THROW_ARANGO_EXCEPTION_MESSAGE(
      20014, "Inform message must contain array 'active'");
  }
  if (!slice.hasKey("pool") || !slice.get("pool").isObject()) {
    THROW_ARANGO_EXCEPTION_MESSAGE(
      20015, "Inform message must contain object 'pool'");
  }

  _config.update(message);
  // Persist the new active/pool lists so they survive a restart.
  _state.persistActiveAgents(_config.activeToBuilder(), _config.poolToBuilder());
}
// Rebuild key value stores
bool Agent::rebuildDBs() {

View File

@ -151,13 +151,20 @@ class Agent : public arangodb::Thread {
/// @brief Serve active agent interface
bool serveActiveAgent();
/// @brief Start constituent
void startConstituent();
/// @brief Get notification as inactve pool member
void notify(query_t const&);
/// State reads persisted state and prepares the agent
friend class State;
private:
/// @brief Notify inactive pool members of changes in configuration
void notifyInactive() const;
/// @brief Activate this agent in single agent mode.
bool activateAgency();

View File

@ -230,7 +230,7 @@ bool config_t::poolComplete() const {
}
query_t const config_t::activeToBuilder () const {
query_t config_t::activeToBuilder () const {
query_t ret = std::make_shared<arangodb::velocypack::Builder>();
ret->openArray();
{
@ -243,7 +243,7 @@ query_t const config_t::activeToBuilder () const {
return ret;
}
query_t const config_t::poolToBuilder () const {
query_t config_t::poolToBuilder () const {
query_t ret = std::make_shared<arangodb::velocypack::Builder>();
ret->openObject();
{
@ -257,6 +257,26 @@ query_t const config_t::poolToBuilder () const {
}
/// @brief Adopt the 'pool' map and 'active' agent list contained in an
/// inform message.  The message is decoded outside the lock; members are
/// only written under the write lock, and only when they actually changed.
void config_t::update(query_t const& message) {
  VPackSlice const msg = message->slice();

  // Decode the incoming pool map (id -> endpoint).
  std::map<std::string, std::string> newPool;
  for (auto const& entry : VPackObjectIterator(msg.get("pool"))) {
    newPool[entry.key.copyString()] = entry.value.copyString();
  }

  // Decode the incoming list of active agent ids.
  std::vector<std::string> newActive;
  for (auto const& item : VPackArrayIterator(msg.get("active"))) {
    newActive.push_back(item.copyString());
  }

  WRITE_LOCKER(writeLocker, _lock);
  if (newPool != _pool) {
    _pool = newPool;
  }
  if (newActive != _active) {
    _active = newActive;
  }
}
/// @brief override this configuration with prevailing opinion (startup)
void config_t::override(VPackSlice const& conf) {
WRITE_LOCKER(writeLocker, _lock);
@ -347,7 +367,7 @@ void config_t::override(VPackSlice const& conf) {
/// @brief vpack representation
query_t const config_t::toBuilder() const {
query_t config_t::toBuilder() const {
query_t ret = std::make_shared<arangodb::velocypack::Builder>();
ret->openObject();
{
@ -519,7 +539,7 @@ bool config_t::merge(VPackSlice const& conf) {
_compactionStepSize = conf.get(compactionStepSizeStr).getUInt();
ss << _compactionStepSize << " (persisted)";
} else {
_compactionStepSize = 2.5;
_compactionStepSize = 1000;
ss << _compactionStepSize << " (default)";
}
} else {

View File

@ -95,7 +95,10 @@ struct config_t {
/// @brief move assignment operator
config_t& operator= (config_t&&);
/// @brief update leadership changes
void update(query_t const&);
/// @brief agent id
std::string id() const;
@ -138,8 +141,8 @@ struct config_t {
/// @brief of active agents
query_t const activeToBuilder () const;
query_t const poolToBuilder () const;
query_t activeToBuilder () const;
query_t poolToBuilder () const;
/// @brief override this configuration with prevailing opinion (startup)
@ -147,7 +150,7 @@ struct config_t {
/// @brief vpack representation
query_t const toBuilder() const;
query_t toBuilder() const;
/// @brief set id

View File

@ -394,6 +394,13 @@ void Constituent::callElection() {
}
/// @brief Adopt a new term and leader id (used while serving as an
/// inactive pool member).  Both fields are written under the cast lock
/// so concurrent readers observe a consistent pair.
void Constituent::update(std::string const& leaderID, term_t t) {
  MUTEX_LOCKER(guard, _castLock);
  _term = t;
  _leaderID = leaderID;
}
/// Start clean shutdown
void Constituent::beginShutdown() {
@ -408,7 +415,7 @@ void Constituent::beginShutdown() {
/// Start operation
bool Constituent::start(TRI_vocbase_t* vocbase,
aql::QueryRegistry* queryRegistry) {
TRI_ASSERT(vocbase != nullptr);
_vocbase = vocbase;
_queryRegistry = queryRegistry;
@ -420,7 +427,9 @@ bool Constituent::start(TRI_vocbase_t* vocbase,
/// Get persisted information and run election process
void Constituent::run() {
LOG(WARN) << "Starting constituent";
LOG_TOPIC(DEBUG, Logger::AGENCY)
<< "Pool complete. Starting constituent personality";
_id = _agent->config().id();
TRI_ASSERT(_vocbase != nullptr);
@ -455,7 +464,7 @@ void Constituent::run() {
std::vector<std::string> act = _agent->config().active();
while(!this->isStopping() &&
((size_t)(find(act.begin(), act.end(), _id) - act.begin()) >= size())) {
std::this_thread::sleep_for(std::chrono::milliseconds(1000));
std::this_thread::sleep_for(std::chrono::milliseconds(5000));
}
if (size() == 1) {

View File

@ -94,7 +94,13 @@ class Constituent : public arangodb::Thread {
bool start(TRI_vocbase_t* vocbase, aql::QueryRegistry*);
friend class Agent;
private:
/// @brief update leaderId and term if inactive
void update(std::string const&, term_t);
/// @brief set term to new term
void term(term_t);

View File

@ -133,15 +133,26 @@ RestHandler::status RestAgencyHandler::handleWrite() {
if (query->slice().length() == 0) {
Builder body;
body.openObject();
body.add(
"message", VPackValue("Empty request."));
body.add("message", VPackValue("Empty request."));
body.close();
generateResult(GeneralResponse::ResponseCode::BAD, body.slice());
return status::DONE;
}
auto s = std::chrono::system_clock::now(); // Leadership established?
std::chrono::seconds timeout(1);
while(_agent->size() > 1 && _agent->leaderID() == "") {
std::this_thread::sleep_for(duration_t(100));
if ((std::chrono::system_clock::now()-s) > timeout) {
Builder body;
body.openObject();
body.add("message", VPackValue("No leader"));
body.close();
generateResult(GeneralResponse::ResponseCode::SERVICE_UNAVAILABLE,
body.slice());
LOG_TOPIC(ERR, Logger::AGENCY) << "We don't know who the leader is";
return status::DONE;
}
}
write_ret_t ret = _agent->write(query);
@ -215,8 +226,20 @@ inline RestHandler::status RestAgencyHandler::handleRead() {
return status::DONE;
}
auto s = std::chrono::system_clock::now(); // Leadership established?
std::chrono::seconds timeout(1);
while(_agent->size() > 1 && _agent->leaderID() == "") {
std::this_thread::sleep_for(duration_t(100));
if ((std::chrono::system_clock::now()-s) > timeout) {
Builder body;
body.openObject();
body.add("message", VPackValue("No leader"));
body.close();
generateResult(GeneralResponse::ResponseCode::SERVICE_UNAVAILABLE,
body.slice());
LOG_TOPIC(ERR, Logger::AGENCY) << "We don't know who the leader is";
return status::DONE;
}
}
read_ret_t ret = _agent->read(query);

View File

@ -130,20 +130,24 @@ RestHandler::status RestAgencyPrivHandler::execute() {
return reportBadQuery(); // bad query
}
} else if (_request->suffix()[0] == "gossip") {
//if (_agent->serveActiveAgent()) { // only during startup (see Agent)
arangodb::velocypack::Options options;
query_t query = _request->toVelocyPackBuilderPtr(&options);
try {
query_t ret = _agent->gossip(query);
result.add("id",ret->slice().get("id"));
result.add("endpoint",ret->slice().get("endpoint"));
result.add("pool",ret->slice().get("pool"));
} catch (std::exception const& e) {
return reportBadQuery(e.what());
}
//} else { // Gone!
// return reportGone();
// }
arangodb::velocypack::Options options;
query_t query = _request->toVelocyPackBuilderPtr(&options);
try {
query_t ret = _agent->gossip(query);
result.add("id",ret->slice().get("id"));
result.add("endpoint",ret->slice().get("endpoint"));
result.add("pool",ret->slice().get("pool"));
} catch (std::exception const& e) {
return reportBadQuery(e.what());
}
} else if (_request->suffix()[0] == "inform") {
arangodb::velocypack::Options options;
query_t query = _request->toVelocyPackBuilderPtr(&options);
try {
_agent->notify(query);
} catch (std::exception const& e) {
return reportBadQuery(e.what());
}
} else {
generateError(GeneralResponse::ResponseCode::NOT_FOUND,
404); // nothing else here

View File

@ -50,7 +50,8 @@ Supervision::Supervision()
_frequency(5),
_gracePeriod(15),
_jobId(0),
_jobIdMax(0) {}
_jobIdMax(0),
_selfShutdown(false) {}
Supervision::~Supervision() { shutdown(); };
@ -132,7 +133,7 @@ std::vector<check_t> Supervision::checkDBServers() {
report->add("LastHeartbeatAcked",
VPackValue(
timepointToString(std::chrono::system_clock::now())));
report->add("Status", VPackValue("GOOD"));
report->add("Status", VPackValue(Supervision::HEALTH_STATUS_GOOD));
} else {
std::chrono::seconds t{0};
t = std::chrono::duration_cast<std::chrono::seconds>(
@ -254,17 +255,17 @@ std::vector<check_t> Supervision::checkCoordinators() {
report->add("LastHeartbeatAcked",
VPackValue(
timepointToString(std::chrono::system_clock::now())));
report->add("Status", VPackValue("GOOD"));
report->add("Status", VPackValue(Supervision::HEALTH_STATUS_GOOD));
} else {
std::chrono::seconds t{0};
t = std::chrono::duration_cast<std::chrono::seconds>(
std::chrono::system_clock::now()-stringToTimepoint(lastHeartbeatAcked));
if (t.count() > _gracePeriod) { // Failure
if (lastStatus == "BAD") {
report->add("Status", VPackValue("FAILED"));
if (lastStatus == Supervision::HEALTH_STATUS_BAD) {
report->add("Status", VPackValue(Supervision::HEALTH_STATUS_FAILED));
}
} else {
report->add("Status", VPackValue("BAD"));
report->add("Status", VPackValue(Supervision::HEALTH_STATUS_BAD));
}
}
@ -346,8 +347,15 @@ void Supervision::run() {
while (!this->isStopping()) {
updateSnapshot();
// mop: always do health checks so shutdown is able to detect if a server failed otherwise
if (_agent->leading()) {
doChecks();
}
if (isShuttingDown()) {
handleShutdown();
} else if (_selfShutdown) {
ApplicationServer::server->beginShutdown();
} else if (_agent->leading()) {
if (!handleJobs()) {
break;
@ -365,21 +373,57 @@ bool Supervision::isShuttingDown() {
}
}
/// @brief Report whether the given server's health status in the current
/// snapshot equals HEALTH_STATUS_GOOD.  Any lookup failure (for instance
/// a missing key in the snapshot) is treated as "not good".
bool Supervision::serverGood(const std::string& serverName) {
  try {
    std::string const statusPath = healthPrefix + serverName + "/Status";
    return _snapshot(statusPath).getString() == Supervision::HEALTH_STATUS_GOOD;
  } catch (...) {
    return false;
  }
}
// Coordinate a cluster-wide shutdown requested via the agency's /Shutdown
// key: wait until no registered server still reports a GOOD health status,
// then begin this server's own shutdown and, if leading, remove the
// /Shutdown trigger key from the agency.
void Supervision::handleShutdown() {
  LOG_TOPIC(DEBUG, Logger::AGENCY) << "Initiating shutdown";
  _selfShutdown = true;
  LOG_TOPIC(DEBUG, Logger::AGENCY) << "Waiting for clients to shut down";
  Node::Children const& serversRegistered = _snapshot(currentServersRegisteredPrefix).children();
  // Stays true only if no registered server is still reporting GOOD.
  bool serversCleared = true;
  for (auto const& server : serversRegistered) {
    // "Version" is a bookkeeping entry, not a server.
    if (server.first == "Version") {
      continue;
    }
    LOG_TOPIC(DEBUG, Logger::AGENCY)
      << "Waiting for " << server.first << " to shutdown";
    // A server that is no longer GOOD is considered gone (it may also
    // have died rather than shut down cleanly -- hence the warning).
    if (!serverGood(server.first)) {
      LOG_TOPIC(WARN, Logger::AGENCY)
        << "Server " << server.first << " did not shutdown properly it seems!";
      continue;
    }
    // At least one server is still healthy; keep waiting.
    serversCleared = false;
  }
  if (serversCleared) {
    ApplicationServer::server->beginShutdown();
    if (_agent->leading()) {
      // Build the write envelope [[{ <prefix>/Shutdown: {op: delete} }]]
      // that removes the shutdown trigger from the agency.
      query_t del = std::make_shared<Builder>();
      del->openArray();
      del->openArray();
      del->openObject();
      del->add(_agencyPrefix + "/Shutdown", VPackValue(VPackValueType::Object));
      del->add("op", VPackValue("delete"));
      del->close();
      // Close the op object, the transaction object and both arrays.
      del->close(); del->close(); del->close();
      auto result = _agent->write(del);
      if (result.indices.size() != 1) {
        LOG(ERR) << "Invalid resultsize of " << result.indices.size() << " found during shutdown";
      } else {
        // Make sure the delete has been replicated before going down.
        if (!_agent->waitFor(result.indices.at(0))) {
          LOG(ERR) << "Result was not written to followers during shutdown";
        }
      }
    }
  }
}
@ -390,7 +434,6 @@ bool Supervision::handleJobs() {
}
// Do supervision
doChecks();
shrinkCluster();
workJobs();
@ -398,7 +441,6 @@ bool Supervision::handleJobs() {
}
void Supervision::workJobs() {
Node::Children const& todos = _snapshot(toDoPrefix).children();
Node::Children const& pends = _snapshot(pendingPrefix).children();

View File

@ -108,6 +108,9 @@ class Supervision : public arangodb::Thread {
void wakeUp();
private:
static constexpr const char* HEALTH_STATUS_GOOD = "GOOD";
static constexpr const char* HEALTH_STATUS_BAD = "BAD";
static constexpr const char* HEALTH_STATUS_FAILED = "FAILED";
/// @brief Update agency prefix from agency itself
bool updateAgencyPrefix (size_t nTries = 10, int intervalSec = 1);
@ -164,6 +167,18 @@ class Supervision : public arangodb::Thread {
long _gracePeriod;
uint64_t _jobId;
uint64_t _jobIdMax;
  // mop: this feels hacky... we have a chicken-and-egg problem here.
  // We use /Shutdown in the agency to determine that the cluster should
  // shut down. Once every member is down we must not persist this flag,
  // or we would immediately initiate shutdown again after a restart. We
  // use this flag to temporarily record that shutdown was initiated; once
  // the /Shutdown entry has been removed, we shut ourselves down. The
  // assumption is that while the cluster is shutting down, every agent
  // hits the shutdown code at least once, so this flag gets set at some point.
bool _selfShutdown;
bool serverGood(const std::string&);
static std::string _agencyPrefix;
};

View File

@ -801,7 +801,7 @@ bool AstNode::isOnlyEqualityMatch() const {
}
for (size_t i = 0; i < numMembers(); ++i) {
auto op = getMember(i);
auto op = getMemberUnchecked(i);
if (op->type != arangodb::aql::NODE_TYPE_OPERATOR_BINARY_EQ) {
return false;
}

View File

@ -1160,7 +1160,7 @@ void ExecutionNode::RegisterPlan::after(ExecutionNode* en) {
if (it2 == varInfo.end()) {
// report an error here to prevent crashing
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "missing variable #" + std::to_string(v->id) + " while planning registers");
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, "missing variable #" + std::to_string(v->id) + " for node " + en->getTypeString() + " while planning registers");
}
// finally adjust the variable inside the IN calculation

View File

@ -79,24 +79,37 @@ V8Expression* Executor::generateExpression(AstNode const* node) {
constantValues->ForceSet(TRI_V8_STD_STRING(name), toV8(isolate, it.first));
}
// compile the expression
v8::Handle<v8::Value> func(compileExpression());
TRI_ASSERT(_buffer != nullptr);
// exit early if an error occurred
HandleV8Error(tryCatch, func);
v8::Handle<v8::Script> compiled = v8::Script::Compile(
TRI_V8_STD_STRING((*_buffer)), TRI_V8_ASCII_STRING("--script--"));
// a "simple" expression here is any expression that will only return
// non-cyclic
// data and will not return any special JavaScript types such as Date, RegExp
// or
// Function
// as we know that all built-in AQL functions are simple but do not know
// anything
// about user-defined functions, so we expect them to be non-simple
bool const isSimple = (!node->callsUserDefinedFunction());
if (! compiled.IsEmpty()) {
v8::Handle<v8::Value> func(compiled->Run());
return new V8Expression(isolate, v8::Handle<v8::Function>::Cast(func),
constantValues, isSimple);
// exit early if an error occurred
HandleV8Error(tryCatch, func, _buffer, false);
// a "simple" expression here is any expression that will only return
// non-cyclic
// data and will not return any special JavaScript types such as Date, RegExp
// or
// Function
// as we know that all built-in AQL functions are simple but do not know
// anything
// about user-defined functions, so we expect them to be non-simple
bool const isSimple = (!node->callsUserDefinedFunction());
return new V8Expression(isolate, v8::Handle<v8::Function>::Cast(func),
constantValues, isSimple);
}
else {
v8::Handle<v8::Value> empty;
HandleV8Error(tryCatch, empty, _buffer, true);
// well we're almost sure we never reach this since the above call should throw:
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
}
}
/// @brief executes an expression directly
@ -114,43 +127,56 @@ int Executor::executeExpression(Query* query, AstNode const* node,
v8::HandleScope scope(isolate);
v8::TryCatch tryCatch;
// compile the expression
v8::Handle<v8::Value> func(compileExpression());
// exit early if an error occurred
HandleV8Error(tryCatch, func);
TRI_ASSERT(_buffer != nullptr);
TRI_ASSERT(query != nullptr);
v8::Handle<v8::Script> compiled = v8::Script::Compile(
TRI_V8_STD_STRING((*_buffer)), TRI_V8_ASCII_STRING("--script--"));
TRI_GET_GLOBALS();
v8::Handle<v8::Value> result;
auto old = v8g->_query;
if (! compiled.IsEmpty()) {
try {
v8g->_query = static_cast<void*>(query);
TRI_ASSERT(v8g->_query != nullptr);
v8::Handle<v8::Value> func(compiled->Run());
// execute the function
v8::Handle<v8::Value> args[] = { v8::Object::New(isolate), v8::Object::New(isolate) };
result = v8::Handle<v8::Function>::Cast(func)
->Call(v8::Object::New(isolate), 2, args);
// exit early if an error occurred
HandleV8Error(tryCatch, func, _buffer, false);
v8g->_query = old;
TRI_ASSERT(query != nullptr);
// exit if execution raised an error
HandleV8Error(tryCatch, result);
} catch (...) {
v8g->_query = old;
throw;
TRI_GET_GLOBALS();
v8::Handle<v8::Value> result;
auto old = v8g->_query;
try {
v8g->_query = static_cast<void*>(query);
TRI_ASSERT(v8g->_query != nullptr);
// execute the function
v8::Handle<v8::Value> args[] = { v8::Object::New(isolate), v8::Object::New(isolate) };
result = v8::Handle<v8::Function>::Cast(func)
->Call(v8::Object::New(isolate), 2, args);
v8g->_query = old;
// exit if execution raised an error
HandleV8Error(tryCatch, result, _buffer, false);
} catch (...) {
v8g->_query = old;
throw;
}
if (result->IsUndefined()) {
// undefined => null
builder.add(VPackValue(VPackValueType::Null));
return TRI_ERROR_NO_ERROR;
}
return TRI_V8ToVPack(isolate, builder, result, false);
}
else {
v8::Handle<v8::Value> empty;
HandleV8Error(tryCatch, empty, _buffer, true);
if (result->IsUndefined()) {
// undefined => null
builder.add(VPackValue(VPackValueType::Null));
return TRI_ERROR_NO_ERROR;
}
return TRI_V8ToVPack(isolate, builder, result, false);
// well we're almost sure we never reach this since the above call should throw:
THROW_ARANGO_EXCEPTION(TRI_ERROR_INTERNAL);
}
}
/// @brief returns a reference to a built-in function
@ -277,7 +303,9 @@ v8::Handle<v8::Value> Executor::toV8(v8::Isolate* isolate,
/// @brief checks if a V8 exception has occurred and throws an appropriate C++
/// exception from it if so
void Executor::HandleV8Error(v8::TryCatch& tryCatch,
v8::Handle<v8::Value>& result) {
v8::Handle<v8::Value>& result,
arangodb::basics::StringBuffer* const buffer,
bool duringCompile) {
ISOLATE;
if (tryCatch.HasCaught()) {
@ -326,6 +354,11 @@ void Executor::HandleV8Error(v8::TryCatch& tryCatch,
// exception is no ArangoError
std::string details(TRI_ObjectToString(tryCatch.Exception()));
if (buffer) {
std::string script(buffer->c_str(), buffer->length());
LOG(ERR) << details << " " << script;
details += "\nSee log for more details";
}
if (*stacktrace && stacktrace.length() > 0) {
details += "\nstacktrace of offending AQL function: ";
details += *stacktrace;
@ -334,35 +367,36 @@ void Executor::HandleV8Error(v8::TryCatch& tryCatch,
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_QUERY_SCRIPT, details);
}
std::string msg("unknown error in scripting");
if (duringCompile) {
msg += " (during compilation)";
}
if (buffer) {
std::string script(buffer->c_str(), buffer->length());
LOG(ERR) << msg << " " << script;
msg += " See log for details";
}
// we can't figure out what kind of error occurred and throw a generic error
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
"unknown error in scripting");
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, msg);
}
if (result.IsEmpty()) {
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
"unknown error in scripting");
std::string msg("unknown error in scripting");
if (duringCompile) {
msg += " (during compilation)";
}
if (buffer) {
std::string script(buffer->c_str(), buffer->length());
LOG(ERR) << msg << " " << script;
msg += " See log for details";
}
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, msg);
}
// if we get here, no exception has been raised
}
/// @brief compile a V8 function from the code contained in the buffer
v8::Handle<v8::Value> Executor::compileExpression() {
TRI_ASSERT(_buffer != nullptr);
ISOLATE;
v8::Handle<v8::Script> compiled = v8::Script::Compile(
TRI_V8_STD_STRING((*_buffer)), TRI_V8_ASCII_STRING("--script--"));
if (compiled.IsEmpty()) {
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL,
"unable to compile v8 expression");
}
return compiled->Run();
}
/// @brief generate JavaScript code for an arbitrary expression
void Executor::generateCodeExpression(AstNode const* node) {
// initialize and/or clear the buffer

View File

@ -64,7 +64,7 @@ class Executor {
/// @brief checks if a V8 exception has occurred and throws an appropriate C++
/// exception from it if so
static void HandleV8Error(v8::TryCatch&, v8::Handle<v8::Value>&);
static void HandleV8Error(v8::TryCatch&, v8::Handle<v8::Value>&, arangodb::basics::StringBuffer* const, bool duringCompile);
private:
/// @brief traverse the expression and note all user-defined functions
@ -153,9 +153,6 @@ class Executor {
/// @brief create the string buffer
arangodb::basics::StringBuffer* initializeBuffer();
/// @brief compile a V8 function from the code contained in the buffer
v8::Handle<v8::Value> compileExpression();
private:
/// @brief a string buffer used for operations
arangodb::basics::StringBuffer* _buffer;

View File

@ -844,6 +844,7 @@ AqlValue Expression::executeSimpleExpressionReference(
std::string msg("variable not found '");
msg.append(v->name);
msg.append("' in executeSimpleExpression()");
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, msg.c_str());
}

View File

@ -1798,7 +1798,7 @@ struct SortToIndexNode final : public WalkerWorker<ExecutionNode> {
auto const& indexes = indexNode->getIndexes();
auto cond = indexNode->condition();
TRI_ASSERT(cond != nullptr);
Variable const* outVariable = indexNode->outVariable();
TRI_ASSERT(outVariable != nullptr);
@ -1817,7 +1817,7 @@ struct SortToIndexNode final : public WalkerWorker<ExecutionNode> {
return true;
}
if (!isSparse) {
if (isSparse) {
return true;
}
@ -1831,6 +1831,8 @@ struct SortToIndexNode final : public WalkerWorker<ExecutionNode> {
// all indexes use the same attributes and index conditions guarantee
// sorted output
}
TRI_ASSERT(indexes.size() == 1 || cond->isSorted());
// if we get here, we either have one index or multiple indexes on the same
// attributes
@ -1878,11 +1880,16 @@ struct SortToIndexNode final : public WalkerWorker<ExecutionNode> {
if (numCovered == sortCondition.numAttributes() &&
sortCondition.isUnidirectional() &&
(isSorted || fields.size() == sortCondition.numAttributes())) {
(isSorted || fields.size() >= sortCondition.numAttributes())) {
// no need to sort
_plan->unlinkNode(_plan->getNodeById(_sortNode->id()));
indexNode->reverse(sortCondition.isDescending());
_modified = true;
} else if (numCovered > 0 && sortCondition.isUnidirectional()) {
// remove the first few attributes if they are constant
SortNode* sortNode = static_cast<SortNode*>(_plan->getNodeById(_sortNode->id()));
sortNode->removeConditions(numCovered);
_modified = true;
}
}
}

View File

@ -429,6 +429,6 @@ double ShortestPathNode::estimateCost(size_t& nrItems) const {
edgesCount += edges;
}
nrItems = edgesCount + static_cast<size_t>(log(nodesEstimate) * nodesEstimate);
nrItems = edgesCount + static_cast<size_t>(std::log2(nodesEstimate) * nodesEstimate);
return depCost + nrItems;
}

View File

@ -160,21 +160,21 @@ size_t SortCondition::coveredAttributes(
// no match
bool isConstant = false;
if (IsContained(_constAttributes, indexAttributes[i])) {
if (IsContained(indexAttributes, field.second) &&
IsContained(_constAttributes, field.second)) {
// no field match, but a constant attribute
isConstant = true;
++fieldsPosition;
++numCovered;
}
if (!isConstant &&
IsContained(_constAttributes, indexAttributes[i])) {
// no field match, but a constant attribute
isConstant = true;
++i; // next index field
}
if (!isConstant) {
if (IsContained(indexAttributes, field.second) &&
IsContained(_constAttributes, field.second)) {
// no field match, but a constant attribute
isConstant = true;
++fieldsPosition;
++numCovered;
}
}
if (!isConstant) {
break;

View File

@ -121,6 +121,12 @@ bool SortNode::simplify(ExecutionPlan* plan) {
return _elements.empty();
}
/// @brief removes the first count elements from the sort condition;
/// used when the leading sort attributes are known to be constant
/// (e.g. guaranteed by a FILTER), so sorting by them is a no-op
void SortNode::removeConditions(size_t count) {
  // strict '>' : at least one sort element must remain after removal
  TRI_ASSERT(_elements.size() > count);
  TRI_ASSERT(count > 0);
  _elements.erase(_elements.begin(), _elements.begin() + count);
}
/// @brief returns all sort information
SortInformation SortNode::getSortInformation(
@ -178,5 +184,5 @@ double SortNode::estimateCost(size_t& nrItems) const {
if (nrItems <= 3.0) {
return depCost + nrItems;
}
return depCost + nrItems * log(static_cast<double>(nrItems));
return depCost + nrItems * std::log2(static_cast<double>(nrItems));
}

View File

@ -113,6 +113,11 @@ class SortNode : public ExecutionNode {
/// simplification, and false otherwise
bool simplify(ExecutionPlan*);
/// @brief removes the first count conditions from the sort condition
/// this can be used if the first conditions of the condition are constant
/// values (e.g. when a FILTER condition exists that guarantees this)
void removeConditions(size_t count);
private:
/// @brief pairs, consisting of variable and sort direction
/// (true = ascending | false = descending)

View File

@ -84,6 +84,14 @@ TraversalBlock::TraversalBlock(ExecutionEngine* engine, TraversalNode const* ep)
TRI_ASSERT(it->second.registerId < ExecutionNode::MaxRegisterId);
inRegsCur.emplace_back(it->second.registerId);
}
for (auto const& v : ep->_conditionVariables) {
inVarsCur.emplace_back(v);
auto it = ep->getRegisterPlan()->varInfo.find(v->id);
TRI_ASSERT(it != ep->getRegisterPlan()->varInfo.end());
TRI_ASSERT(it->second.registerId < ExecutionNode::MaxRegisterId);
inRegsCur.emplace_back(it->second.registerId);
}
}
}
@ -170,6 +178,7 @@ int TraversalBlock::initialize() {
void TraversalBlock::executeExpressions() {
DEBUG_BEGIN_BLOCK();
AqlItemBlock* cur = _buffer.front();
size_t index = 0;
for (auto& map : *_expressions) {
for (size_t i = 0; i < map.second.size(); ++i) {
// Right now no inVars are allowed.
@ -178,8 +187,8 @@ void TraversalBlock::executeExpressions() {
if (it != nullptr && it->expression != nullptr) {
// inVars and inRegs needs fixx
bool mustDestroy;
AqlValue a = it->expression->execute(_trx, cur, _pos, _inVars[i],
_inRegs[i], mustDestroy);
AqlValue a = it->expression->execute(_trx, cur, _pos, _inVars[index],
_inRegs[index], mustDestroy);
AqlValueGuard guard(a, mustDestroy);
@ -196,6 +205,7 @@ void TraversalBlock::executeExpressions() {
it->compareTo.reset(builder);
}
++index;
}
}
throwIfKilled(); // check if we were aborted

View File

@ -702,11 +702,10 @@ void TraversalNode::setCondition(arangodb::aql::Condition* condition) {
Ast::getReferencedVariables(condition->root(), varsUsedByCondition);
for (auto const& oneVar : varsUsedByCondition) {
if ((_vertexOutVariable != nullptr &&
oneVar->id != _vertexOutVariable->id) &&
(_edgeOutVariable != nullptr && oneVar->id != _edgeOutVariable->id) &&
(_pathOutVariable != nullptr && oneVar->id != _pathOutVariable->id) &&
(_inVariable != nullptr && oneVar->id != _inVariable->id)) {
if ((_vertexOutVariable == nullptr || oneVar->id != _vertexOutVariable->id) &&
(_edgeOutVariable == nullptr || oneVar->id != _edgeOutVariable->id) &&
(_pathOutVariable == nullptr || oneVar->id != _pathOutVariable->id) &&
(_inVariable == nullptr || oneVar->id != _inVariable->id)) {
_conditionVariables.emplace_back(oneVar);
}
}

View File

@ -135,7 +135,7 @@ AqlValue V8Expression::execute(v8::Isolate* isolate, Query* query,
v8g->_query = old;
Executor::HandleV8Error(tryCatch, result);
Executor::HandleV8Error(tryCatch, result, nullptr, false);
} catch (...) {
v8g->_query = old;
// bubble up exception

View File

@ -1276,10 +1276,11 @@ size_t ClusterComm::performRequests(std::vector<ClusterCommRequest>& requests,
} else if (res.status == CL_COMM_BACKEND_UNAVAILABLE ||
(res.status == CL_COMM_TIMEOUT && !res.sendWasComplete)) {
requests[index].result = res;
// In this case we will retry:
dueTime[index] = (std::min)(10.0,
(std::max)(0.2, 2 * (now - startTime))) +
startTime;
now;
if (dueTime[index] >= endTime) {
requests[index].done = true;
nrDone++;

View File

@ -142,7 +142,7 @@ void ImportFeature::collectOptions(
"action to perform when a unique key constraint "
"violation occurs. Possible values: " +
actionsJoined,
new DiscreteValuesParameter<StringParameter>(&_typeImport, actions));
new DiscreteValuesParameter<StringParameter>(&_onDuplicateAction, actions));
}
void ImportFeature::validateOptions(

File diff suppressed because one or more lines are too long

View File

@ -2739,4 +2739,4 @@ var cutByResolution = function (str) {
</div>
<div id="workMonitorContent" class="innerContent">
</div></script></head><body><nav class="navbar" style="display: none"><div class="primary"><div class="navlogo"><a class="logo big" href="#"><img class="arangodbLogo" src="img/arangodb_logo_big.png"></a><a class="logo small" href="#"><img class="arangodbLogo" src="img/arangodb_logo_small.png"></a><a class="version"><span>VERSION: </span><span id="currentVersion"></span></a></div><div class="statmenu" id="statisticBar"></div><div class="navmenu" id="navigationBar"></div></div></nav><div id="modalPlaceholder"></div><div class="bodyWrapper" style="display: none"><div class="centralRow"><div id="navbar2" class="navbarWrapper secondary"><div class="subnavmenu" id="subNavigationBar"></div></div><div class="resizecontainer contentWrapper"><div id="loadingScreen" class="loadingScreen" style="display: none"><i class="fa fa-circle-o-notch fa-spin fa-3x fa-fw margin-bottom"></i> <span class="sr-only">Loading...</span></div><div id="content" class="centralContent"></div><footer class="footer"><div id="footerBar"></div></footer></div></div></div><div id="progressPlaceholder" style="display:none"></div><div id="spotlightPlaceholder" style="display:none"></div><div id="graphSettingsContent" style="display: none"></div><div id="offlinePlaceholder" style="display:none"><div class="offline-div"><div class="pure-u"><div class="pure-u-1-4"></div><div class="pure-u-1-2 offline-window"><div class="offline-header"><h3>You have been disconnected from the server</h3></div><div class="offline-body"><p>The connection to the server has been lost. 
The server may be under heavy load.</p><p>Trying to reconnect in <span id="offlineSeconds">10</span> seconds.</p><p class="animation_state"><span><button class="button-success">Reconnect now</button></span></p></div></div><div class="pure-u-1-4"></div></div></div></div><div class="arangoFrame" style=""><div class="outerDiv"><div class="innerDiv"></div></div></div><script src="libs.js?version=1471438286364"></script><script src="app.js?version=1471438286364"></script></body></html>
</div></script></head><body><nav class="navbar" style="display: none"><div class="primary"><div class="navlogo"><a class="logo big" href="#"><img class="arangodbLogo" src="img/arangodb_logo_big.png"></a><a class="logo small" href="#"><img class="arangodbLogo" src="img/arangodb_logo_small.png"></a><a class="version"><span>VERSION: </span><span id="currentVersion"></span></a></div><div class="statmenu" id="statisticBar"></div><div class="navmenu" id="navigationBar"></div></div></nav><div id="modalPlaceholder"></div><div class="bodyWrapper" style="display: none"><div class="centralRow"><div id="navbar2" class="navbarWrapper secondary"><div class="subnavmenu" id="subNavigationBar"></div></div><div class="resizecontainer contentWrapper"><div id="loadingScreen" class="loadingScreen" style="display: none"><i class="fa fa-circle-o-notch fa-spin fa-3x fa-fw margin-bottom"></i> <span class="sr-only">Loading...</span></div><div id="content" class="centralContent"></div><footer class="footer"><div id="footerBar"></div></footer></div></div></div><div id="progressPlaceholder" style="display:none"></div><div id="spotlightPlaceholder" style="display:none"></div><div id="graphSettingsContent" style="display: none"></div><div id="offlinePlaceholder" style="display:none"><div class="offline-div"><div class="pure-u"><div class="pure-u-1-4"></div><div class="pure-u-1-2 offline-window"><div class="offline-header"><h3>You have been disconnected from the server</h3></div><div class="offline-body"><p>The connection to the server has been lost. 
The server may be under heavy load.</p><p>Trying to reconnect in <span id="offlineSeconds">10</span> seconds.</p><p class="animation_state"><span><button class="button-success">Reconnect now</button></span></p></div></div><div class="pure-u-1-4"></div></div></div></div><div class="arangoFrame" style=""><div class="outerDiv"><div class="innerDiv"></div></div></div><script src="libs.js?version=1472022235976"></script><script src="app.js?version=1472022235976"></script></body></html>

File diff suppressed because one or more lines are too long

View File

@ -824,15 +824,16 @@
this.waitForInit(this.graphManagement.bind(this));
return;
}
if (!this.graphManagementView) {
this.graphManagementView =
new window.GraphManagementView(
{
collection: new window.GraphCollection(),
collectionCollection: this.arangoCollectionsStore
}
);
if (this.graphManagementView) {
this.graphManagementView.undelegateEvents();
}
this.graphManagementView =
new window.GraphManagementView(
{
collection: new window.GraphCollection(),
collectionCollection: this.arangoCollectionsStore
}
);
this.graphManagementView.render();
},

View File

@ -55,7 +55,6 @@
e.preventDefault();
var name = $(e.currentTarget).parent().parent().attr('id');
name = name.substr(0, name.length - 5);
console.log(name);
window.App.navigate('graph2/' + encodeURIComponent(name), {trigger: true});
},

View File

@ -241,11 +241,21 @@
'change input[type="color"]': 'checkColor',
'change select': 'saveGraphSettings',
'focus #graphSettingsView input': 'lastFocus',
'focus #graphSettingsView select': 'lastFocus'
'focus #graphSettingsView select': 'lastFocus',
'focusout #graphSettingsView input[type="text"]': 'checkinput'
},
    // Remember the id and current value of the control that received focus,
    // so checkinput() can later detect whether the value actually changed.
    lastFocus: function (e) {
      this.lastFocussed = e.currentTarget.id;
      this.lastFocussedValue = $(e.currentTarget).val();
    },

    // Handler for focusout on text inputs: save the graph settings only if
    // this is the control that last gained focus AND its value changed.
    checkinput: function (e) {
      if (e.currentTarget.id === this.lastFocussed) {
        if (this.lastFocussedValue !== $(e.currentTarget).val()) {
          this.saveGraphSettings();
        }
      }
    },
checkEnterKey: function (e) {

View File

@ -102,6 +102,10 @@
sigma.classes.graph.addMethod('getNodeEdgesCount', function (id) {
return this.allNeighborsCount[id];
});
sigma.classes.graph.addMethod('getNodesCount', function () {
return this.nodesArray.length;
});
} catch (ignore) {}
},
@ -503,7 +507,6 @@
addNode: function () {
var self = this;
var x = self.addNodeX / 100;
var y = self.addNodeY / 100;
@ -518,7 +521,7 @@
self.currentGraph.graph.addNode({
id: id,
label: id.split('/')[1] || '',
size: self.graphConfig.nodeSize || Math.random(),
size: self.graphConfig.nodeSize || 15,
color: self.graphConfig.nodeColor || '#2ecc71',
x: x,
y: y
@ -985,7 +988,7 @@
wheel.colors = hotaru;
wheel.multiSelect = false;
wheel.clickModeRotate = false;
wheel.sliceHoverAttr = {stroke: '#fff', 'stroke-width': 2};
wheel.sliceHoverAttr = {stroke: '#fff', 'stroke-width': 4};
wheel.slicePathFunction = slicePath().DonutSlice;
wheel.createWheel([icon.edit, icon.trash, icon.flag, icon.connect, icon.expand]);
@ -1133,8 +1136,8 @@
}
});
$('#nodesCount').text(parseInt($('#nodesCount').text()) + newNodeCounter);
$('#edgesCount').text(parseInt($('#edgesCount').text()) + newEdgeCounter);
$('#nodesCount').text(parseInt($('#nodesCount').text(), 10) + newNodeCounter);
$('#edgesCount').text(parseInt($('#edgesCount').text(), 10) + newEdgeCounter);
// rerender graph
if (newNodeCounter > 0 || newEdgeCounter > 0) {
@ -1507,18 +1510,6 @@
}
};
s.bind('rightClickStage', function (e) {
self.addNodeX = e.data.captor.x;
self.addNodeY = e.data.captor.y;
self.createContextMenu(e);
self.clearMouseCanvas();
});
s.bind('rightClickNode', function (e) {
var nodeId = e.data.node.id;
self.createNodeContextMenu(nodeId, e);
});
s.bind('clickNode', function (e) {
if (self.contextState.createEdge === true) {
// create the edge
@ -1531,6 +1522,12 @@
self.addEdgeModal(foundEdgeDefinitions, self.contextState._from, self.contextState._to);
} else {
if (!self.dragging) {
if (self.contextState.createEdge === true) {
self.newEdgeColor = '#ff0000';
} else {
self.newEdgeColor = '#000000';
}
// halo on active nodes:
if (renderer === 'canvas') {
self.currentGraph.renderers[0].halo({
@ -1548,34 +1545,42 @@
nodes: [e.data.node]
});
}
if (!this.aqlMode) {
self.createNodeContextMenu(e.data.node.id, e);
}
}
}
});
s.bind('clickStage', function () {
self.clearOldContextMenu(true);
self.clearMouseCanvas();
s.renderers[0].halo({
nodes: self.activeNodes
});
s.bind('clickStage', function (e) {
if (e.data.captor.isDragging) {
self.clearOldContextMenu(true);
self.clearMouseCanvas();
} else {
// stage menu
if (!$('#nodeContextMenu').is(':visible')) {
var offset = $('#graph-container').offset();
self.addNodeX = sigma.utils.getX(e) - offset.left / 2;
self.addNodeY = sigma.utils.getY(e) - offset.top / 2;
// self.addNodeX = e.data.captor.x;
// self.addNodeY = e.data.captor.y;
self.createContextMenu(e);
self.clearMouseCanvas();
} else {
// cleanup
self.clearOldContextMenu(true);
self.clearMouseCanvas();
}
// remember halo
s.renderers[0].halo({
nodes: self.activeNodes
});
}
});
}
s.bind('doubleClickStage', function () {
self.activeNodes = [];
s.graph.nodes().forEach(function (n) {
n.color = n.originalColor;
});
s.graph.edges().forEach(function (e) {
e.color = e.originalColor;
});
$('.nodeInfoDiv').remove();
s.refresh();
});
if (renderer === 'canvas') {
// render parallel edges
if (this.graphConfig) {
@ -1584,57 +1589,70 @@
}
}
s.bind('overNode', function (e) {
if (self.contextState.createEdge === true) {
self.newEdgeColor = '#ff0000';
} else {
self.newEdgeColor = '#000000';
}
});
s.bind('clickEdge', function (e) {
showAttributes(e, false);
});
s.bind('doubleClickNode', function (e) {
var nodeId = e.data.node.id;
var toKeep = s.graph.neighbors(nodeId);
toKeep[nodeId] = e.data.node;
s.graph.nodes().forEach(function (n) {
if (toKeep[n.id]) {
n.color = n.originalColor;
} else {
n.color = '#eee';
}
});
s.graph.edges().forEach(function (e) {
if (toKeep[e.source] && toKeep[e.target]) {
e.color = 'rgb(64, 74, 83)';
} else {
e.color = '#eee';
}
});
s.refresh();
});
s.renderers[0].bind('render', function (e) {
s.renderers[0].halo({
nodes: self.activeNodes
});
});
if (!this.aqlMode) {
s.bind('rightClickNode', function (e) {
var unhighlightNodes = function () {
self.nodeHighlighted = false;
if (s.graph.getNodesCount() < 250) {
self.activeNodes = [];
s.graph.nodes().forEach(function (n) {
n.color = n.originalColor;
});
s.graph.edges().forEach(function (e) {
e.color = e.originalColor;
});
s.refresh({ skipIndexation: true });
}
};
s.bind('rightClickStage', function (e) {
unhighlightNodes();
self.nodeHighlighted = 'undefinedid';
});
s.bind('rightClickNode', function (e) {
if (self.nodeHighlighted !== e.data.node.id) {
var nodeId = e.data.node.id;
self.createNodeContextMenu(nodeId, e);
});
}
var toKeep = s.graph.neighbors(nodeId);
toKeep[nodeId] = e.data.node;
s.graph.nodes().forEach(function (n) {
if (toKeep[n.id]) {
n.color = n.originalColor;
} else {
n.color = '#eee';
}
});
s.graph.edges().forEach(function (e) {
if (toKeep[e.source] && toKeep[e.target]) {
e.color = 'rgb(64, 74, 83)';
} else {
e.color = '#eee';
}
});
self.nodeHighlighted = true;
s.refresh({ skipIndexation: true });
} else {
unhighlightNodes();
}
});
if (this.graphConfig) {
if (this.graphConfig.edgeEditable) {
s.bind('rightClickEdge', function (e) {
s.bind('clickEdge', function (e) {
var edgeId = e.data.edge.id;
self.createEdgeContextMenu(edgeId, e);
});
@ -1792,7 +1810,7 @@
// clear selected nodes state
this.selectedNodes = {};
this.activeNodes = [];
this.currentGraph.refresh();
this.currentGraph.refresh({ skipIndexation: true });
} else {
$('#selectNodes').addClass('activated');
this.graphLasso.activate();

View File

@ -549,7 +549,6 @@
// get cached query if available
var queryObject = this.getCachedQuery();
var self = this;
console.log(queryObject);
if (queryObject !== null && queryObject !== undefined && queryObject !== '') {
this.aqlEditor.setValue(queryObject.query, 1);
@ -1584,7 +1583,7 @@
window.progressView.hide();
var result = self.analyseQuery(data.result);
console.log('Using ' + result.defaultType + ' as data format.');
// console.log('Using ' + result.defaultType + ' as data format.');
if (result.defaultType === 'table') {
$('#outputEditorWrapper' + counter + ' .arangoToolbarTop').after(
'<div id="outputTable' + counter + '" class="outputTable"></div>'

View File

@ -86,7 +86,7 @@
#wheelnav-nodeContextMenu-slice-3,
#wheelnav-nodeContextMenu-slice-4,
#wheelnav-nodeContextMenu-slice-5 {
opacity: .8;
opacity: 1;
}
#wheelnav-nodeContextMenu-title-0,

View File

@ -645,6 +645,10 @@ function runThere (options, instanceInfo, file) {
'return runTest(' + JSON.stringify(file) + ', true);';
}
if (options.propagateInstanceInfo) {
testCode = 'global.instanceInfo = ' + JSON.stringify(instanceInfo) + ';\n' + testCode;
}
let httpOptions = makeAuthorizationHeaders(options);
httpOptions.method = 'POST';
httpOptions.timeout = 3600;
@ -3370,6 +3374,7 @@ testFuncs.replication_sync = function (options) {
testFuncs.resilience = function (options) {
findTests();
options.cluster = true;
options.propagateInstanceInfo = true;
if (options.dbServers < 5) {
options.dbServers = 5;
}
@ -3431,6 +3436,7 @@ testFuncs.server_http = function (options) {
testFuncs.shell_server = function (options) {
findTests();
options.propagateInstanceInfo = true;
return performTests(options, testsCases.server, 'shell_server', runThere);
};

View File

@ -43,42 +43,57 @@ var lessons = [
' number = 123;\n' +
' number = number * 10;'
},
{
title: 'Shell History',
text: 'You can access previously run commands using the up and down keys.\n' +
"It saves you from retyping 'tutorial' every time for instance."
},
{
title: 'Running Complex Instructions',
text: 'You can also run more complex instructions, such as for loops:\n\n' +
' for (i = 0; i < 10; i++) { number = number + 1; }'
' for (var i = 0; i < 10; i++) { number = number + 1; }'
},
{
title: 'Printing Results',
text: 'As you can see, the result of the last command executed is printed automatically. ' +
text: 'As you see, the result of the last command executed is printed automatically.\n' +
'To explicitly print a value at any other time, there is the print function:\n\n' +
' for (i = 0; i < 5; ++i) { print("I am a JavaScript shell"); }'
' for (var i = 0; i < 5; ++i) { print("I am a JavaScript shell"); }'
},
{
title: 'Creating Collections',
text: 'ArangoDB is a document database. This means that we store data as documents ' +
"(which are similar to JavaScript objects) in so-called 'collections'. " +
text: 'ArangoDB is primarily a document database. This means that we store data as\n' +
"documents (which are similar to JavaScript objects) in so-called 'collections'.\n" +
"Let's create a collection named 'places' now:\n\n" +
" db._create('places');\n\n" +
'Note: each collection is identified by a unique name. Trying to create a ' +
'Note: each collection is identified by a unique name. Trying to create a\n' +
'collection that already exists will produce an error.'
},
{
title: 'Displaying Collections',
text: 'Now you can take a look at the collection(s) you just created:\n\n' +
' db._collections();\n\n' +
"Please note that all collections will be returned, including ArangoDB's pre-defined " +
'system collections.'
"Please note that all collections will be returned, including ArangoDB's\n" +
'pre-defined system collections.'
},
{
title: 'Accessing a single collection',
text: 'If you want to access a particular collection, you can either write:\n\n' +
' db.places;\n\n' +
'or the more elaborate alternative:\n\n' +
" db._collection('places');\n\n" +
'Both return a collection object (if the specified collection exists).'
},
{
title: 'Creating Documents',
text: "Now we have a collection, but it is empty. So let's create some documents!\n\n" +
text: "We have a collection, but it is empty. So let's create some documents!\n\n" +
' db.places.save({ _key : "foo", city : "foo-city" });\n' +
' for (i = 0; i <= 10; i++) { db.places.save({ _key: "example" + i, zipcode: i }) };'
' for (var i = 0; i <= 10; i++) {\n' +
' db.places.save({ _key: "example" + i, zipcode: i })\n' +
' };'
},
{
title: 'Displaying All Documents',
text: 'You want to take a look at your docs? No problem:\n\n' +
text: 'You want to take a look at your documents? No problem:\n\n' +
' db.places.toArray();'
},
{
@ -89,37 +104,40 @@ var lessons = [
{
title: 'Retrieving Single Documents',
text: "As you can see, each document has some meta attributes '_id', '_key' and '_rev'.\n" +
"The '_key' attribute can be used to quickly retrieve a single document from " +
"The '_key' attribute can be used to quickly retrieve a single document from\n" +
'a collection:\n\n' +
' db.places.document("foo");\n' +
' db.places.document("example5");'
},
{
title: 'Retrieving Single Documents',
text: "The '_id' attribute can also be used to retrieve documents using the 'db' object:\n\n" +
text: "The '_id' attribute can also be used to retrieve documents using the\n" +
"'db' object:\n\n" +
' db._document("places/foo");\n' +
' db._document("places/example5");'
},
{
title: 'Modifying Documents',
text: 'You can modify existing documents. Try to add a new attribute to a document and ' +
text: 'You can modify existing documents. Try to add a new attribute to a document and\n' +
'verify whether it has been added:\n\n' +
' db._update("places/foo", { zipcode: 39535 });\n' +
' db._document("places/foo");'
},
{
title: 'Document Revisions',
text: "Note that after updating the document, its '_rev' attribute changed automatically.\n" +
"The '_rev' attribute contains a document revision number, and it can be used for " +
"conditional modifications. Here's an example of how to avoid lost updates in case " +
'multiple clients are accessing the documents in parallel:\n\n' +
text: "Note that after updating the document, its '_rev' attribute changed\n" +
'automatically.\n\n' +
"The '_rev' attribute contains a document revision number, and it can be used\n" +
"for conditional modifications. Here's an example of how to avoid lost updates\n" +
'in case multiple clients are accessing the documents in parallel:\n\n' +
' doc = db._document("places/example1");\n' +
' db._update("places/example1", { someValue: 23 });\n' +
' db._update(doc, { someValue: 42 });\n\n' +
'Note that the first update will succeed because it was unconditional. The second ' +
"update however is conditional because we're also passing the document's revision " +
"id in the first parameter to _update. As the revision id we're passing to update " +
"does not match the document's current revision anymore, the update is rejected."
'Note that the first update will succeed because it was unconditional.\n\n' +
"The second update however is conditional because we're also passing the\n" +
"document's revision id in the first parameter to _update. As the revision id\n" +
"we're passing to update does not match the document's current revision anymore,\n" +
'the update is rejected.'
},
{
title: 'Removing Documents',
@ -130,28 +148,41 @@ var lessons = [
},
{
title: 'Searching Documents',
text: 'Searching for documents with specific attributes can be done by using the ' +
'byExample method:\n\n' +
text: 'Searching for documents with specific attributes can be done by using the\n' +
"'byExample' method:\n\n" +
' db._create("users");\n' +
' for (i = 0; i < 10; ++i) { ' +
'db.users.save({ name: "username" + i, active: (i % 3 == 0), age: 30 + i }); }\n' +
' for (var i = 0; i < 10; ++i) {\n' +
' db.users.save({ name: "username" + i, active: (i % 3 == 0), age: 30 + i });\n' +
' }\n' +
' db.users.byExample({ active: false }).toArray();\n' +
' db.users.byExample({ name: "username3", active: true }).toArray();\n'
' db.users.byExample({ name: "username3", active: true }).toArray();'
},
{
title: 'Running AQL Queries',
text: 'ArangoDB also provides a query language for more complex matching:\n\n' +
' db._query("FOR u IN users FILTER u.active == true && u.age >= 33 ' +
'RETURN { username: u.name, age: u.age }").toArray();'
text: 'ArangoDB also provides a query language (AQL) for more complex matching:\n\n' +
' db._query(`\n' +
' FOR u IN users\n' +
' FILTER u.active == true && u.age >= 33\n' +
' RETURN { username: u.name, age: u.age }\n' +
' `).toArray();\n\n' +
'Wrapping multi-line queries in backticks is the most convenient way in\n' +
"today's JavaScript.\n\n" +
'See our online documentation for more details on AQL:\n' +
'https://docs.arangodb.com/'
},
{
title: 'Using Databases',
text: 'By default, the ArangoShell connects to the default database. The default database ' +
"is named '_system'. To create another database, use the '_createDatabase' method of the " +
"'db' object. To switch into an existing database, use '_useDatabase'. To get rid of a " +
"database and all of its collections, use '_dropDatabase':\n\n" +
text: 'By default, the ArangoShell connects to the default database.\n' +
"The default database is named '_system'. To create another database, use the\n" +
"'_createDatabase' method of the 'db' object. To switch to an existing database,\n" +
"use '_useDatabase':\n\n" +
' db._createDatabase("mydb");\n' +
' db._useDatabase("mydb");\n' +
' db._useDatabase("mydb");'
},
{
title: 'Removing Databases',
text: "To get rid of a database and all of its collections, use '_dropDatabase'.\n" +
"It needs to be called from within the '_system' database:\n\n" +
' db._useDatabase("_system");\n' +
' db._dropDatabase("mydb");'
}

View File

@ -49,6 +49,8 @@ function agencyTestSuite () {
var whoseTurn = 0;
var request = require("@arangodb/request");
wait(2);
function readAgency(list) {
// We simply try all agency servers in turn until one gives us an HTTP
// response:
@ -97,14 +99,6 @@ function agencyTestSuite () {
tearDown : function () {
},
////////////////////////////////////////////////////////////////////////////////
/// @brief startup timing
////////////////////////////////////////////////////////////////////////////////
testStartup : function () {
assertEqual(readAndCheck([["/x"]]), [{}]);
},
////////////////////////////////////////////////////////////////////////////////
/// @brief test to write a single top level key
////////////////////////////////////////////////////////////////////////////////

View File

@ -1832,6 +1832,8 @@ function arangoErrorToHttpCode (num) {
case arangodb.ERROR_ARANGO_COLLECTION_NOT_LOADED:
case arangodb.ERROR_ARANGO_DOCUMENT_REV_BAD:
return exports.HTTP_BAD;
case arangodb.ERROR_CLUSTER_BACKEND_UNAVAILABLE:
return exports.HTTP_SERVICE_UNAVAILABLE;
}
return exports.HTTP_BAD;

View File

@ -322,7 +322,7 @@ function createLocalDatabases (plannedDatabases, currentDatabases, writeLocked)
// TODO: handle options and user information
console.info("creating local database '%s'", payload.name);
console.debug("creating local database '%s'", payload.name);
try {
db._createDatabase(payload.name);
@ -528,7 +528,7 @@ function createLocalCollections (plannedCollections, planVersion,
if (!localCollections.hasOwnProperty(shard)) {
// must create this shard
console.info("creating local shard '%s/%s' for central '%s/%s'",
console.debug("creating local shard '%s/%s' for central '%s/%s'",
database,
shard,
database,
@ -642,7 +642,7 @@ function createLocalCollections (plannedCollections, planVersion,
if (index.type !== 'primary' && index.type !== 'edge' &&
!indexes.hasOwnProperty(index.id)) {
console.info("creating index '%s/%s': %s",
console.debug("creating index '%s/%s': %s",
database,
shard,
JSON.stringify(index));
@ -982,7 +982,7 @@ function tryLaunchJob () {
return;
}
global.KEY_SET('shardSynchronization', 'running', jobInfo);
console.info('scheduleOneShardSynchronization: have launched job', jobInfo);
console.debug('scheduleOneShardSynchronization: have launched job', jobInfo);
delete jobs.scheduled[shards[0]];
global.KEY_SET('shardSynchronization', 'scheduled', jobs.scheduled);
}
@ -1062,7 +1062,7 @@ function synchronizeOneShard (database, shard, planId, leader) {
var ok = false;
const rep = require('@arangodb/replication');
console.info("synchronizeOneShard: trying to synchronize local shard '%s/%s' for central '%s/%s'", database, shard, database, planId);
console.debug("synchronizeOneShard: trying to synchronize local shard '%s/%s' for central '%s/%s'", database, shard, database, planId);
try {
var ep = ArangoClusterInfo.getServerEndpoint(leader);
// First once without a read transaction:
@ -1123,7 +1123,7 @@ function synchronizeOneShard (database, shard, planId, leader) {
shard);
}
if (ok) {
console.info('synchronizeOneShard: synchronization worked for shard',
console.debug('synchronizeOneShard: synchronization worked for shard',
shard);
} else {
throw 'Did not work for shard ' + shard + '.';
@ -1139,7 +1139,7 @@ function synchronizeOneShard (database, shard, planId, leader) {
}
// Tell others that we are done:
terminateAndStartOther();
console.info('synchronizeOneShard: done, %s/%s, %s/%s',
console.debug('synchronizeOneShard: done, %s/%s, %s/%s',
database, shard, database, planId);
}
@ -1627,7 +1627,7 @@ var handlePlanChange = function (plan, current) {
try {
versions.success = handleChanges(plan, current, writeLocked);
console.info('plan change handling successful');
console.debug('plan change handling successful');
} catch (err) {
console.error('error details: %s', JSON.stringify(err));
console.error('error stack: %s', err.stack);
@ -1829,7 +1829,8 @@ function rebalanceShards () {
db._useDatabase('_system');
}
}
console.info("Rebalancing shards");
console.info(shardMap);
console.info(dbTab);

View File

@ -257,12 +257,13 @@ function optimizerIndexesSortTestSuite () {
c.ensureHashIndex("value2", "value3");
var queries = [
[ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value2 ASC RETURN i.value2", true ],
[ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value2 DESC RETURN i.value2", true ],
[ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value2 ASC RETURN i.value2", false ],
[ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value2 DESC RETURN i.value2", false ],
[ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value2, i.value3 RETURN i.value2", false ],
[ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value2 ASC, i.value3 DESC RETURN i.value2", false ],
[ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value2 DESC, i.value3 DESC RETURN i.value2", false ],
[ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && PASSTHRU(1) SORT i.value2 RETURN i.value2", true ]
[ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && PASSTHRU(1) SORT i.value2 RETURN i.value2", false ],
[ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value2, i.value4 RETURN i.value2", true ]
];
queries.forEach(function(query) {
@ -313,10 +314,12 @@ function optimizerIndexesSortTestSuite () {
var queries = [
[ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value2 ASC, i.value3 ASC RETURN i.value2", false ],
[ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value2 DESC, i.value3 DESC RETURN i.value2", false ],
[ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value3 ASC RETURN i.value2", true ],
[ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value2 ASC RETURN i.value2", true ],
[ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value3 DESC RETURN i.value2", true ],
[ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value2 DESC RETURN i.value2", true ]
[ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value3 ASC RETURN i.value2", false ],
[ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value2 ASC RETURN i.value2", false ],
[ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value3 DESC RETURN i.value2", false ],
[ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value2 DESC RETURN i.value2", false ],
[ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value2, i.value4 DESC RETURN i.value2", true ],
[ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value4, i.value2 DESC RETURN i.value2", true ]
];
queries.forEach(function(query) {
@ -356,12 +359,12 @@ function optimizerIndexesSortTestSuite () {
[ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value3 ASC, i.value4 ASC RETURN i.value2", false ],
[ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 SORT i.value3 DESC, i.value4 DESC RETURN i.value2" ,false ],
[ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value3 ASC RETURN i.value2", false ],
[ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value3 DESC RETURN i.value2", false ],
[ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value3 ASC, i.value4 ASC RETURN i.value2", false ],
[ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value3 DESC, i.value4 DESC RETURN i.value2", false ],
[ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value4 ASC RETURN i.value2", false ],
[ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value4 DESC RETURN i.value2", false ],
[ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value3 ASC RETURN i.value2", true ],
[ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value3 DESC RETURN i.value2", true ],
[ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value3 ASC, i.value4 ASC RETURN i.value2", true ],
[ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value3 DESC, i.value4 DESC RETURN i.value2", true ],
[ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value4 ASC RETURN i.value2", true ],
[ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value4 DESC RETURN i.value2", true ],
[ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value2 ASC, i.value3 ASC, i.value4 ASC RETURN i.value2", true ],
[ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value2 DESC, i.value3 DESC, i.value4 DESC RETURN i.value2", true ],
[ "FOR i IN " + c.name() + " FILTER i.value2 == 2 && i.value3 == 2 && i.value4 == 2 SORT i.value2 ASC, i.value3 ASC, i.value4 DESC RETURN i.value2", true ]

View File

@ -256,7 +256,56 @@ function optimizerRuleTestSuite () {
assertNotEqual(-1, result.plan.rules.indexOf(ruleName), query);
assertEqual(simplePlan[2].type, "NoResultsNode");
});
},
////////////////////////////////////////////////////////////////////////////////
/// @brief test multiple conditions
////////////////////////////////////////////////////////////////////////////////
testCondVars1 : function () {
var queries = [
"LET data = (FOR i IN 1..1 RETURN i) FOR v, e, p IN 1..10 OUTBOUND data GRAPH '" + graphName + "' FILTER p.vertices[0]._id == '123' FILTER p.vertices[1]._id != null FILTER p.edges[0]._id IN data[*].foo.bar RETURN 1"
];
queries.forEach(function(query) {
var result = AQL_EXPLAIN(query);
assertNotEqual(-1, result.plan.rules.indexOf(ruleName), query);
assertEqual(0, AQL_EXECUTE(query).json.length);
});
},
////////////////////////////////////////////////////////////////////////////////
/// @brief test multiple conditions
////////////////////////////////////////////////////////////////////////////////
testCondVars2 : function () {
var queries = [
"LET data = (FOR i IN 1..1 RETURN i) FOR v, e, p IN 1..10 OUTBOUND 'circles/A' GRAPH '" + graphName + "' FILTER p.vertices[0]._id == '123' FILTER p.vertices[1]._id != null FILTER p.edges[0]._id IN data[*].foo.bar RETURN 1"
];
queries.forEach(function(query) {
var result = AQL_EXPLAIN(query);
assertNotEqual(-1, result.plan.rules.indexOf(ruleName), query);
assertEqual(0, AQL_EXECUTE(query).json.length);
});
},
////////////////////////////////////////////////////////////////////////////////
/// @brief test multiple conditions
////////////////////////////////////////////////////////////////////////////////
testCondVars3 : function () {
var queries = [
"LET data = (FOR i IN 1..1 RETURN i) FOR v, e, p IN 1..10 OUTBOUND 'circles/A' GRAPH '" + graphName + "' FILTER p.vertices[0]._id == '123' FILTER p.vertices[1]._id != null FILTER p.edges[0]._id IN data[*].foo.bar FILTER p.edges[1]._key IN data[*].bar.baz._id RETURN 1"
];
queries.forEach(function(query) {
var result = AQL_EXPLAIN(query);
assertNotEqual(-1, result.plan.rules.indexOf(ruleName), query);
assertEqual(0, AQL_EXECUTE(query).json.length);
});
}
};
}

View File

@ -329,8 +329,9 @@ function optimizerRuleTestSuite() {
hasIndexNodeWithRanges(result);
result = AQL_EXPLAIN(query, { }, paramIndexRangeSortFilter);
assertEqual([ IndexesRule, FilterRemoveRule ],
removeAlwaysOnClusterRules(result.plan.rules), query);
var rules = removeAlwaysOnClusterRules(result.plan.rules);
assertNotEqual(-1, rules.indexOf(IndexesRule));
assertNotEqual(-1, rules.indexOf(FilterRemoveRule));
hasNoFilterNode(result);
hasIndexNodeWithRanges(result);

View File

@ -158,45 +158,49 @@ function optimizerRuleTestSuite() {
skiplist.ensureIndex({ type: "hash", fields: [ "y", "z" ], unique: false });
var queries = [
[ "FOR v IN " + colName + " FILTER v.u == 1 SORT v.u RETURN 1", false ],
[ "FOR v IN " + colName + " FILTER v.c == 1 SORT v.c RETURN 1", true ],
[ "FOR v IN " + colName + " FILTER v.c == 1 SORT v.z RETURN 1", false ],
[ "FOR v IN " + colName + " FILTER v.c == 1 SORT v.f RETURN 1", false ],
[ "FOR v IN " + colName + " FILTER v.y == 1 SORT v.z RETURN 1", false ],
[ "FOR v IN " + colName + " FILTER v.y == 1 SORT v.y RETURN 1", false ],
[ "FOR v IN " + colName + " FILTER v.z == 1 SORT v.y RETURN 1", false ],
[ "FOR v IN " + colName + " FILTER v.z == 1 SORT v.z RETURN 1", false ],
[ "FOR v IN " + colName + " FILTER v.y == 1 && v.z == 1 SORT v.y RETURN 1", false ],
[ "FOR v IN " + colName + " FILTER v.y == 1 && v.z == 1 SORT v.z RETURN 1", false ],
[ "FOR v IN " + colName + " FILTER v.y == 1 && v.z == 1 SORT v.y, v.z RETURN 1", true ],
[ "FOR v IN " + colName + " FILTER v.y == 1 && v.z == 1 SORT v.z, v.y RETURN 1", false ], // not supported yet
[ "FOR v IN " + colName + " FILTER v.d == 1 SORT v.d RETURN 1", true ],
[ "FOR v IN " + colName + " FILTER v.d == 1 && v.e == 1 SORT v.d RETURN 1", true ],
[ "FOR v IN " + colName + " FILTER v.d == 1 SORT v.e RETURN 1", false ],
[ "FOR v IN " + colName + " FILTER v.a == 1 SORT v.a, v.b RETURN 1", true ],
[ "FOR v IN " + colName + " FILTER v.a == 1 && v.b == 1 SORT v.a, v.b RETURN 1", true ],
[ "FOR v IN " + colName + " FILTER v.a == 1 SORT v.a RETURN 1", true ],
[ "FOR v IN " + colName + " FILTER v.a == 1 SORT v.a, v.b RETURN 1", true ],
[ "FOR v IN " + colName + " FILTER v.a == 1 SORT v.b RETURN 1", true ],
[ "FOR v IN " + colName + " FILTER v.a == 1 && v.b == 1 SORT v.b RETURN 1", true ],
[ "FOR v IN " + colName + " FILTER v.b == 1 SORT v.a, v.b RETURN 1", true ],
[ "FOR v IN " + colName + " FILTER v.b == 1 SORT v.b RETURN 1", false ],
[ "FOR v IN " + colName + " FILTER v.b == 1 SORT v.b, v.a RETURN 1", false ],
[ "FOR v IN " + colName + " FILTER v.a == 1 && v.b == 1 SORT v.b, v.a RETURN 1", false ],
[ "FOR v IN " + colName + " FILTER v.a == 1 && v.b == 1 SORT v.a, v.b RETURN 1", true ],
[ "FOR v IN " + colName + " FILTER v.a == 1 && v.b == 1 SORT v.a, v.c RETURN 1", false ],
[ "FOR v IN " + colName + " FILTER v.a == 1 && v.b == 1 SORT v.b, v.a RETURN 1", false ]
[ "FOR v IN " + colName + " FILTER v.u == 1 SORT v.u RETURN 1", false, true ],
[ "FOR v IN " + colName + " FILTER v.c == 1 SORT v.c RETURN 1", true, false ],
[ "FOR v IN " + colName + " FILTER v.c == 1 SORT v.z RETURN 1", false, true ],
[ "FOR v IN " + colName + " FILTER v.c == 1 SORT v.f RETURN 1", false, true ],
[ "FOR v IN " + colName + " FILTER v.y == 1 SORT v.z RETURN 1", false, true ],
[ "FOR v IN " + colName + " FILTER v.y == 1 SORT v.y RETURN 1", false, true ],
[ "FOR v IN " + colName + " FILTER v.z == 1 SORT v.y RETURN 1", false, true ],
[ "FOR v IN " + colName + " FILTER v.z == 1 SORT v.z RETURN 1", false, true ],
[ "FOR v IN " + colName + " FILTER v.y == 1 && v.z == 1 SORT v.y RETURN 1", true, false ],
[ "FOR v IN " + colName + " FILTER v.y == 1 && v.z == 1 SORT v.z RETURN 1", true, false ],
[ "FOR v IN " + colName + " FILTER v.y == 1 && v.z == 1 SORT v.y, v.z RETURN 1", true, false ],
[ "FOR v IN " + colName + " FILTER v.y == 1 && v.z == 1 SORT v.z, v.y RETURN 1", true, false ],
[ "FOR v IN " + colName + " FILTER v.d == 1 SORT v.d RETURN 1", true, false ],
[ "FOR v IN " + colName + " FILTER v.d == 1 && v.e == 1 SORT v.d RETURN 1", true, false ],
[ "FOR v IN " + colName + " FILTER v.d == 1 SORT v.e RETURN 1", false, true ],
[ "FOR v IN " + colName + " FILTER v.a == 1 SORT v.a, v.b RETURN 1", true, false ],
[ "FOR v IN " + colName + " FILTER v.a == 1 && v.b == 1 SORT v.a, v.b RETURN 1", true, false ],
[ "FOR v IN " + colName + " FILTER v.a == 1 SORT v.a RETURN 1", true, false ],
[ "FOR v IN " + colName + " FILTER v.a == 1 SORT v.a, v.b RETURN 1", true, false ],
[ "FOR v IN " + colName + " FILTER v.a == 1 SORT v.b RETURN 1", true, false ],
[ "FOR v IN " + colName + " FILTER v.a == 1 && v.b == 1 SORT v.b RETURN 1", true, false ],
[ "FOR v IN " + colName + " FILTER v.b == 1 SORT v.a, v.b RETURN 1", true, false ],
[ "FOR v IN " + colName + " FILTER v.b == 1 SORT v.b RETURN 1", false, true ],
[ "FOR v IN " + colName + " FILTER v.b == 1 SORT v.b, v.a RETURN 1", false, true ],
[ "FOR v IN " + colName + " FILTER v.a == 1 && v.b == 1 SORT v.b, v.a RETURN 1", true, false ],
[ "FOR v IN " + colName + " FILTER v.a == 1 && v.b == 1 SORT v.a, v.b RETURN 1", true, false ],
[ "FOR v IN " + colName + " FILTER v.a == 1 && v.b == 1 SORT v.a, v.c RETURN 1", true, true ],
[ "FOR v IN " + colName + " FILTER v.a == 1 && v.b == 1 SORT v.b, v.a RETURN 1", true, false ],
[ "FOR v IN " + colName + " FILTER v.a == 1 && v.b == 1 SORT v.a, v.b, v.c RETURN 1", true, true ]
];
queries.forEach(function(query) {
var result = AQL_EXPLAIN(query[0]);
if (query[1]) {
assertNotEqual(-1, removeAlwaysOnClusterRules(result.plan.rules).indexOf(ruleName), query[0]);
hasNoSortNode(result);
}
else {
assertEqual(-1, removeAlwaysOnClusterRules(result.plan.rules).indexOf(ruleName), query[0]);
}
if (query[2]) {
hasSortNode(result);
} else {
hasNoSortNode(result);
}
});
},
@ -419,7 +423,7 @@ function optimizerRuleTestSuite() {
QResults[2] = AQL_EXECUTE(query, { }, paramIndexFromSort_IndexRange).json;
XPresult = AQL_EXPLAIN(query, { }, paramIndexFromSort_IndexRange);
assertEqual([ secondRuleName ], removeAlwaysOnClusterRules(XPresult.plan.rules).sort());
assertEqual([ ruleName, secondRuleName ], removeAlwaysOnClusterRules(XPresult.plan.rules).sort());
// The sortnode and its calculation node should not have been removed.
hasSortNode(XPresult);
hasCalculationNodes(XPresult, 4);
@ -1069,7 +1073,30 @@ function optimizerRuleTestSuite() {
}
});
assertTrue(seen);
},
testSortModifyFilterCondition : function () {
var query = "FOR v IN " + colName + " FILTER v.a == 123 SORT v.a, v.xxx RETURN v";
var rules = AQL_EXPLAIN(query).plan.rules;
assertNotEqual(-1, rules.indexOf(ruleName));
assertNotEqual(-1, rules.indexOf(secondRuleName));
assertNotEqual(-1, rules.indexOf("remove-filter-covered-by-index"));
var nodes = AQL_EXPLAIN(query).plan.nodes;
var seen = 0;
nodes.forEach(function(node) {
if (node.type === "IndexNode") {
++seen;
assertFalse(node.reverse);
} else if (node.type === "SortNode") {
// first sort condition (v.a) should have been removed because it is const
++seen;
assertEqual(1, node.elements.length);
}
});
assertEqual(2, seen);
}
};
}

View File

@ -25,6 +25,7 @@
#define ARANGODB_BASICS_STRING_BUFFER_H 1
#include "Basics/Common.h"
#include "Basics/Exceptions.h"
#include "Logger/Logger.h"
#include "Zip/zip.h"
@ -397,6 +398,10 @@ class StringBuffer {
explicit StringBuffer(TRI_memory_zone_t* zone, bool initializeMemory = true) {
TRI_InitStringBuffer(&_buffer, zone, initializeMemory);
if (_buffer._buffer == nullptr) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY);
}
}
//////////////////////////////////////////////////////////////////////////////
@ -405,6 +410,10 @@ class StringBuffer {
StringBuffer(TRI_memory_zone_t* zone, size_t initialSize, bool initializeMemory = true) {
TRI_InitSizedStringBuffer(&_buffer, zone, initialSize, initializeMemory);
if (_buffer._buffer == nullptr) {
THROW_ARANGO_EXCEPTION(TRI_ERROR_OUT_OF_MEMORY);
}
}
//////////////////////////////////////////////////////////////////////////////

View File

@ -26,6 +26,7 @@
#include "Basics/Exceptions.h"
#include "Logger/Logger.h"
#include "Basics/StaticStrings.h"
#include "Basics/StringBuffer.h"
#include "Basics/StringUtils.h"
#include "Basics/Utf8Helper.h"
#include "Basics/VPackStringBufferAdapter.h"
@ -431,19 +432,17 @@ static bool PrintVelocyPack(int fd, VPackSlice const& slice,
return false;
}
TRI_string_buffer_t buffer;
TRI_InitStringBuffer(&buffer, TRI_UNKNOWN_MEM_ZONE);
arangodb::basics::VPackStringBufferAdapter bufferAdapter(&buffer);
arangodb::basics::StringBuffer buffer(TRI_UNKNOWN_MEM_ZONE);
arangodb::basics::VPackStringBufferAdapter bufferAdapter(buffer.stringBuffer());
try {
VPackDumper dumper(&bufferAdapter);
dumper.dump(slice);
} catch (...) {
// Writing failed
TRI_AnnihilateStringBuffer(&buffer);
return false;
}
if (TRI_LengthStringBuffer(&buffer) == 0) {
if (buffer.length() == 0) {
// should not happen
return false;
}
@ -451,17 +450,16 @@ static bool PrintVelocyPack(int fd, VPackSlice const& slice,
if (appendNewline) {
// add the newline here so we only need one write operation in the ideal
// case
TRI_AppendCharStringBuffer(&buffer, '\n');
buffer.appendChar('\n');
}
char const* p = TRI_BeginStringBuffer(&buffer);
size_t n = TRI_LengthStringBuffer(&buffer);
char const* p = buffer.begin();
size_t n = buffer.length();
while (0 < n) {
ssize_t m = TRI_WRITE(fd, p, (TRI_write_t)n);
if (m <= 0) {
TRI_AnnihilateStringBuffer(&buffer);
return false;
}
@ -469,7 +467,6 @@ static bool PrintVelocyPack(int fd, VPackSlice const& slice,
p += m;
}
TRI_AnnihilateStringBuffer(&buffer);
return true;
}

View File

@ -24,6 +24,7 @@
#define ARANGODB_PROGRAM_OPTIONS_PARAMETERS_H 1
#include "Basics/Common.h"
#include "Basics/Exceptions.h"
#include "Basics/fpconv.h"
#include <velocypack/Builder.h>
@ -305,7 +306,25 @@ template <typename T>
struct DiscreteValuesParameter : public T {
DiscreteValuesParameter(typename T::ValueType* ptr,
std::unordered_set<typename T::ValueType> const& allowed)
: T(ptr), allowed(allowed) {}
: T(ptr), allowed(allowed) {
if (allowed.find(*ptr) == allowed.end()) {
// default value is not in list of allowed values
std::string msg("invalid default value for DiscreteValues parameter: ");
msg.append(stringifyValue(*ptr));
msg.append(". allowed values: ");
size_t i = 0;
for (auto const& it : allowed) {
if (i > 0) {
msg.append(" or ");
}
msg.append(stringifyValue(it));
++i;
}
THROW_ARANGO_EXCEPTION_MESSAGE(TRI_ERROR_INTERNAL, msg.c_str());
}
}
std::string set(std::string const& value) override {
auto it = allowed.find(fromString<typename T::ValueType>(value));

View File

@ -85,7 +85,7 @@ void SslServerFeature::collectOptions(std::shared_ptr<ProgramOptions> options) {
options->addHiddenOption(
"--ssl.options", "ssl connection options, see OpenSSL documentation",
new DiscreteValuesParameter<UInt64Parameter>(&_sslOptions, sslProtocols));
new UInt64Parameter(&_sslOptions));
options->addOption(
"--ssl.ecdh-curve",

View File

@ -56,7 +56,6 @@ for aid in `seq 0 $(( $POOLSZ - 1 ))`; do
--server.endpoint tcp://0.0.0.0:$port \
--server.statistics false \
--agency.compaction-step-size $COMP \
--log.level agency=debug \
--log.force-direct true \
> agency/$port.stdout 2>&1 &
done