1
0
Fork 0

Merge branch 'devel' of github.com:arangodb/arangodb into devel

This commit is contained in:
hkernbach 2016-05-19 11:21:56 +02:00
commit 01f70e7ddb
718 changed files with 14111 additions and 17566 deletions

View File

@ -169,7 +169,12 @@ if (CMAKE_COMPILER_IS_GNUCC)
message(STATUS "Compiler type GNU: ${CMAKE_CXX_COMPILER}")
endif ()
# -DSNAPPY -DZLIB
set(BASE_FLAGS "${BASE_FLAGS} -W -Wextra -Wall -Wsign-compare -Wshadow -Wno-unused-parameter -fno-omit-frame-pointer -momit-leaf-frame-pointer -fno-builtin-memcmp -Woverloaded-virtual -Wnon-virtual-dtor -Wno-missing-field-initializers -DROCKSDB_MALLOC_USABLE_SIZE -march=native -isystem -fPIC -D${OS_DEFINE} ${POSIX_FLAG}")
if (SOLARIS)
set(ROCKSDB_MALLOC_USABLE_SIZE "")
else()
set(ROCKSDB_MALLOC_USABLE_SIZE "-DROCKSDB_MALLOC_USABLE_SIZE")
endif()
set(BASE_FLAGS "${BASE_FLAGS} -W -Wextra -Wall -Wsign-compare -Wshadow -Wno-unused-parameter -fno-omit-frame-pointer -momit-leaf-frame-pointer -fno-builtin-memcmp -Woverloaded-virtual -Wnon-virtual-dtor -Wno-missing-field-initializers ${ROCKSDB_MALLOC_USABLE_SIZE} -march=native -isystem -fPIC -D${OS_DEFINE} ${POSIX_FLAG}")
set(CMAKE_C_FLAGS "-g" CACHE INTERNAL "default C compiler flags")
set(CMAKE_C_FLAGS_DEBUG "-O0 -g -Werror" CACHE INTERNAL "C debug flags")
@ -222,12 +227,12 @@ elseif (MSVC)
add_definitions(-DWIN32 -DOS_WIN -D_MBCS -DWIN64)
if((${OPTIMIZE_DEBUG} EQUAL 1))
message(STATUS "Debug optimization is enabled")
set(CMAKE_CXX_FLAGS_DEBUG "/Oxt /MDd")
set(CMAKE_CXX_FLAGS_DEBUG "/Oxt /MTd")
else()
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /Od /RTC1 /Gm /MDd")
set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /Od /RTC1 /Gm /MTd")
endif()
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /Oxt /Zp8 /Gm- /Gy /MD")
set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /Oxt /Zp8 /Gm- /Gy /MT")
set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /DEBUG")
set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /DEBUG")

View File

@ -95,8 +95,9 @@ struct iovec {
} // namespace snappy
#if @ac_cv_have_ssize_t@
typedef uint64_t ssize_t;
#if @ac_cv_have_ssize_t@
#include <intsafe.h>
typedef SSIZE_T ssize_t;
#endif
#endif // THIRD_PARTY_SNAPPY_OPENSOURCE_SNAPPY_STUBS_PUBLIC_H_

View File

@ -186,12 +186,29 @@ v3.0.0 (XXXX-XX-XX)
using a backwards-compatible "legacy mode"
v2.8.9 (XXXX-XX-XX)
v2.8.10 (XXXX-XX-XX)
--------------------
* fixed issue #1826: arangosh --javascript.execute: internal error (geo index issue)
* fixed issue #1823: Arango crashed hard executing very simple query on windows
v2.8.9 (2016-05-13)
-------------------
* fixed escaping and quoting of extra parameters for executables in Mac OS X App
* added "waiting for" status variable to web interface collection figures view
* fixed undefined behavior in query cache invaldation
* fixed access to /_admin/statistics API in case statistics are disable via option
`--server.disable-statistics`
* Foxx manager will no longer fail hard when Foxx store is unreachable unless installing
a service from the Foxx store (e.g. when behind a firewall or GitHub is unreachable).
v2.8.8 (2016-04-19)
-------------------

View File

@ -320,7 +320,7 @@ else ()
option(USE_OPTIMIZE_FOR_ARCHITECTURE "try to determine CPU architecture" ON)
if (NOT USE_OPTIMIZE_FOR_ARCHITECTURE)
if (NOT ${USE_OPTIMIZE_FOR_ARCHITECTURE})
# mop: core2 (merom) is our absolute minimum!
SET(TARGET_ARCHITECTURE "merom")
endif ()

View File

@ -1,7 +1,7 @@
!CHAPTER Array Operators
!SUBSECTION Array expansion
!SECTION Array expansion
In order to access a named attribute from all elements in an array easily, AQL
offers the shortcut operator <i>[\*]</i> for array variable expansion.
@ -62,63 +62,25 @@ This will produce:
]
```
This a shortcut for the longer, semantically equivalent query:
This is a shortcut for the longer, semantically equivalent query:
```
FOR u IN users
RETURN { name: u.name, friends: (FOR f IN u.friends RETURN f.name) }
```
While producing a result with the <i>[\*]</i> operator, it is also possible
to filter while iterating over the array, and to create a projection using the
current array element.
For example, to return only the names of friends that have an *age* value
higher than the user herself an inline *FILTER* can be used:
```
FOR u IN users
RETURN { name: u.name, friends: u.friends[* FILTER CURRENT.age > u.age].name }
```
The pseudo-variable *CURRENT* can be used to access the current array element.
The *FILTER* condition can refer to *CURRENT* or any variables valid in the
outer scope.
To return a projection of the current element, use *RETURN*. If a *FILTER* is
also present, *RETURN* must come later.
```
FOR u IN users
RETURN u.friends[* RETURN CONCAT(CURRENT.name, " is a friend of ", u.name)]
```
The above will return:
```json
[
[
"tina is a friend of john",
"helga is a friend of john",
"alfred is a friend of john"
],
[
"sergei is a friend of yves",
"tiffany is a friend of yves"
],
[
"bob is a friend of sandra",
"elena is a friend of sandra"
]
]
```
!SUBSECTION Array contraction
!SECTION Array contraction
In order to collapse (or flatten) results in nested arrays, AQL provides the <i>[\*\*]</i>
operator. It works similar to the <i>[\*]</i> operator, but additionally collapses nested
arrays. How many levels are collapsed is determined by the amount of <i>\*</i> characters used.
arrays.
How many levels are collapsed is determined by the amount of asterisk characters used.
<i>[\*\*]</i> collapses one level of nesting - just like `FLATTEN(array)` or `FLATTEN(array, 1)`
would do -, <i>[\*\*\*]</i> collapses two levels - the equivalent to `FLATTEN(array, 2)` - and
so on.
Let's compare the array expansion operator with an array contraction operator.
For example, the following query produces an array of friend names per user:
```
@ -147,11 +109,11 @@ As we have multiple users, the overall result is a nested array:
```
If the goal is to get rid of the nested array, we can apply the <i>[\*\*]</i> operator on the
result. Simplying appending <i>[\*\*]</i> to the query won't help, because *u.friends*
result. But simply appending <i>[\*\*]</i> to the query won't help, because *u.friends*
is not a nested (multi-dimensional) array, but a simple (one-dimensional) array. Still,
the <i>[\*\*]</i> can be used if it has access to a multi-dimensional nested result.
We can easily create a nested result like this:
We can extend above query as follows and still create the same nested result:
```
RETURN (
@ -159,7 +121,7 @@ RETURN (
)
```
By now appending the <i>[\*\*]</i> operator the end of the query, the query result becomes:
By now appending the <i>[\*\*]</i> operator at the end of the query...
```
RETURN (
@ -167,6 +129,8 @@ RETURN (
)[**]
```
... the query result becomes:
```json
[
[
@ -180,3 +144,155 @@ RETURN (
]
]
```
Note that the elements are not de-duplicated. For a flat array with only unique
elements, a combination of [UNIQUE()](../Functions/Array.md#unique) and
[FLATTEN()](../Functions/Array.md#flatten) is advisable.
!SECTION Inline expressions
It is possible to filter elements while iterating over an array, to limit the amount
of returned elements and to create a projection using the current array element.
Sorting is not supported by this shorthand form.
These inline expressions can follow array expansion and contraction operators
<i>[\* ...]</i>, <i>[\*\* ...]</i> etc. The keywords *FILTER*, *LIMIT* and *RETURN*
must occur in this order if they are used in combination, and can only occur once:
`anyArray[* FILTER conditions LIMIT skip,limit RETURN projection]`
Example with nested numbers and array contraction:
```
LET arr = [ [ 1, 2 ], 3, [ 4, 5 ], 6 ]
RETURN arr[** FILTER CURRENT % 2 == 0]
```
All even numbers are returned in a flat array:
```json
[
[ 2, 4, 6 ]
]
```
Complex example with multiple conditions, limit and projection:
```
FOR u IN users
RETURN {
name: u.name,
friends: u.friends[* FILTER CONTAINS(CURRENT.name, "a") AND CURRENT.age > 40
LIMIT 2
RETURN CONCAT(CURRENT.name, " is ", CURRENT.age)
]
}
```
No more than two computed strings based on *friends* with an `a` in their name and
older than 40 years are returned per user:
```json
[
{
"name": "john",
"friends": [
"tina is 43",
"helga is 52"
]
},
{
"name": "sandra",
"friends": [
"elena is 48"
]
},
{
"name": "yves",
"friends": []
}
]
```
!SUBSECTION Inline filter
To return only the names of friends that have an *age* value
higher than the user herself, an inline *FILTER* can be used:
```
FOR u IN users
RETURN { name: u.name, friends: u.friends[* FILTER CURRENT.age > u.age].name }
```
The pseudo-variable *CURRENT* can be used to access the current array element.
The *FILTER* condition can refer to *CURRENT* or any variables valid in the
outer scope.
!SUBSECTION Inline limit
The number of elements returned can be restricted with *LIMIT*. It works the same
as the [limit operation](../Operations/Limit.md). *LIMIT* must come after *FILTER*
and before *RETURN*, if they are present.
```
FOR u IN users
RETURN { name: u.name, friends: u.friends[* LIMIT 1].name }
```
Above example returns one friend each:
```json
[
{ "name": "john", "friends": [ "tina" ] },
{ "name": "sandra", "friends": [ "bob" ] },
{ "name": "yves", "friends": [ "sergei" ] }
]
```
A number of elements can also be skipped and up to *n* returned:
```
FOR u IN users
RETURN { name: u.name, friends: u.friends[* LIMIT 1,2].name }
```
The example query skips the first friend and returns two friends at most
per user:
```json
[
{ "name": "john", "friends": [ "helga", "alfred" ] },
{ "name": "sandra", "friends": [ "elena" ] },
{ "name": "yves", "friends": [ "tiffany" ] }
]
```
!SUBSECTION Inline projection
To return a projection of the current element, use *RETURN*. If a *FILTER* is
also present, *RETURN* must come later.
```
FOR u IN users
RETURN u.friends[* RETURN CONCAT(CURRENT.name, " is a friend of ", u.name)]
```
The above will return:
```json
[
[
"tina is a friend of john",
"helga is a friend of john",
"alfred is a friend of john"
],
[
"sergei is a friend of yves",
"tiffany is a friend of yves"
],
[
"bob is a friend of sandra",
"elena is a friend of sandra"
]
]
```

View File

@ -0,0 +1 @@
!CHAPTER Common Errors

View File

@ -8,7 +8,7 @@ AQL supports the following data-modification operations:
- **REMOVE**: remove existing documents from a collection
- **UPSERT**: conditionally insert or update documents in a collection
Those operations are detailed in the chapter [High Level Operations](Operations.md).
Those operations are detailed in the chapter [High Level Operations](Operations/README.md).
!SUBSECTION Modifying a single document
@ -16,34 +16,49 @@ Those operations are detailed in the chapter [High Level Operations](Operations.
Let's start with the basics: `INSERT`, `UPDATE` and `REMOVE` operations on single documents.
Here is an example that insert a document in an existing collection *users*:
INSERT {firstName:'Anna', name:'Pavlova', profession:'artist'} IN users
```js
INSERT { firstName: "Anna", name: "Pavlova", profession: "artist" } IN users
```
You may provide a key for the new document; if not provided, ArangoDB will create one for you.
INSERT {_key:'GilbertoGil', firstName:'Gilberto', name:'Gil', city:'Fortalezza'} IN users
```js
INSERT { _key: "GilbertoGil", firstName: "Gilberto", name: "Gil", city: "Fortalezza" } IN users
```
As Arango is schema-free, attributes of the documents may vary:
INSERT {_key:'PhilCarpenter', firstName:'Phil', name:'Carpenter', middleName:'G.',status:'inactive', } IN users
```js
INSERT { _key: "PhilCarpenter", firstName: "Phil", name: "Carpenter", middleName: "G.", status: "inactive" } IN users
```
INSERT {_key:'NatachaDeclerck', firstName:'Natacha', name:'Declerck', location:'Antwerp'} IN users
```js
INSERT { _key: "NatachaDeclerck", firstName: "Natacha", name: "Declerck", location: "Antwerp" } IN users
```
Update is quite simple. The following AQL statement will add or change the attributes status and location
UPDATE 'PhilCarpenter' WITH { status:'active', location:'Beijing' } IN users
```js
UPDATE "PhilCarpenter" WITH { status: "active", location: "Beijing" } IN users
```
Replace is an alternative to update where all attributes of the document are replaced.
REPLACE { _key: 'NatachaDeclerck', firstName:'Natacha', name:'Leclerc', status: 'active', level:'premium' } IN users
```js
REPLACE { _key: "NatachaDeclerck", firstName: "Natacha", name: "Leclerc", status: "active", level: "premium" } IN users
```
Removing a document if you know its key is simple as well :
REMOVE 'GilbertoGil' IN users
```js
REMOVE "GilbertoGil" IN users
```
or
REMOVE {_key:'GilbertoGil'} IN users
```js
REMOVE { _key: "GilbertoGil" } IN users
```
!SUBSECTION Modifying multiple documents
@ -54,26 +69,30 @@ iterate over a given list of documents. They can optionally be combined with
Let's start with an example that modifies existing documents in a collection
*users* that match some condition:
FOR u IN users
FILTER u.status == 'not active'
UPDATE u WITH { status: 'inactive' } IN users
```js
FOR u IN users
FILTER u.status == "not active"
UPDATE u WITH { status: "inactive" } IN users
```
Now, let's copy the contents of the collection *users* into the collection
*backup*:
FOR u IN users
INSERT u IN backup
```js
FOR u IN users
INSERT u IN backup
```
As a final example, let's find some documents in collection *users* and
remove them from collection *backup*. The link between the documents in both
collections is established via the documents' keys:
FOR u IN users
FILTER u.status == 'deleted'
REMOVE u IN backup
```js
FOR u IN users
FILTER u.status == "deleted"
REMOVE u IN backup
```
!SUBSECTION Returning documents
@ -81,19 +100,25 @@ Data-modification queries can optionally return documents. In order to reference
the inserted, removed or modified documents in a `RETURN` statement, data-modification
statements introduce the `OLD` and/or `NEW` pseudo-values:
FOR i IN 1..100
INSERT { value: i } IN test
RETURN NEW
```js
FOR i IN 1..100
INSERT { value: i } IN test
RETURN NEW
```
FOR u IN users
FILTER u.status == 'deleted'
REMOVE u IN users
RETURN OLD
```js
FOR u IN users
FILTER u.status == "deleted"
REMOVE u IN users
RETURN OLD
```
FOR u IN users
FILTER u.status == 'not active'
UPDATE u WITH { status: 'inactive' } IN users
RETURN NEW
```js
FOR u IN users
FILTER u.status == "not active"
UPDATE u WITH { status: "inactive" } IN users
RETURN NEW
```
`NEW` refers to the inserted or modified document revision, and `OLD` refers
to the document revision before update or removal. `INSERT` statements can
@ -112,21 +137,23 @@ by queries.
For example, the following query will return only the keys of the inserted documents:
FOR i IN 1..100
INSERT { value: i } IN test
RETURN NEW._key
```js
FOR i IN 1..100
INSERT { value: i } IN test
RETURN NEW._key
```
!SUBSUBSECTION Using OLD and NEW in the same query
For `UPDATE`, `REPLACE` and `UPSERT` statements, both `OLD` and `NEW` can be used
to return the previous revision of a document together with the updated revision:
FOR u IN users
FILTER u.status == 'not active'
UPDATE u WITH { status: 'inactive' } IN users
RETURN { old: OLD, new: NEW }
```js
FOR u IN users
FILTER u.status == "not active"
UPDATE u WITH { status: "inactive" } IN users
RETURN { old: OLD, new: NEW }
```
!SUBSUBSECTION Calculations with OLD or NEW
@ -137,18 +164,20 @@ updated, or a new document was inserted. It does so by checking the `OLD` variab
after the `UPSERT` and using a `LET` statement to store a temporary string for
the operation type:
UPSERT { name: 'test' }
INSERT { name: 'test' }
UPDATE { } IN users
LET opType = IS_NULL(old) ? 'insert' : 'update'
RETURN { _key: NEW._key, type: opType }
```js
UPSERT { name: "test" }
INSERT { name: "test" }
UPDATE { } IN users
LET opType = IS_NULL(old) ? "insert" : "update"
RETURN { _key: NEW._key, type: opType }
```
!SUBSECTION Restrictions
The name of the modified collection (*users* and *backup* in the above cases)
must be known to the AQL executor at query-compile time and cannot change at
runtime. Using a bind parameter to specify the [collection name](../Glossary/README.md#collection-name) is allowed.
runtime. Using a bind parameter to specify the
[collection name](../Users/Appendix/Glossary.html#collection-name) is allowed.
Data-modification queries are restricted to modifying data in a single
collection per query. That means a data-modification query cannot modify

View File

@ -1,9 +1,9 @@
!CHAPTER Combining Graph Traversals
!SUBSECTION Finding the start vertex via a geo query
Our first example will locate the start vertex for a graph traversal via [a geo index](../IndexHandling/Geo.md).
We use [the city graph](../Graphs/README.md#the-city-graph) and its geo indices:
Our first example will locate the start vertex for a graph traversal via [a geo index](../../Users/Indexing/Geo.html).
We use [the city graph](../../Users/Graphs/index.html#the-city-graph) and its geo indices:
![Cities Example Graph](../Graphs/cities_graph.png)
![Cities Example Graph](../../Users/Graphs/cities_graph.png)
@startDocuBlockInline COMBINING_GRAPH_01_create_graph

View File

@ -3,7 +3,7 @@
!SECTION Amount of documents in a collection
To return the count of documents that currently exist in a collection,
you can call the [LENGTH() function](../Aql/ArrayFunctions.md#length):
you can call the [LENGTH() function](../Functions/Array.md#length):
```
RETURN LENGTH(collection)
@ -11,11 +11,13 @@ RETURN LENGTH(collection)
This type of call is optimized since 2.8 (no unnecessary intermediate result
is built up in memory) and it is therefore the prefered way to determine the count.
Internally, [COLLECTION_COUNT()](../Functions/Miscellaneous.md#collectioncount) is called.
In earlier versions with `COLLECT ... WITH COUNT INTO` available (since 2.4),
you may use the following code for better performance instead:
you may use the following code instead of *LENGTH()* for better performance:
```
FOR doc IN collection
COLLECT WITH COUNT INTO length
RETURN length
COLLECT WITH COUNT INTO length
RETURN length
```

View File

@ -9,29 +9,29 @@ added in the query if required.
*COLLECT* can be used to make a result set unique. The following query will return each distinct
`age` attribute value only once:
```
FOR u IN users
COLLECT age = u.age
RETURN age
```js
FOR u IN users
COLLECT age = u.age
RETURN age
```
This is grouping without tracking the group values, but just the group criterion (*age*) value.
Grouping can also be done on multiple levels using *COLLECT*:
```
FOR u IN users
COLLECT status = u.status, age = u.age
RETURN { status, age }
```js
FOR u IN users
COLLECT status = u.status, age = u.age
RETURN { status, age }
```
Alternatively *RETURN DISTINCT* can be used to make a result set unique. *RETURN DISTINCT* supports a
Alternatively *RETURN DISTINCT* can be used to make a result set unique. *RETURN DISTINCT* supports a
single criterion only:
```
```js
FOR u IN users
RETURN DISTINCT u.age
RETURN DISTINCT u.age
```
Note: the order of results is undefined for *RETURN DISTINCT*.
@ -41,70 +41,72 @@ Note: the order of results is undefined for *RETURN DISTINCT*.
To group users by age, and return the names of the users with the highest ages,
we'll issue a query like this:
```
FOR u IN users
FILTER u.active == true
COLLECT age = u.age INTO usersByAge
SORT age DESC LIMIT 0, 5
RETURN {
```js
FOR u IN users
FILTER u.active == true
COLLECT age = u.age INTO usersByAge
SORT age DESC LIMIT 0, 5
RETURN {
age,
users : usersByAge[*].u.name
users: usersByAge[*].u.name
}
```
[
{
"age" : 37,
"users" : [
"John",
"Sophia"
]
},
{
"age" : 36,
"users" : [
"Fred",
"Emma"
]
},
{
"age" : 34,
"users" : [
"Madison"
]
},
{
"age" : 33,
"users" : [
"Chloe",
"Michael"
]
},
{
"age" : 32,
"users" : [
"Alexander"
]
}
```json
[
{
"age": 37,
"users": [
"John",
"Sophia"
]
},
{
"age": 36,
"users": [
"Fred",
"Emma"
]
},
{
"age": 34,
"users": [
"Madison"
]
},
{
"age": 33,
"users": [
"Chloe",
"Michael"
]
},
{
"age": 32,
"users": [
"Alexander"
]
}
]
```
The query will put all users together by their *age* attribute. There will be one
result document per distinct *age* value (let aside the *LIMIT*). For each group,
we have access to the matching document via the *usersByAge* variable introduced in
the *COLLECT* statement.
the *COLLECT* statement.
!SUBSECTION Variable Expansion
The *usersByAge* variable contains the full documents found, and as we're only
interested in user names, we'll use the expansion operator <i>[\*]</i> to extract just the
The *usersByAge* variable contains the full documents found, and as we're only
interested in user names, we'll use the expansion operator <i>[\*]</i> to extract just the
*name* attribute of all user documents in each group.
The <i>[\*]</i> expansion operator is just a handy short-cut. Instead of <i>usersByAge[\*].u.name</i>
we could also write:
```
```js
FOR temp IN usersByAge
RETURN temp.u.name
RETURN temp.u.name
```
!SUBSECTION Grouping by multiple criteria
@ -113,42 +115,44 @@ To group by multiple criteria, we'll use multiple arguments in the *COLLECT* cla
For example, to group users by *ageGroup* (a derived value we need to calculate first)
and then by *gender*, we'll do:
```js
FOR u IN users
FILTER u.active == true
COLLECT ageGroup = FLOOR(u.age / 5) * 5,
gender = u.gender INTO group
SORT ageGroup DESC
RETURN {
ageGroup,
gender
}
```
FOR u IN users
FILTER u.active == true
COLLECT ageGroup = FLOOR(u.age / 5) * 5,
gender = u.gender INTO group
SORT ageGroup DESC
RETURN {
ageGroup,
gender
}
[
{
"ageGroup" : 35,
"gender" : "f"
},
{
"ageGroup" : 35,
"gender" : "m"
},
{
"ageGroup" : 30,
"gender" : "f"
},
{
"ageGroup" : 30,
"gender" : "m"
},
{
"ageGroup" : 25,
"gender" : "f"
},
{
"ageGroup" : 25,
"gender" : "m"
}
```json
[
{
"ageGroup": 35,
"gender": "f"
},
{
"ageGroup": 35,
"gender": "m"
},
{
"ageGroup": 30,
"gender": "f"
},
{
"ageGroup": 30,
"gender": "m"
},
{
"ageGroup": 25,
"gender": "f"
},
{
"ageGroup": 25,
"gender": "m"
}
]
```
@ -158,49 +162,51 @@ If the goal is to count the number of values in each group, AQL provides the spe
*COLLECT WITH COUNT INTO* syntax. This is a simple variant for grouping with an additional
group length calculation:
```js
FOR u IN users
FILTER u.active == true
COLLECT ageGroup = FLOOR(u.age / 5) * 5,
gender = u.gender WITH COUNT INTO numUsers
SORT ageGroup DESC
RETURN {
ageGroup,
gender,
numUsers
}
```
FOR u IN users
FILTER u.active == true
COLLECT ageGroup = FLOOR(u.age / 5) * 5,
gender = u.gender WITH COUNT INTO numUsers
SORT ageGroup DESC
RETURN {
ageGroup,
gender,
numUsers
}
[
{
"ageGroup" : 35,
"gender" : "f",
"numUsers" : 2
},
{
"ageGroup" : 35,
"gender" : "m",
"numUsers" : 2
},
{
"ageGroup" : 30,
"gender" : "f",
"numUsers" : 4
},
{
"ageGroup" : 30,
"gender" : "m",
"numUsers" : 4
},
{
"ageGroup" : 25,
"gender" : "f",
"numUsers" : 2
},
{
"ageGroup" : 25,
"gender" : "m",
"numUsers" : 2
}
```json
[
{
"ageGroup": 35,
"gender": "f",
"numUsers": 2
},
{
"ageGroup": 35,
"gender": "m",
"numUsers": 2
},
{
"ageGroup": 30,
"gender": "f",
"numUsers": 4
},
{
"ageGroup": 30,
"gender": "m",
"numUsers": 4
},
{
"ageGroup": 25,
"gender": "f",
"numUsers": 2
},
{
"ageGroup": 25,
"gender": "m",
"numUsers": 2
}
]
```
@ -209,45 +215,47 @@ FOR u IN users
Adding further aggregation is also simple in AQL by using an *AGGREGATE* clause
in the *COLLECT*:
```js
FOR u IN users
FILTER u.active == true
COLLECT ageGroup = FLOOR(u.age / 5) * 5,
gender = u.gender
AGGREGATE numUsers = LENGTH(1),
minAge = MIN(u.age),
maxAge = MAX(u.age)
SORT ageGroup DESC
RETURN {
ageGroup,
gender,
numUsers,
minAge,
maxAge
}
```
FOR u IN users
FILTER u.active == true
COLLECT ageGroup = FLOOR(u.age / 5) * 5,
gender = u.gender
AGGREGATE numUsers = LENGTH(1),
minAge = MIN(u.age),
maxAge = MAX(u.age)
SORT ageGroup DESC
RETURN {
ageGroup,
gender,
numUsers,
minAge,
maxAge
}
[
{
"ageGroup" : 35,
"gender" : "f",
"numUsers" : 2,
"minAge" : 36,
"maxAge" : 39,
},
{
"ageGroup" : 35,
"gender" : "m",
"numUsers" : 2,
"minAge" : 35,
"maxAge" : 39,
},
```json
[
{
"ageGroup": 35,
"gender": "f",
"numUsers": 2,
"minAge": 36,
"maxAge": 39,
},
{
"ageGroup": 35,
"gender": "m",
"numUsers": 2,
"minAge": 35,
"maxAge": 39,
},
...
]
```
We have used the aggregate functions *LENGTH* here (it returns the length of a array).
This is the equivalent to SQL's `SELECT g, COUNT(*) FROM ... GROUP BY g`. In addition to
*LENGTH* AQL also provides *MAX*, *MIN*, *SUM* and *AVERAGE*, *VARIANCE_POPULATION*,
We have used the aggregate functions *LENGTH* here (it returns the length of a array).
This is the equivalent to SQL's `SELECT g, COUNT(*) FROM ... GROUP BY g`. In addition to
*LENGTH* AQL also provides *MAX*, *MIN*, *SUM* and *AVERAGE*, *VARIANCE_POPULATION*,
*VARIANCE_SAMPLE*, *STDDEV_POPULATION* and *STDDEV_SAMPLE* as basic aggregation functions.
In AQL all aggregation functions can be run on arrays only. If an aggregation function
@ -265,38 +273,40 @@ Aggregation can also be performed after a *COLLECT* operation using other AQL co
though performance-wise this is often inferior to using *COLLECT* with *AGGREGATE*.
The same query as before can be turned into a post-aggregation query as shown below. Note
that this query will build and pass on all group values for all groups inside the variable
that this query will build and pass on all group values for all groups inside the variable
*g*, and perform the aggregation at the latest possible stage:
```js
FOR u IN users
FILTER u.active == true
COLLECT ageGroup = FLOOR(u.age / 5) * 5,
gender = u.gender INTO g
SORT ageGroup DESC
RETURN {
ageGroup,
gender,
numUsers: LENGTH(g[*]),
minAge: MIN(g[*].u.age),
maxAge: MAX(g[*].u.age)
}
```
FOR u IN users
FILTER u.active == true
COLLECT ageGroup = FLOOR(u.age / 5) * 5,
gender = u.gender INTO g
SORT ageGroup DESC
RETURN {
ageGroup,
gender,
numUsers : LENGTH(g[*]),
minAge : MIN(g[*].u.age),
maxAge : MAX(g[*].u.age)
}
[
{
"ageGroup" : 35,
"gender" : "f",
"numUsers" : 2,
"minAge" : 36,
"maxAge" : 39,
},
{
"ageGroup" : 35,
"gender" : "m",
"numUsers" : 2,
"minAge" : 35,
"maxAge" : 39,
},
```json
[
{
"ageGroup": 35,
"gender": "f",
"numUsers": 2,
"minAge": 36,
"maxAge": 39,
},
{
"ageGroup": 35,
"gender": "m",
"numUsers": 2,
"minAge": 35,
"maxAge": 39,
},
...
]
```
@ -308,60 +318,62 @@ the aggregation during the collect operation, at the earliest possible stage.
!SUBSECTION Post-filtering aggregated data
To filter the results of a grouping or aggregation operation (i.e. something
similar to *HAVING* in SQL), simply add another *FILTER* clause after the *COLLECT*
statement.
similar to *HAVING* in SQL), simply add another *FILTER* clause after the *COLLECT*
statement.
For example, to get the 3 *ageGroup*s with the most users in them:
```js
FOR u IN users
FILTER u.active == true
COLLECT ageGroup = FLOOR(u.age / 5) * 5 INTO group
LET numUsers = LENGTH(group)
FILTER numUsers > 2 /* group must contain at least 3 users in order to qualify */
SORT numUsers DESC
LIMIT 0, 3
RETURN {
"ageGroup": ageGroup,
"numUsers": numUsers,
"users": group[*].u.name
}
```
FOR u IN users
FILTER u.active == true
COLLECT ageGroup = FLOOR(u.age / 5) * 5 INTO group
LET numUsers = LENGTH(group)
FILTER numUsers > 2 /* group must contain at least 3 users in order to qualify */
SORT numUsers DESC
LIMIT 0, 3
RETURN {
"ageGroup" : ageGroup,
"numUsers" : numUsers,
"users" : group[*].u.name
}
[
{
"ageGroup" : 30,
"numUsers" : 8,
"users" : [
"Abigail",
"Madison",
"Anthony",
"Alexander",
"Isabella",
"Chloe",
"Daniel",
"Michael"
]
},
{
"ageGroup" : 25,
"numUsers" : 4,
"users" : [
"Mary",
"Mariah",
"Jim",
"Diego"
]
},
{
"ageGroup" : 35,
"numUsers" : 4,
"users" : [
"Fred",
"John",
"Emma",
"Sophia"
]
}
```json
[
{
"ageGroup": 30,
"numUsers": 8,
"users": [
"Abigail",
"Madison",
"Anthony",
"Alexander",
"Isabella",
"Chloe",
"Daniel",
"Michael"
]
},
{
"ageGroup": 25,
"numUsers": 4,
"users": [
"Mary",
"Mariah",
"Jim",
"Diego"
]
},
{
"ageGroup": 35,
"numUsers": 4,
"users": [
"Fred",
"John",
"Emma",
"Sophia"
]
}
]
```

View File

@ -24,7 +24,9 @@ FOR u IN users
"user" : u.name,
"friendId" : f.thisUser
}
```
```json
[
{
"user" : "Abigail",
@ -88,7 +90,9 @@ FOR u IN users
RETURN f.thisUser
)
}
```
```json
[
{
"user" : "Abigail",
@ -144,7 +148,9 @@ FOR u IN users
RETURN u2.name
)
}
```
```json
[
{
"user" : "Abigail",
@ -196,7 +202,9 @@ FOR user IN users
)
FILTER LENGTH(friendList) == 0
RETURN { "user" : user.name }
```
```json
[
{
"user" : "Abigail"
@ -217,12 +225,21 @@ Since we're free of schemata, there is by default no way to tell the format of t
documents. So, if your documents don't contain an attribute, it defaults to
null. We can however check our data for accuracy like this:
```
```js
RETURN LENGTH(FOR u IN users FILTER u.userId == null RETURN 1)
```
```json
[
10000
]
```
```js
RETURN LENGTH(FOR f IN relations FILTER f.friendOf == null RETURN 1)
```
```json
[
10000
]

View File

@ -8,7 +8,9 @@ To return three complete documents from collection *users*, the following query
FOR u IN users
LIMIT 0, 3
RETURN u
```
```json
[
{
"_id" : "users/229886047207520",
@ -61,7 +63,9 @@ FOR u IN users
"name" : u.name
}
}
```
```json
[
{
"user" : {
@ -99,7 +103,9 @@ FOR u IN users
"age" : u.age,
"name" : u.name
}
```
```json
[
{
"age" : 37,

View File

@ -14,9 +14,10 @@ Some of the following example queries are executed on a collection 'users' with
Note that all documents created in any collections will automatically get the
following server-generated attributes:
* *_id*: A unique id, consisting of [collection name](../Glossary/README.md#collection-name) and a server-side sequence value
* *_key*: The server sequence value
* *_rev*: The document's revision id
- *_id*: A unique id, consisting of [collection name](../../Users/Appendix/Glossary.html#collection-name)
and a server-side sequence value
- *_key*: The server sequence value
- *_rev*: The document's revision id
Whenever you run queries on the documents in collections, don't be surprised if
these additional attributes are returned as well.
@ -25,7 +26,7 @@ Please also note that with real-world data, you might want to create additional
indexes on the data (left out here for brevity). Adding indexes on attributes that are
used in *FILTER* statements may considerably speed up queries. Furthermore, instead of
using attributes such as *id*, *from* and *to*, you might want to use the built-in
*_id*, *_from* and *_to* attributes. Finally, [edge collection](../Glossary/README.md#edge-collection)s provide a nice way of
*_id*, *_from* and *_to* attributes. Finally, [edge collection](../../Users/Appendix/Glossary.html#edge-collection)s provide a nice way of
establishing references / links between documents. These features have been left out here
for brevity as well.

View File

@ -8,7 +8,7 @@ An explain will throw an error if the given query is syntactically invalid. Othe
return the execution plan and some information about what optimizations could be applied to
the query. The query will not be executed.
Explaining a query can be achieved by calling the [HTTP REST API](../HttpAqlQuery/README.md).
Explaining a query can be achieved by calling the [HTTP REST API](../../HTTP/AqlQuery/index.html).
A query can also be explained from the ArangoShell using `ArangoStatement`'s `explain` method.
By default, the query optimizer will return what it considers to be the *optimal plan*. The
@ -19,12 +19,12 @@ is an array of warnings that occurred during optimization or execution plan crea
Each plan in the result is an object with the following attributes:
- *nodes*: the array of execution nodes of the plan. The list of available node types
can be found [here](../Aql/Optimizer.md)
can be found [here](Optimizer.md)
- *estimatedCost*: the total estimated cost for the plan. If there are multiple
plans, the optimizer will choose the plan with the lowest total cost.
- *collections*: an array of collections used in the query
- *rules*: an array of rules the optimizer applied. The list of rules can be
found [here](../Aql/Optimizer.md)
found [here](Optimizer.md)
- *variables*: array of variables used in the query (note: this may contain
internal variables created by the optimizer)

View File

@ -1,7 +1,7 @@
!CHAPTER Parsing queries
Clients can use ArangoDB to check if a given AQL query is syntactically valid. ArangoDB provides
an [HTTP REST API](../HttpAqlQuery/README.md) for this.
an [HTTP REST API](../../HTTP/AqlQuery/index.html) for this.
A query can also be parsed from the ArangoShell using `ArangoStatement`'s `parse` method. The
`parse` method will throw an exception if the query is syntactically invalid. Otherwise, it will

View File

@ -123,6 +123,6 @@ Documents in the *_aqlfunctions* collection (or any other system collection)
should not be accessed directly, but only via the dedicated interfaces.
Keep in mind that system collections are excluded from dumps created with
[arangodump](../HttpBulkImports/Arangodump.md) by default. To include AQL user
[arangodump](../../Users/Administration/Arangodump.html) by default. To include AQL user
functions in a dump, the dump needs to be started with the
option *--include-system-collections true*.

View File

@ -10,7 +10,7 @@ var aqlfunctions = require("@arangodb/aql/functions");
To register a function, the fully qualified function name plus the
function code must be specified.
The [HTTP Interface](../HttpAqlUserFunctions/README.md) also offers User Functions management.
The [HTTP Interface](../../HTTP/AqlUserFunctions/index.html) also offers User Functions management.
!SUBSECTION Registering an AQL user function
@ -24,7 +24,7 @@ the string evaluates to a JavaScript function definition.
If a function identified by *name* already exists, the previous function
definition will be updated. Please also make sure that the function code
does not violate the [Conventions](../AqlExtending/Conventions.md) for AQL
does not violate the [Conventions](Conventions.md) for AQL
functions.
The *isDeterministic* attribute can be used to specify whether the

View File

@ -6,10 +6,10 @@ fully-featured programming language.
To add missing functionality or to simplify queries, users
may add their own functions to AQL in the selected database.
These functions can be written in JavaScript, and must be
registered via the API; see [Registering Functions](../AqlExtending/Functions.md).
registered via the API; see [Registering Functions](Functions.md).
In order to avoid conflicts with existing or future built-in
function names, all user functions must be put into separate
namespaces. Invoking a user functions is then possible by referring
to the fully-qualified function name, which includes the namespace,
too; see [Conventions](../AqlExtending/Conventions.md).
too; see [Conventions](Conventions.md).

View File

@ -1,220 +1,371 @@
!CHAPTER Array functions
AQL supports the following functions to operate on array values:
AQL provides functions for higher-level array manipulation. Also see the
[numeric functions](Numeric.md) for functions that work on number arrays.
Apart from that, AQL also offers several language constructs:
- *LENGTH(array)*: Returns the length (number of array elements) of *array*. If
*array* is an object / document, returns the number of attribute keys of the document,
regardless of their values. If *array* is a collection, returns the number of documents
in it.
- *FLATTEN(array, depth)*: Turns an array of arrays into a flat array. All
array elements in *array* will be expanded in the result array. Non-array elements
are added as they are. The function will recurse into sub-arrays up to a depth of
*depth*. *depth* has a default value of 1.
*Examples*
FLATTEN([ 1, 2, [ 3, 4 ], 5, [ 6, 7 ], [ 8, [ 9, 10 ] ] ])
will produce:
[ 1, 2, 3, 4, 5, 6, 7, 8, [ 9, 10 ] ]
To fully flatten the array, use a *depth* of 2:
FLATTEN([ 1, 2, [ 3, 4 ], 5, [ 6, 7 ], [ 8, [ 9, 10 ] ] ], 2)
This will produce:
[ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ]
- *REVERSE(array)*: Returns the elements in *array* in reversed order.
- *FIRST(array)*: Returns the first element in *array* or *null* if the
array is empty.
- *LAST(array)*: Returns the last element in *array* or *null* if the
array is empty.
- *NTH(array, position)*: Returns the array element at position *position*.
Positions start at 0. If *position* is negative or beyond the upper bound of the array
specified by *array*, then *null* will be returned.
- *POSITION(array, search, return-index)*: Returns the position of the
element *search* in *array*. Positions start at 0. If the element is not
found, then *-1* is returned. If *return-index* is *false*, then instead of the
position only *true* or *false* are returned, depending on whether the sought element
is contained in the array.
- *SLICE(array, start, length)*: Extracts a slice of the array specified
by *array*. The extraction will start at array element with position *start*.
Positions start at 0. Up to *length* elements will be extracted. If *length* is
not specified, all array elements starting at *start* will be returned.
If *start* is negative, it can be used to indicate positions from the end of the
array.
*Examples*
SLICE([ 1, 2, 3, 4, 5 ], 0, 1)
will return *[ 1 ]*
SLICE([ 1, 2, 3, 4, 5 ], 1, 2)
will return *[ 2, 3 ]*
SLICE([ 1, 2, 3, 4, 5 ], 3)
will return *[ 4, 5 ]*
SLICE([ 1, 2, 3, 4, 5 ], 1, -1)
will return *[ 2, 3, 4 ]*
SLICE([ 1, 2, 3, 4, 5 ], 0, -2)
will return *[ 1, 2, 3 ]*
- *UNIQUE(array)*: Returns all unique elements in *array*. To determine
uniqueness, the function will use the comparison order.
Calling this function may return the unique elements in any order.
- *UNION(array1, array2, ...)*: Returns the union of all arrays specified.
The function expects at least two array values as its arguments. The result is an array
of values in an undefined order.
Note: No duplicates will be removed. In order to remove duplicates, please use either
the *UNION_DISTINCT* function or apply *UNIQUE* to the result of *UNION*.
*Examples*
RETURN UNION(
[ 1, 2, 3 ],
[ 1, 2 ]
)
will produce:
[ [ 1, 2, 3, 1, 2 ] ]
with duplicate removal:
RETURN UNIQUE(
UNION(
[ 1, 2, 3 ],
[ 1, 2 ]
)
)
will produce:
[ [ 1, 2, 3 ] ]
- *UNION_DISTINCT(array1, array2, ...)*: Returns the union of distinct values of
all arrays specified. The function expects at least two array values as its arguments.
The result is an array of values in an undefined order.
- *MINUS(array1, array2, ...)*: Returns the difference of all arrays specified.
The function expects at least two array values as its arguments.
The result is an array of values that occur in the first array but not in any of the
subsequent arrays. The order of the result array is undefined and should not be relied on.
Note: duplicates will be removed.
- *INTERSECTION(array1, array2, ...)*: Returns the intersection of all arrays specified.
The function expects at least two array values as its arguments.
The result is an array of values that occur in all arguments. The order of the result array
is undefined and should not be relied on.
Note: Duplicates will be removed.
- *APPEND(array, values, unique)*: Adds all elements from the array *values* to the array
specified by *array*. If *unique* is set to true, then only those *values* will be added
that are not already contained in *array*.
The modified array is returned. All values are added at the end of the array (right side).
/* [ 1, 2, 3, 5, 6, 9 ] */
APPEND([ 1, 2, 3 ], [ 5, 6, 9 ])
/* [ 1, 2, 3, 4, 5, 9 ] */
APPEND([ 1, 2, 3 ], [ 3, 4, 5, 2, 9 ], true)
- *PUSH(array, value, unique)*: Adds *value* to the array specified by *array*. If
*unique* is set to true, then *value* is not added if already present in the array.
The modified array is returned. The value is added at the end of the array (right side).
Note: non-unique elements will not be removed from the array if they were already present
before the call to `PUSH`. The *unique* flag will only control if the value will
be added again to the array if already present. To make an array unique, use the `UNIQUE`
function.
/* [ 1, 2, 3, 4 ] */
PUSH([ 1, 2, 3 ], 4)
/* [ 1, 2, 3 ] */
PUSH([ 1, 2, 3 ], 2, true)
- *UNSHIFT(array, value, unique)*: Adds *value* to the array specified by *array*. If
*unique* is set to true, then *value* is not added if already present in the array.
The modified array is returned. The value is added at the start of the array (left side).
Note: non-unique elements will not be removed from the array if they were already present
before the call to `UNSHIFT`. The *unique* flag will only control if the value will
be added again to the array if already present. To make an array unique, use the `UNIQUE`
function.
/* [ 4, 1, 2, 3 ] */
UNSHIFT([ 1, 2, 3 ], 4)
/* [ 1, 2, 3 ] */
UNSHIFT([ 1, 2, 3 ], 2, true)
- *POP(array)*: Removes the element at the end (right side) of *array*. The modified array
is returned. If the array is already empty or *null*, an empty array is returned.
/* [ 1, 2, 3 ] */
POP([ 1, 2, 3, 4 ])
- *SHIFT(array)*: Removes the element at the start (left side) of *array*. The modified array
is returned. If the array is already empty or *null*, an empty array is returned.
/* [ 2, 3, 4 ] */
SHIFT([ 1, 2, 3, 4 ])
- *REMOVE_VALUE(array, value, limit)*: Removes all occurrences of *value* in the array
specified by *array*. If the optional *limit* is specified, only *limit* occurrences
will be removed.
/* [ "b", "b", "c" ] */
REMOVE_VALUE([ "a", "b", "b", "a", "c" ], "a")
/* [ "b", "b", "a", "c" ] */
REMOVE_VALUE([ "a", "b", "b", "a", "c" ], "a", 1)
- *REMOVE_VALUES(array, values)*: Removes all occurrences of any of the values specified
in array *values* from the array specified by *array*.
/* [ "b", "c", "e", "g" ] */
REMOVE_VALUES([ "a", "b", "c", "d", "e", "f", "g" ], [ "a", "f", "d" ])
- *REMOVE_NTH(array, position)*: Removes the element at position *position* from the
array specified by *array*. Positions start at 0. Negative positions are supported,
with -1 being the last array element. If *position* is out of bounds, the array is
returned unmodified. Otherwise, the modified array is returned.
/* [ "a", "c", "d", "e" ] */
REMOVE_NTH([ "a", "b", "c", "d", "e" ], 1)
/* [ "a", "b", "c", "e" ] */
REMOVE_NTH([ "a", "b", "c", "d", "e" ], -2)
Also see the [numeric functions](NumericFunctions.md) for more functions that work on
number arrays. Apart from that, AQL also offers several language constructs:
- [array operators](ArrayOperators.md) for array expansion and contraction,
- [array comparison operators](Operators.md#array-comparison-operators) to compare
- simple [array access](../Fundamentals/DataTypes.md#arrays) of individual elements,
- [array operators](../Advanced/ArrayOperators.md) for array expansion and contraction,
optionally with inline filter, limit and projection,
- [array comparison operators](../Operators.md#array-comparison-operators) to compare
each element in an array to a value or the elements of another array,
- operations for array manipulations like [FOR](../Operations/For.md),
- loop-based operations like [FOR](../Operations/For.md),
[SORT](../Operations/Sort.md), [LIMIT](../Operations/Limit.md),
as well as grouping with [COLLECT](../Operations/Collect.md),
which also offers efficient aggregation.
!SUBSECTION APPEND()
`APPEND(anyArray, values, unique) → newArray`
Add all elements of an array to another array. All values are added at the end of the
array (right side).
- **anyArray** (array): array with elements of arbitrary type
- **values** (array): array, whose elements shall be added to *anyArray*
- **unique** (bool, *optional*): if set to *true*, only those *values* will be added
that are not already contained in *anyArray*. The default is *false*.
- returns **newArray** (array): the modified array
```js
APPEND([ 1, 2, 3 ], [ 5, 6, 9 ])
// [ 1, 2, 3, 5, 6, 9 ]
APPEND([ 1, 2, 3 ], [ 3, 4, 5, 2, 9 ], true)
// [ 1, 2, 3, 4, 5, 9 ]
```
!SUBSECTION FIRST()
`FIRST(anyArray) → firstElement`
Get the first element of an array. It is the same as `anyArray[0]`.
- **anyArray** (array): array with elements of arbitrary type
- returns **firstElement** (any|null): the first element of *anyArray*, or *null* if
the array is empty.
!SUBSECTION FLATTEN()
`FLATTEN(anyArray, depth) → flatArray`
Turn an array of arrays into a flat array. All array elements in *array* will be
expanded in the result array. Non-array elements are added as they are. The function
will recurse into sub-arrays up to the specified depth. Duplicates will not be removed.
- **array** (array): array with elements of arbitrary type, including nested arrays
- **depth** (number, *optional*): flatten up to this many levels, the default is 1
- returns **flatArray** (array): a flattened array
```js
FLATTEN( [ 1, 2, [ 3, 4 ], 5, [ 6, 7 ], [ 8, [ 9, 10 ] ] ] )
// [ 1, 2, 3, 4, 5, 6, 7, 8, [ 9, 10 ] ]
```
To fully flatten the example array, use a *depth* of 2:
```js
FLATTEN( [ 1, 2, [ 3, 4 ], 5, [ 6, 7 ], [ 8, [ 9, 10 ] ] ], 2 )
// [ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 ]
```
!SUBSECTION INTERSECTION()
`INTERSECTION(array1, array2, ... arrayN) → newArray`
Return the intersection of all arrays specified. The result is an array of values that
occur in all arguments.
- **arrays** (array, *repeatable*): an arbitrary number of arrays as multiple arguments
(at least 2)
- returns **newArray** (array): a single array with only the elements, which exist in all
provided arrays. The element order is random. Duplicates are removed.
!SUBSECTION LAST()
`LAST(anyArray) → lastElement`
Get the last element of an array. It is the same as `anyArray[-1]`.
- **anyArray** (array): array with elements of arbitrary type
- returns **lastElement** (any|null): the last element of *anyArray* or *null* if the
array is empty.
!SUBSECTION LENGTH()
`LENGTH(anyArray) → length`
Determine the number of elements in an array,
the [number of attribute keys](Document.md#length) of an object / document,
the [amount of documents](Miscellaneous.md#length) in a collection,
or the [character length](String.md#length) of a string.
- **anyArray** (array): array with elements of arbitrary type
- returns **length** (number): the number of array elements in *anyArray*.
!SUBSECTION MINUS()
`MINUS(array1, array2, ... arrayN) → newArray`
Return the difference of all arrays specified.
- **arrays** (array, *repeatable*): an arbitrary number of arrays as multiple arguments,
at least two
- returns **newArray** (array): an array of values that occur in the first array,
but not in any of the subsequent arrays. The order of the result array is undefined
and should not be relied on. Duplicates will be removed.
!SUBSECTION NTH()
`NTH(anyArray, position) → nthElement`
Get the element of an array at a given position. It is the same as `anyArray[position]`
for positive positions, but does not support negative positions.
- **anyArray** (array): array with elements of arbitrary type
- **position** (number): position of desired element in array, positions start at 0
- returns **nthElement** (any|null): the array element at the given *position*.
If *position* is negative or beyond the upper bound of the array,
then *null* will be returned.
!SUBSECTION POP()
`POP(anyArray) → newArray`
Remove the element at the end (right side) of *array*.
- **anyArray** (array): an array with elements of arbitrary type
- returns **newArray** (array): *anyArray* without the last element. If it's already
empty or has only a single element left, an empty array is returned.
```js
POP( [ 1, 2, 3, 4 ] ) // [ 1, 2, 3 ]
POP( [ 1 ] ) // []
```
!SUBSECTION POSITION()
`POSITION(anyArray, search, returnIndex) → position`
Return whether *search* is contained in *array*. Optionally return the position.
- **anyArray** (array): the haystack, an array with elements of arbitrary type
- **search** (any): the needle, an element of arbitrary type
- **returnIndex** (bool, *optional*): if set to *true*, the position of the match
is returned instead of a boolean. The default is *false*.
- returns **position** (bool|number): *true* if *search* is contained in *anyArray*,
*false* otherwise. If *returnIndex* is enabled, the position of the match is
returned (positions start at 0), or *-1* if it's not found.
!SUBSECTION PUSH()
`PUSH(anyArray, value, unique) → newArray`
Append *value* to the array specified by *anyArray*.
- **anyArray** (array): array with elements of arbitrary type
- **value** (any): an element of arbitrary type
- **unique** (bool): if set to *true*, then *value* is not added if already
present in the array. The default is *false*.
- returns **newArray** (array): *anyArray* with *value* added at the end
(right side)
Note: The *unique* flag only controls if *value* is added if it's already present
in *anyArray*. Duplicate elements that already exist in *anyArray* will not be
removed. To make an array unique, use the [UNIQUE()](#unique) function.
```js
PUSH([ 1, 2, 3 ], 4)
// [ 1, 2, 3, 4 ]
PUSH([ 1, 2, 2, 3 ], 2, true)
// [ 1, 2, 2, 3 ]
```
!SUBSECTION REMOVE_NTH()
`REMOVE_NTH(anyArray, position) → newArray`
Remove the element at *position* from the *anyArray*.
- **anyArray** (array): array with elements of arbitrary type
- **position** (number): the position of the element to remove. Positions start
at 0. Negative positions are supported, with -1 being the last array element.
If *position* is out of bounds, the array is returned unmodified.
- returns **newArray** (array): *anyArray* without the element at *position*
```js
REMOVE_NTH( [ "a", "b", "c", "d", "e" ], 1 )
// [ "a", "c", "d", "e" ]
REMOVE_NTH( [ "a", "b", "c", "d", "e" ], -2 )
// [ "a", "b", "c", "e" ]
```
!SUBSECTION REMOVE_VALUE()
`REMOVE_VALUE(anyArray, value, limit) → newArray`
Remove all occurrences of *value* in *anyArray*. Optionally with a *limit*
to the number of removals.
- **anyArray** (array): array with elements of arbitrary type
- **value** (any): an element of arbitrary type
- **limit** (number, *optional*): cap the number of removals to this value
- returns **newArray** (array): *anyArray* with *value* removed
```js
REMOVE_VALUE( [ "a", "b", "b", "a", "c" ], "a" )
// [ "b", "b", "c" ]
REMOVE_VALUE( [ "a", "b", "b", "a", "c" ], "a", 1 )
// [ "b", "b", "a", "c" ]
```
!SUBSECTION REMOVE_VALUES()
`REMOVE_VALUES(anyArray, values) → newArray`
Remove all occurrences of any of the *values* from *anyArray*.
- **anyArray** (array): array with elements of arbitrary type
- **values** (array): an array with elements of arbitrary type, that shall
be removed from *anyArray*
- returns **newArray** (array): *anyArray* with all individual *values* removed
```js
REMOVE_VALUES( [ "a", "a", "b", "c", "d", "e", "f" ], [ "a", "f", "d" ] )
// [ "b", "c", "e" ]
```
!SUBSECTION REVERSE()
`REVERSE(anyArray) → reversedArray`
Return an array with its elements reversed.
- **anyArray** (array): array with elements of arbitrary type
- returns **reversedArray** (array): a new array with all elements of *anyArray* in
reversed order
!SUBSECTION SHIFT()
`SHIFT(anyArray) → newArray`
Remove the element at the start (left side) of *anyArray*.
- **anyArray** (array): array with elements of arbitrary type
- returns **newArray** (array): *anyArray* without the left-most element. If *anyArray*
is already empty or has only one element left, an empty array is returned.
```js
SHIFT( [ 1, 2, 3, 4 ] ) // [ 2, 3, 4 ]
SHIFT( [ 1 ] ) // []
```
!SUBSECTION SLICE()
`SLICE(anyArray, start, length) → newArray`
Extract a slice of *anyArray*.
- **anyArray** (array): array with elements of arbitrary type
- **start** (number): start extraction at this element. Positions start at 0.
Negative values indicate positions from the end of the array.
- **length** (number, *optional*): extract up to *length* elements, or all
elements from *start* up to *length* if negative (exclusive)
- returns **newArray** (array): the specified slice of *anyArray*. If *length*
is not specified, all array elements starting at *start* will be returned.
```js
SLICE( [ 1, 2, 3, 4, 5 ], 0, 1 ) // [ 1 ]
SLICE( [ 1, 2, 3, 4, 5 ], 1, 2 ) // [ 2, 3 ]
SLICE( [ 1, 2, 3, 4, 5 ], 3 ) // [ 4, 5 ]
SLICE( [ 1, 2, 3, 4, 5 ], 1, -1 ) // [ 2, 3, 4 ]
SLICE( [ 1, 2, 3, 4, 5 ], 0, -2 ) // [ 1, 2, 3 ]
SLICE( [ 1, 2, 3, 4, 5 ], -3, 2 ) // [ 3, 4 ]
```
!SUBSECTION UNION()
`UNION(array1, array2, ... arrayN) → newArray`
Return the union of all arrays specified.
- **arrays** (array, *repeatable*): an arbitrary number of arrays as multiple
arguments, at least 2
- returns **newArray** (array): all array elements combined in a single array,
in any order
```js
UNION(
[ 1, 2, 3 ],
[ 1, 2 ]
)
// [ 1, 2, 3, 1, 2 ]
```
Note: No duplicates will be removed. In order to remove duplicates, please use
either [UNION_DISTINCT()](#uniondistinct) or apply [UNIQUE()](#unique) on the
result of *UNION()*:
```js
UNIQUE(
UNION(
[ 1, 2, 3 ],
[ 1, 2 ]
)
)
// [ 1, 2, 3 ]
```
!SUBSECTION UNION_DISTINCT()
`UNION_DISTINCT(array1, array2, ... arrayN) → newArray`
Return the union of distinct values of all arrays specified.
- **arrays** (array, *repeatable*): an arbitrary number of arrays as multiple
arguments, at least 2
- returns **newArray** (array): the elements of all given arrays in a single
array, without duplicates, in any order
```js
UNION_DISTINCT(
[ 1, 2, 3 ],
[ 1, 2 ]
)
// [ 1, 2, 3 ]
```
!SUBSECTION UNIQUE()
`UNIQUE(anyArray) → newArray`
Return all unique elements in *anyArray*. To determine uniqueness, the
function will use the comparison order.
- **anyArray** (array): array with elements of arbitrary type
- returns **newArray** (array): *anyArray* without duplicates, in any order
!SUBSECTION UNSHIFT()
`UNSHIFT(anyArray, value, unique) → newArray`
Prepend *value* to *anyArray*.
- **anyArray** (array): array with elements of arbitrary type
- **value** (any): an element of arbitrary type
- **unique** (bool): if set to *true*, then *value* is not added if already
present in the array. The default is *false*.
- returns **newArray** (array): *anyArray* with *value* added at the start
(left side)
Note: The *unique* flag only controls if *value* is added if it's already present
in *anyArray*. Duplicate elements that already exist in *anyArray* will not be
removed. To make an array unique, use the [UNIQUE()](#unique) function.
```js
UNSHIFT( [ 1, 2, 3 ], 4 ) // [ 4, 1, 2, 3 ]
UNSHIFT( [ 1, 2, 3 ], 2, true ) // [ 1, 2, 3 ]
```

View File

@ -1,67 +1,83 @@
!CHAPTER Date functions
AQL offers functionality to work with dates. Dates are no data types of their own in
AQL (neither are they in JSON, which is often used as a format to ship data into and
out of ArangoDB). Instead, dates in AQL are internally represented by either numbers
(timestamps) or strings. The date functions in AQL provide mechanisms to convert from
a numeric timestamp to a string representation and vice versa.
AQL (neither are they in JSON, which is usually used as a format to ship data into and
out of ArangoDB). Instead, dates in AQL are typically represented by either numbers
(timestamps) or strings.
There are two date functions in AQL to create dates for further use:
- *DATE_TIMESTAMP(date)*: Creates a UTC timestamp value from *date*. The return
value has millisecond precision. To convert the return value to seconds, divide
it by 1000.
- *DATE_TIMESTAMP(year, month, day, hour, minute, second, millisecond)*:
Same as before, but allows specifying the individual date components separately.
All parameters after *day* are optional.
- *DATE_ISO8601(date)*: Returns an ISO8601 date time string from *date*.
The date time string will always use UTC time, indicated by the *Z* at its end.
- *DATE_ISO8601(year, month, day, hour, minute, second, millisecond)*:
same as before, but allows specifying the individual date components separately.
All parameters after *day* are optional.
These two above date functions accept the following input values:
All functions that require dates as arguments accept the following input values:
- numeric timestamps, indicating the number of milliseconds elapsed since the UNIX
epoch (i.e. January 1st 1970 00:00:00 UTC).
An example timestamp value is *1399472349522*, which translates to
epoch (i.e. January 1st 1970 00:00:00.000 UTC).
An example timestamp value is *1399472349522*, which translates to
*2014-05-07T14:19:09.522Z*.
- date time strings in formats *YYYY-MM-DDTHH:MM:SS.MMM*,
- date time strings in formats *YYYY-MM-DDTHH:MM:SS.MMM*,
*YYYY-MM-DD HH:MM:SS.MMM*, or *YYYY-MM-DD*. Milliseconds are always optional.
A timezone difference may optionally be added at the end of the string, with the
hours and minutes that need to be added or subtracted to the date time value.
For example, *2014-05-07T14:19:09+01:00* can be used to specify a one hour offset,
and *2014-05-07T14:19:09+07:30* can be specified for seven and half hours offset.
and *2014-05-07T14:19:09+07:30* can be specified for seven and half hours offset.
Negative offsets are also possible. Alternatively to an offset, a *Z* can be used
to indicate UTC / Zulu time.
An example value is *2014-05-07T14:19:09.522Z* meaning May 7th 2014, 14:19:09 and
522 milliseconds, UTC / Zulu time. Another example value without time component is
to indicate UTC / Zulu time.
An example value is *2014-05-07T14:19:09.522Z* meaning May 7th 2014, 14:19:09 and
522 milliseconds, UTC / Zulu time. Another example value without time component is
*2014-05-07Z*.
Please note that if no timezone offset is specified in a date string, ArangoDB will
assume UTC time automatically. This is done to ensure portability of queries across
servers with different timezone settings, and because timestamps will always be
UTC-based.
UTC-based.
- individual date components as separate function arguments, in the following order:
- year
- month
- day
- hour
- minute
- second
- millisecond
```js
DATE_HOUR( 2 * 60 * 60 * 1000 ) // 2
DATE_HOUR("1970-01-01T02:00:00") // 2
```
All components following *day* are optional and can be omitted. Note that no
timezone offsets can be specified when using separate date components, and UTC /
Zulu time will be used.
The following calls to *DATE_TIMESTAMP* are equivalent and will all return
You are free to store age determinations of specimens, incomplete or fuzzy dates and
the like in different, more appropriate ways of course. AQL's date functions will
most certainly not be of any help for such dates, but you can still use language
constructs like [SORT](../Operations/Sort.md) (which also supports sorting of arrays)
and [indexes](../../Users/Indexing/index.html) like skiplists.
!SECTION Current date and time
!SUBSECTION DATE_NOW()
`DATE_NOW() → timestamp`
Get the current date time as numeric timestamp.
- returns **timestamp** (number): the current time as a timestamp.
The return value has millisecond precision. To convert the return value to
seconds, divide it by 1000.
Note that this function is evaluated on every invocation and may return
different values when invoked multiple times in the same query. Assign it
to a variable to use the exact same timestamp multiple times.
!SECTION Conversion
*DATE_TIMESTAMP()* and *DATE_ISO8601()* can be used to convert ISO 8601 date time
strings to numeric timestamps and numeric timestamps to ISO 8601 date time strings.
Both also support individual date components as separate function arguments,
in the following order:
- year
- month
- day
- hour
- minute
- second
- millisecond
All components following *day* are optional and can be omitted. Note that no
timezone offsets can be specified when using separate date components, and UTC /
Zulu time will be used.
The following calls to *DATE_TIMESTAMP()* are equivalent and will all return
*1399472349522*:
```js
@ -73,7 +89,7 @@ DATE_TIMESTAMP(2014, 5, 7, 14, 19, 9, 522)
DATE_TIMESTAMP(1399472349522)
```
The same is true for calls to *DATE_ISO8601* that also accepts variable input
The same is true for calls to *DATE_ISO8601()* that also accepts variable input
formats:
```js
@ -85,66 +101,297 @@ DATE_ISO8601(1399472349522)
The above functions are all equivalent and will return *"2014-05-07T14:19:09.522Z"*.
The following date functions can be used with dates created by *DATE_TIMESTAMP* and
*DATE_ISO8601*:
!SUBSECTION DATE_ISO8601()
- *DATE_DAYOFWEEK(date)*: Returns the weekday number of *date*. The
return values have the following meanings:
- 0: Sunday
- 1: Monday
- 2: Tuesday
- 3: Wednesday
- 4: Thursday
- 5: Friday
- 6: Saturday
`DATE_ISO8601(date) → dateString`
- *DATE_YEAR(date)*: Returns the year part of *date* as a number.
Return an ISO 8601 date time string from *date*.
The date time string will always use UTC / Zulu time, indicated by the *Z* at its end.
- *DATE_MONTH(date)*: Returns the month part of *date* as a number.
- **date** (number|string): numeric timestamp or ISO 8601 date time string
- returns **dateString**: date and time expressed according to ISO 8601, in Zulu time
- *DATE_DAY(date)*: Returns the day part of *date* as a number.
`DATE_ISO8601(year, month, day, hour, minute, second, millisecond) → dateString`
- *DATE_HOUR(date)*: Returns the hour part of *date* as a number.
Return an ISO 8601 date time string from *date*, but allows you to specify the individual
date components separately. All parameters after *day* are optional.
- *DATE_MINUTE(date)*: Returns the minute part of *date* as a number.
- **year** (number): typically in the range 0..9999, e.g. *2017*
- **month** (number): 1..12 for January through December
(unlike JavaScript, which uses the slightly confusing range 0..11)
- **day** (number): 1..31 (upper bound depends on number of days in month)
- **hour** (number, *optional*): 0..23
- **minute** (number, *optional*): 0..59
- **second** (number, *optional*): 0..59
- **milliseconds** (number, *optional*): 0..999
- returns **dateString**: date and time expressed according to ISO 8601, in Zulu time
- *DATE_SECOND(date)*: Returns the seconds part of *date* as a number.
!SUBSECTION DATE_TIMESTAMP()
- *DATE_MILLISECOND(date)*: Returns the milliseconds part of *date* as a number.
`DATE_TIMESTAMP(date) → timestamp`
- *DATE_DAYOFYEAR(date)*: Returns the day of year number of *date*.
Create a UTC timestamp value from *date*. The return value has millisecond precision.
To convert the return value to seconds, divide it by 1000.
- **date** (number|string): numeric timestamp or ISO 8601 date time string
- returns **timestamp** (number): numeric timestamp
`DATE_TIMESTAMP(year, month, day, hour, minute, second, millisecond) → timestamp`
Create a UTC timestamp value, but allows you to specify the individual date components
separately. All parameters after *day* are optional.
- **year** (number): typically in the range 0..9999, e.g. *2017*
- **month** (number): 1..12 for January through December
(unlike JavaScript, which uses the slightly confusing range 0..11)
- **day** (number): 1..31 (upper bound depends on number of days in month)
- **hour** (number, *optional*): 0..23
- **minute** (number, *optional*): 0..59
- **second** (number, *optional*): 0..59
- **milliseconds** (number, *optional*): 0..999
- returns **timestamp** (number): numeric timestamp
Negative values are not allowed, result in *null* and cause a warning.
Values greater than the upper range bound overflow to the larger components
(e.g. an hour of 26 is automatically turned into an additional day and two hours):
```js
DATE_TIMESTAMP(2016, 12, -1) // returns null and issues a warning
DATE_TIMESTAMP(2016, 2, 32) // returns 1456963200000, which is March 3rd, 2016
DATE_TIMESTAMP(1970, 1, 1, 26) // returns 93600000, which is January 2nd, 1970, at 2 a.m.
```
!SUBSECTION IS_DATESTRING()
`IS_DATESTRING(value) → bool`
Check if an arbitrary string is suitable for interpretation as date time string.
- **value** (string): an arbitrary string
- returns **bool** (bool): *true* if *value* is a string that can be used
in a date function. This includes partial dates such as *2015* or *2015-10* and
strings containing invalid dates such as *2015-02-31*. The function will return
*false* for all non-string values, even if some of them may be usable in date
functions.
!SECTION Processing
!SUBSECTION DATE_DAYOFWEEK()
`DATE_DAYOFWEEK(date) → weekdayNumber`
Return the weekday number of *date*.
- **date** (number|string): numeric timestamp or ISO 8601 date time string
- returns **weekdayNumber** (number): 0..6 as follows:
- 0 Sunday
- 1 Monday
- 2 Tuesday
- 3 Wednesday
- 4 Thursday
- 5 Friday
- 6 Saturday
!SUBSECTION DATE_YEAR()
`DATE_YEAR(date) → year`
Return the year of *date*.
- **date** (number|string): numeric timestamp or ISO 8601 date time string
- returns **year** (number): the year part of *date* as a number
!SUBSECTION DATE_MONTH()
`DATE_MONTH(date) → month`
Return the month of *date*.
- **date** (number|string): numeric timestamp or ISO 8601 date time string
- returns **month** (number): the month part of *date* as a number
!SUBSECTION DATE_DAY()
`DATE_DAY(date) → day`
Return the day of *date*.
- **date** (number|string): numeric timestamp or ISO 8601 date time string
- returns **day** (number): the day part of *date* as a number
!SUBSECTION DATE_HOUR()
`DATE_HOUR(date) → hour`
Return the hour of *date*.
- **date** (number|string): numeric timestamp or ISO 8601 date time string
- returns **hour** (number): the hour part of *date* as a number
!SUBSECTION DATE_MINUTE()
`DATE_MINUTE(date) → minute`
Return the minute of *date*.
- **date** (number|string): numeric timestamp or ISO 8601 date time string
- returns **minute** (number): the minute part of *date* as a number
!SUBSECTION DATE_SECOND()
`DATE_SECOND(date) → second`
Return the second of *date*.
- **date** (number|string): numeric timestamp or ISO 8601 date time string
- returns **second** (number): the seconds part of *date* as a number
!SUBSECTION DATE_MILLISECOND()
`DATE_MILLISECOND(date) → millisecond`
Return the millisecond of *date*.
- **date** (number|string): numeric timestamp or ISO 8601 date time string
- returns **millisecond** (number): the milliseconds part of *date* as a number
!SUBSECTION DATE_DAYOFYEAR()
`DATE_DAYOFYEAR(date) → dayOfYear`
Return the day of year of *date*.
- **date** (number|string): numeric timestamp or ISO 8601 date time string
- returns **dayOfYear** (number): the day of year number of *date*.
The return values range from 1 to 365, or 366 in a leap year respectively.
- *DATE_ISOWEEK(date)*: Returns the ISO week date of *date*. The return values
!SUBSECTION DATE_ISOWEEK()
`DATE_ISOWEEK(date) → weekDate`
Return the week date of *date* according to ISO 8601.
- **date** (number|string): numeric timestamp or ISO 8601 date time string
- returns **weekDate** (number): the ISO week date of *date*. The return values
range from 1 to 53. Monday is considered the first day of the week. There are no
fractional weeks, thus the last days in December may belong to the first week of
the next year, and the first days in January may be part of the previous year's
last week.
- *DATE_LEAPYEAR(date)*: Returns whether the year of *date* is a leap year.
!SUBSECTION DATE_LEAPYEAR()
- *DATE_QUARTER(date)*: Returns the quarter of the given date (1-based):
`DATE_LEAPYEAR(date) → leapYear`
- 1: January, February, March
- 2: April, May, June
- 3: July, August, September
- 4: October, November, December
Return whether *date* is in a leap year.
- *DATE_DAYS_IN_MONTH(date)*: Returns the number of days in *date*'s month (28..31).
The following other date functions are also available:
- **date** (number|string): numeric timestamp or ISO 8601 date time string
- returns **leapYear** (bool): *true* if *date* is in a leap year, *false* otherwise
- *DATE_NOW()*: Returns the current time as a timestamp.
The return value has millisecond precision. To convert the return value to
seconds, divide it by 1000.
Note that this function is evaluated on every invocation and may return
different values when invoked multiple times in the same query.
!SUBSECTION DATE_QUARTER()
- *DATE_ADD(date, amount, unit)*: Adds *amount* given in *unit* to *date* and
returns the calculated date.
`DATE_QUARTER(date) → quarter`
*unit* can be either of the following to specify the time unit to add or
Return which quarter *date* belongs to.
- **date** (number|string): numeric timestamp or ISO 8601 date time string
- returns **quarter** (number): the quarter of the given date (1-based):
- 1 January, February, March
- 2 April, May, June
- 3 July, August, September
- 4 October, November, December
!SUBSECTION DATE_DAYS_IN_MONTH()
Return the number of days in the month of *date*.
`DATE_DAYS_IN_MONTH(date) → daysInMonth`
- **date** (number|string): numeric timestamp or ISO 8601 date time string
- returns **daysInMonth** (number): the number of days in *date*'s month (28..31)
!SUBSECTION DATE_FORMAT()
`DATE_FORMAT(date, format) → str`
Format a date according to the given format string.
- **date** (string|number): a date string or timestamp
- **format** (string): a format string, see below
- returns **str** (string): a formatted date string
*format* supports the following placeholders (case-insensitive):
- %t timestamp, in milliseconds since midnight 1970-01-01
- %z ISO date (0000-00-00T00:00:00.000Z)
- %w day of week (0..6)
- %y year (0..9999)
- %yy year (00..99), abbreviated (last two digits)
- %yyyy year (0000..9999), padded to length of 4
- %yyyyyy year (-009999 .. +009999), with sign prefix and padded to length of 6
- %m month (1..12)
- %mm month (01..12), padded to length of 2
- %d day (1..31)
- %dd day (01..31), padded to length of 2
- %h hour (0..23)
- %hh hour (00..23), padded to length of 2
- %i minute (0..59)
- %ii minute (00..59), padded to length of 2
- %s second (0..59)
- %ss second (00..59), padded to length of 2
- %f millisecond (0..999)
- %fff millisecond (000..999), padded to length of 3
- %x day of year (1..366)
- %xxx day of year (001..366), padded to length of 3
- %k ISO week date (1..53)
- %kk ISO week date (01..53), padded to length of 2
- %l leap year (0 or 1)
- %q quarter (1..4)
- %a days in month (28..31)
- %mmm abbreviated English name of month (Jan..Dec)
- %mmmm English name of month (January..December)
- %www abbreviated English name of weekday (Sun..Sat)
- %wwww English name of weekday (Sunday..Saturday)
- %& special escape sequence for rare occasions
- %% literal %
- % ignored
`%yyyy` does not enforce a length of 4 for years before 0 and past 9999.
The same format as for `%yyyyyy` will be used instead. `%yy` preserves the
sign for negative years and may thus return 3 characters in total.
Single `%` characters will be ignored. Use `%%` for a literal `%`. To resolve
ambiguities like in `%mmonth` (unpadded month number + the string "month")
between `%mm` + "onth" and `%m` + "month", use the escape sequence `%&`:
`%m%&month`.
Note that this is a rather costly operation and may not be suitable for large
datasets (like over 1 million dates). If possible, avoid formatting dates on
server-side and leave it up to the client to do so. This function should only
be used for special date comparisons or to store the formatted dates in the
database. For better performance, use the primitive `DATE_*()` functions
together with `CONCAT()` if possible.
Examples:
```js
DATE_FORMAT(DATE_NOW(), "%q/%yyyy") // quarter and year (e.g. "3/2015")
DATE_FORMAT(DATE_NOW(), "%dd.%mm.%yyyy %hh:%ii:%ss,%fff") // e.g. "18.09.2015 15:30:49,374"
DATE_FORMAT("1969", "Summer of '%yy") // "Summer of '69"
DATE_FORMAT("2016", "%%l = %l") // "%l = 1" (2016 is a leap year)
DATE_FORMAT("2016-03-01", "%xxx%") // "063", trailing % ignored
```
!SECTION Comparison and calculation
!SUBSECTION DATE_ADD()
`DATE_ADD(date, amount, unit) → isoDate`
Add *amount* given in *unit* to *date* and return the calculated date.
- **date** (number|string): numeric timestamp or ISO 8601 date time string
- **amount** (number|string): number of *unit*s to add (positive value) or
subtract (negative value). It is recommended to use positive values only,
and use [DATE_SUBTRACT()](#datesubtract) for subtractions instead.
- **unit** (string): either of the following to specify the time unit to add or
subtract (case-insensitive):
- y, year, years
- m, month, months
@ -154,12 +401,7 @@ The following other date functions are also available:
- i, minute, minutes
- s, second, seconds
- f, millisecond, milliseconds
*amount* is the number of *unit*s to add (positive value) or subtract
(negative value). It is recommended to use positive values only, and use
`DATE_SUBTRACT()` for subtractions instead.
Examples:
- returns **isoDate** (string): the calculated ISO 8601 date time string
```js
DATE_ADD(DATE_NOW(), -1, "day") // yesterday; also see DATE_SUBTRACT()
@ -170,23 +412,28 @@ DATE_ADD(DATE_TIMESTAMP(DATE_YEAR(DATE_NOW()), 12, 24), -4, "years") // Christma
DATE_ADD(DATE_ADD("2016-02", "month", 1), -1, "day") // last day of February (29th, because 2016 is a leap year!)
```
You may also pass an ISO duration string as *amount* and leave out *unit*.
The format is `P_Y_M_W_DT_H_M_._S`, where underscores stand for digits and
letters for time intervals - except for `P` (period) and `T` (time).
The meaning of the other letters are:
- Y: years
- M: months (if before T)
- W: weeks
- D: days
- H: hours
- M: minutes (if after T)
- S: seconds (optionally with 3 decimal places for milliseconds)
`DATE_ADD(date, isoDuration) → isoDate`
The string must be prefixed by a `P`. A separating `T` is only required if
`H`, `M` and/or `S` are specified. You only need to specify the needed pairs
of letters and numbers.
You may also pass an ISO duration string as *amount* and leave out *unit*.
Examples:
- **date** (number|string): numeric timestamp or ISO 8601 date time string
- **isoDuration** (string): an ISO 8601 duration string to add to *date*, see below
- returns **isoDate** (string): the calculated ISO 8601 date time string
The format is `P_Y_M_W_DT_H_M_._S`, where underscores stand for digits and
letters for time intervals - except for the separators `P` (period) and `T` (time).
The meaning of the other letters are:
- Y years
- M months (if before T)
- W weeks
- D days
- H hours
- M minutes (if after T)
- S seconds (optionally with 3 decimal places for milliseconds)
The string must be prefixed by a `P`. A separating `T` is only required if
`H`, `M` and/or `S` are specified. You only need to specify the needed pairs
of letters and numbers.
```js
DATE_ADD(DATE_NOW(), "P1Y") // add 1 year
@ -197,15 +444,56 @@ DATE_ADD("2000-01-01", "PT30M44.4S") // add 30 minutes, 44 seconds and 400 ms
DATE_ADD("2000-01-01", "P1Y2M3W4DT5H6M7.89S") // add a bit of everything
```
- *DATE_SUBTRACT(date, amount, unit)*: Subtracts *amount* given in *unit* from
*date* and returns the calculated date.
It works the same as *DATE_ADD()*, except that it subtracts. It is equivalent
to calling *DATE_ADD()* with a negative amount, except that *DATE_SUBTRACT()*
can also subtract ISO durations. Note that negative ISO durations are not
supported (i.e. starting with `-P`, like `-P1Y`).
!SUBSECTION DATE_SUBTRACT()
Examples:
`DATE_SUBTRACT(date, amount, unit) → isoDate`
Subtract *amount* given in *unit* from *date* and return the calculated date.
It works the same as [DATE_ADD()](#dateadd), except that it subtracts. It is
equivalent to calling *DATE_ADD()* with a negative amount, except that
*DATE_SUBTRACT()* can also subtract ISO durations. Note that negative ISO
durations are not supported (i.e. starting with `-P`, like `-P1Y`).
- **date** (number|string): numeric timestamp or ISO 8601 date time string
- **amount** (number|string): number of *unit*s to subtract (positive value) or
add (negative value). It is recommended to use positive values only,
and use [DATE_ADD()](#dateadd) for additions instead.
- **unit** (string): either of the following to specify the time unit to add or
subtract (case-insensitive):
- y, year, years
- m, month, months
- w, week, weeks
- d, day, days
- h, hour, hours
- i, minute, minutes
- s, second, seconds
- f, millisecond, milliseconds
- returns **isoDate** (string): the calculated ISO 8601 date time string
`DATE_SUBTRACT(date, isoDuration) → isoDate`
You may also pass an ISO duration string as *amount* and leave out *unit*.
- **date** (number|string): numeric timestamp or ISO 8601 date time string
- **isoDuration** (string): an ISO 8601 duration string to subtract from *date*,
see below
- returns **isoDate** (string): the calculated ISO 8601 date time string
The format is `P_Y_M_W_DT_H_M_._S`, where underscores stand for digits and
letters for time intervals - except for the separators `P` (period) and `T` (time).
The meaning of the other letters are:
- Y years
- M months (if before T)
- W weeks
- D days
- H hours
- M minutes (if after T)
- S seconds (optionally with 3 decimal places for milliseconds)
The string must be prefixed by a `P`. A separating `T` is only required if
`H`, `M` and/or `S` are specified. You only need to specify the needed pairs
of letters and numbers.
```js
DATE_SUBTRACT(DATE_NOW(), 1, "day") // yesterday
@ -215,11 +503,16 @@ DATE_SUBTRACT(DATE_NOW(), "P4D") // four days ago
DATE_SUBTRACT(DATE_NOW(), "PT1H3M") // 1 hour and 30 minutes ago
```
- *DATE_DIFF(date1, date2, unit, asFloat)*: Calculate the difference
between two dates in given time *unit*, optionally with decimal places.
Returns a negative value if *date1* is greater than / after *date2*.
!SUBSECTION DATE_DIFF()
*unit* can be either of the following to specify the time unit to return the
`DATE_DIFF(date1, date2, unit, asFloat) → diff`
Calculate the difference between two dates in given time *unit*, optionally
with decimal places.
- **date1** (number|string): numeric timestamp or ISO 8601 date time string
- **date2** (number|string): numeric timestamp or ISO 8601 date time string
- **unit** (string): either of the following to specify the time unit to return the
difference in (case-insensitive):
- y, year, years
- m, month, months
@ -229,27 +522,38 @@ DATE_SUBTRACT(DATE_NOW(), "PT1H3M") // 1 hour and 30 minutes ago
- i, minute, minutes
- s, second, seconds
- f, millisecond, milliseconds
- **asFloat** (boolean, *optional*): if set to *true*, decimal places will be
preserved in the result. The default is *false* and an integer is returned.
- returns **diff** (number): the calculated difference as number in *unit*.
The value will be negative if *date2* is before *date1*.
- *DATE_COMPARE(date1, date2, unitRangeStart, unitRangeEnd)*: Compare two
partial dates and return true if they match, false otherwise. The parts to
compare are defined by a range of time units.
The full range is: years, months, days, hours, minutes, seconds, milliseconds.
Pass the unit to start from as *unitRangeStart*, and the unit to end with as
*unitRangeEnd*. All units in between will be compared. Leave out *unitRangeEnd*
to only compare *unitRangeStart*. You can refer to the units as:
- y, year, years
- m, month, months
- d, day, days
- h, hour, hours
- i, minute, minutes
- s, second, seconds
- f, millisecond, milliseconds
!SUBSECTION DATE_COMPARE()
An error is raised if *unitRangeEnd* is a unit before *unitRangeStart*.
`DATE_COMPARE(date1, date2, unitRangeStart, unitRangeEnd) → bool`
Examples:
Check if two partial dates match.
- **date1** (number|string): numeric timestamp or ISO 8601 date time string
- **date2** (number|string): numeric timestamp or ISO 8601 date time string
- **unitRangeStart** (string): unit to start from, see below
- **unitRangeEnd** (string, *optional*): unit to end with, leave out to only
compare the component as specified by *unitRangeStart*. An error is raised if
*unitRangeEnd* is a unit before *unitRangeStart*.
- returns **bool** (bool): *true* if the dates match, *false* otherwise
The parts to compare are defined by a range of time units. The full range is:
years, months, days, hours, minutes, seconds, milliseconds (in this order).
All components of *date1* and *date2* as specified by the range will be compared.
You can refer to the units as:
- y, year, years
- m, month, months
- d, day, days
- h, hour, hours
- i, minute, minutes
- s, second, seconds
- f, millisecond, milliseconds
```js
// Compare months and days, true on birthdays if you're born on 4th of April
@ -263,13 +567,13 @@ DATE_COMPARE("1984-02-29", DATE_NOW(), "months", "days")
DATE_COMPARE("2001-01-01T15:30:45.678Z", "2001-01-01T08:08:08.008Z", "years", "days")
```
You can directly compare ISO date **strings** if you want to find dates before or
after a certain date, or in between two dates (`>=`, `>`, `<`, `<=`).
No special date function is required. Equality tests (`==` and `!=`) will only
match the exact same date and time however. You may use `SUBSTRING()` to
compare partial date strings, `DATE_COMPARE()` is basically a convenience
function for that. However, neither is really required to limit a search to a
certain day for instance:
You can directly compare ISO date **strings** if you want to find dates before or
after a certain date, or in between two dates (`>=`, `>`, `<`, `<=`).
No special date function is required. Equality tests (`==` and `!=`) will only
match the exact same date and time however. You may use `SUBSTRING()` to
compare partial date strings, `DATE_COMPARE()` is basically a convenience
function for that. However, neither is really required to limit a search to a
certain day as demonstrated here:
```js
FOR doc IN coll
@ -277,17 +581,19 @@ FOR doc IN coll
RETURN doc
```
Every ISO date on that day is greater than `2015-05-15` in a string comparison
(e.g. `2015-05-15T11:30:00.000Z`). The time components will be "ignored". The
equal sign in `>=` merely helps to express the semantic. Dates before
`2015-05-15` are less and therefore filtered out. The second condition works
likewise. The query will return every document with `date` ranging from
`2015-05-15T00:00:00.000Z` to `2015-05-15T23:59:59.999Z`. It would also include
`2015-05-15T24:00:00.000Z`, but that date is actually `2015-05-16T00:00:00.000Z`
and can only occur if inserted manually.
Leap days in leap years (29th of February) must be always handled manually,
if you require so (e.g. birthday checks):
Every ISO date on that day is greater than or equal to `2015-05-15` in a string
comparison (e.g. `2015-05-15T11:30:00.000Z`). Dates before `2015-05-15` are smaller
and therefore filtered out by the first condition. Every date past `2015-05-15` is
greater than this date in a string comparison, and therefore filtered out by the
second condition. The result is that the time components in the dates you compare
with are "ignored". The query will return every document with *date* ranging from
`2015-05-15T00:00:00.000Z` to `2015-05-15T23:59:59.999Z`. It would also include
`2015-05-15T24:00:00.000Z`, but that date is actually `2015-05-16T00:00:00.000Z`
and can only occur if inserted manually (you may want to pass dates through
[DATE_ISO8601()](#dateiso8601) to ensure a correct date representation).
Leap days in leap years (29th of February) must be always handled manually,
if you require so (e.g. birthday checks):
```js
LET today = DATE_NOW()
@ -308,73 +614,6 @@ FOR user IN users
RETURN user
```
- *DATE_FORMAT(date, format)*: Format a date according to the given format string.
It supports the following placeholders (case-insensitive):
- %t: timestamp, in milliseconds since midnight 1970-01-01
- %z: ISO date (0000-00-00T00:00:00.000Z)
- %w: day of week (0..6)
- %y: year (0..9999)
- %yy: year (00..99), abbreviated (last two digits)
- %yyyy: year (0000..9999), padded to length of 4
- %yyyyyy: year (-009999 .. +009999), with sign prefix and padded to length of 6
- %m: month (1..12)
- %mm: month (01..12), padded to length of 2
- %d: day (1..31)
- %dd: day (01..31), padded to length of 2
- %h: hour (0..23)
- %hh: hour (00..23), padded to length of 2
- %i: minute (0..59)
- %ii: minute (00..59), padded to length of 2
- %s: second (0..59)
- %ss: second (00..59), padded to length of 2
- %f: millisecond (0..999)
- %fff: millisecond (000..999), padded to length of 3
- %x: day of year (1..366)
- %xxx: day of year (001..366), padded to length of 3
- %k: ISO week date (1..53)
- %kk: ISO week date (01..53), padded to length of 2
- %l: leap year (0 or 1)
- %q: quarter (1..4)
- %a: days in month (28..31)
- %mmm: abbreviated English name of month (Jan..Dec)
- %mmmm: English name of month (January..December)
- %www: abbreviated English name of weekday (Sun..Sat)
- %wwww: English name of weekday (Sunday..Saturday)
- %&: special escape sequence for rare occasions
- %%: literal %
- %: ignored
`%yyyy` does not enforce a length of 4 for years before 0 and past 9999.
The same format as for `%yyyyyy` will be used instead. `%yy` preserves the
sign for negative years and may thus return 3 characters in total.
Single `%` characters will be ignored. Use `%%` for a literal `%`. To resolve
ambiguities like in `%mmonth` (unpadded month number + the string "month")
between `%mm` + "onth" and `%m` + "month", use the escape sequence `%&`:
`%m%&month`.
Note that this is a rather costly operation and may not be suitable for large
datasets (like over 1 million dates). If possible, avoid formatting dates on
server-side and leave it up to the client to do so. This function should only
be used for special date comparisons or to store the formatted dates in the
database. For better performance, use the primitive `DATE_*()` functions
together with `CONCAT()` if possible.
Examples:
```js
DATE_FORMAT(DATE_NOW(), "%q/%yyyy") // quarter and year (e.g. "3/2015")
DATE_FORMAT(DATE_NOW(), "%dd.%mm.%yyyy %hh:%ii:%ss,%fff") // e.g. "18.09.2015 15:30:49,374"
DATE_FORMAT("1969", "Summer of '%yy") // "Summer of '69"
DATE_FORMAT("2016", "%%l = %l") // "%l = 1" (2016 is a leap year)
DATE_FORMAT("2016-03-01", "%xxx%") // "063", trailing % ignored
```
- *IS_DATESTRING(value)*: Returns true if *value* is a string that can be used
in a date function. This includes partial dates such as *2015* or *2015-10* and
strings containing invalid dates such as *2015-02-31*. The function will return
false for all non-string values, even if some of them may be usable in date functions.
!SECTION Working with dates and indices
There are two recommended ways to store timestamps in ArangoDB:
@ -382,8 +621,12 @@ There are two recommended ways to store timestamps in ArangoDB:
- as [Epoch number](https://en.wikipedia.org/wiki/Epoch_%28reference_date%29)
The sort order of both is identical due to the sort properties of ISO date strings.
Therefore, you can work with [skiplist indices](../IndexHandling/Skiplist.md) and use
string comparisons (less than, greater than, in, equality) to express time ranges in your queries:
You can't mix both types, numbers and strings, in a single attribute however.
You can use [skiplist indices](../../Users/Indexing/Skiplist.html) with both date types.
When choosing string representations, you can work with string comparisons (less than,
greater than etc.) to express time ranges in your queries while still utilizing
skiplist indices:
@startDocuBlockInline working_with_date_time
@EXAMPLE_ARANGOSH_OUTPUT{working_with_date_time}

View File

@ -1,181 +1,423 @@
!CHAPTER Document functions
AQL supports the following functions to operate on document values:
AQL provides below listed functions to operate on objects / document values.
Also see [object access](../Fundamentals/DataTypes.md#objects-documents) for
additional language constructs.
- *MATCHES(document, examples, return-index)*: Compares the document
*document* against each example document provided in the array *examples*.
If *document* matches one of the examples, *true* is returned, and if there is
no match *false* will be returned. The default return value type can be changed by
passing *true* as the third function parameter *return-index*. Setting this
flag will return the index of the example that matched (starting at offset 0), or
*-1* if there was no match.
!SUBSECTION ATTRIBUTES()
The comparisons will be started with the first example. All attributes of the example
will be compared against the attributes of *document*. If all attributes match, the
comparison stops and the result is returned. If there is a mismatch, the function will
continue the comparison with the next example until there are no more examples left.
`ATTRIBUTES(document, removeInternal, sort) → attributes`
The *examples* must be an array of 1..n example documents, with any number of attributes
each. Note: specifying an empty array of examples is not allowed.
**Examples**
- **document** (object): an arbitrary document / object
- **removeInternal** (bool, *optional*): whether all system attributes (*_key*, *_id* etc.,
every attribute key that starts with an underscore) shall be omitted in the result.
The default is *false*.
- **sort** (bool, *optional*): optionally sort the resulting array alphabetically.
The default is *false* and will return the attribute names in a random order.
- returns **attributes** (array): the attribute keys of the input **document** as an
array of strings
```js
ATTRIBUTES( {"foo": "bar", "_key": "123", "_custom": "yes" } )
// [ "foo", "_key", "_custom" ]
MATCHES(
{ "test" : 1 }, [
{ "test" : 1, "foo" : "bar" },
{ "foo" : 1 },
{ "test" : 1 }
], true)
ATTRIBUTES( {"foo": "bar", "_key": "123", "_custom": "yes" }, true )
// [ "foo" ]
This will return *2*, because the third example matches, and because the
*return-index* flag is set to *true*.
ATTRIBUTES( {"foo": "bar", "_key": "123", "_custom": "yes" }, false, true )
// [ "_custom", "_key", "foo" ]
```
- *MERGE(document1, document2, ... documentN)*: Merges the documents
in *document1* to *documentN* into a single document. If document attribute
keys are ambiguous, the merged result will contain the values of the documents
contained later in the argument list.
Complex example to count how often every attribute key occurs in the documents
of *collection* (expensive on large collections):
For example, two documents with distinct attribute names can easily be merged into one:
```js
LET attributesPerDocument = (
FOR doc IN collection RETURN ATTRIBUTES(doc, true)
)
FOR attributeArray IN attributesPerDocument
FOR attribute IN attributeArray
COLLECT attr = attribute WITH COUNT INTO count
SORT count DESC
RETURN {attr, count}
```
/* { "user1" : { "name" : "J" }, "user2" : { "name" : "T" } } */
MERGE(
{ "user1" : { "name" : "J" } },
{ "user2" : { "name" : "T" } }
)
!SUBSECTION HAS()
When merging documents with identical attribute names, the attribute values of the
latter documents will be used in the end result:
`HAS(document, attributeName) → isPresent`
/* { "users" : { "name" : "T" } } */
MERGE(
{ "users" : { "name" : "J" } },
{ "users" : { "name" : "T" } }
)
Test whether an attribute is present in the provided document.
*MERGE* works with a single array parameter, too. This variant allows combining the
attributes of multiple objects from the array into a single object, e.g.
- **document** (object): an arbitrary document / object
- **attributeName** (string): the attribute key to test for
- returns **isPresent** (bool): *true* if *document* has an attribute named
*attributeName*, and *false* otherwise. An attribute with a falsy value (*0*, *false*,
empty string *""*) or *null* is also considered as present and returns *true*.
RETURN MERGE([
{ foo: 'bar' },
{ quux: 'quetzalcoatl', ruled: true },
{ bar: 'baz', foo: 'done' }
])
```js
HAS( { name: "Jane" }, "name" ) // true
HAS( { name: "Jane" }, "age" ) // false
HAS( { name: null }, "name" ) // true
```
This will now return:
Note that the function checks if the specified attribute exists. This is different
from similar ways to test for the existence of an attribute, in case the attribute
has a falsy value or is not present (implicitly *null* on object access):
{
"foo": "done",
"quux": "quetzalcoatl",
"ruled": true,
"bar": "baz"
}
```js
!!{ name: "" }.name // false
HAS( { name: "" }, "name") // true
Please note that merging will only be done for top-level attributes. If you wish to
merge sub-attributes, you should consider using *MERGE_RECURSIVE* instead.
{ name: null }.name == null // true
{ }.name == null // true
HAS( { name: null }, "name" ) // true
HAS( { }, "name" ) // false
```
- *MERGE_RECURSIVE(document1, document2, ... documentN)*: Recursively
merges the documents in *document1* to *documentN* into a single document. If
document attribute keys are ambiguous, the merged result will contain the values of the
documents contained later in the argument list.
!SUBSECTION IS_SAME_COLLECTION()
For example, two documents with distinct attribute names can easily be merged into one:
/* { "user-1" : { "name" : "J", "livesIn" : { "city" : "LA", "state" : "CA" }, "age" : 42 } } */
MERGE_RECURSIVE(
{ "user-1" : { "name" : "J", "livesIn" : { "city" : "LA" } } },
{ "user-1" : { "age" : 42, "livesIn" : { "state" : "CA" } } }
)
`IS_SAME_COLLECTION(collectionName, documentHandle) → bool`
*MERGE_RECURSIVE* does not support the single array parameter variant that *MERGE* offers.
- *TRANSLATE(value, lookup, defaultValue)*: Looks up the value *value* in the *lookup*
document. If *value* is a key in *lookup*, then *value* will be replaced with the
lookup value found. If *value* is not present in *lookup*, then *defaultValue* will
be returned if specified. If no *defaultValue* is specified, *value* will be returned:
/* "France" */
TRANSLATE("FR", { US: "United States", UK: "United Kingdom", FR: "France" })
/* "not found!" */
TRANSLATE(42, { foo: "bar", bar: "baz" }, "not found!")
- *HAS(document, attributename)*: Returns *true* if *document* has an
attribute named *attributename*, and *false* otherwise.
- *ATTRIBUTES(document, removeInternal, sort)*: Returns the attribute
names of the *document* as an array.
If *removeInternal* is set to *true*, then all internal attributes (such as *_id*,
*_key* etc.) are removed from the result. If *sort* is set to *true*, then the
attribute names in the result will be sorted. Otherwise they will be returned in any order.
- *VALUES(document, removeInternal)*: Returns the attribute values of the *document*
as an array. If *removeInternal* is set to *true*, then all internal attributes (such
as *_id*, *_key* etc.) are removed from the result. The values will be returned in any order.
- *ZIP(attributes, values)*: Returns a document object assembled from the
separate parameters *attributes* and *values*. *attributes* and *values* must be
arrays and must have the same length. The items in *attributes* will be used for
naming the attributes in the result. The items in *values* will be used as the
actual values of the result.
/* { "name" : "some user", "active" : true, "hobbies" : [ "swimming", "riding" ] } */
ZIP([ 'name', 'active', 'hobbies' ], [ 'some user', true, [ 'swimming', 'riding' ] ])
- *UNSET(document, attributename, ...)*: Removes the attributes *attributename*
(can be one or many) from *document*. All other attributes will be preserved.
Multiple attribute names can be specified by either passing multiple individual string argument
names, or by passing an array of attribute names:
UNSET(doc, '_id', '_key', 'foo', 'bar')
UNSET(doc, [ '_id', '_key', 'foo', 'bar' ])
- *UNSET_RECURSIVE(document, attributename, ...)*: Recursively removes the attributes
*attributename* (can be one or many) from *document* and its sub-documents. All other
attributes will be preserved.
Multiple attribute names can be specified by either passing multiple individual string argument
names, or by passing an array of attribute names:
UNSET_RECURSIVE(doc, '_id', '_key', 'foo', 'bar')
UNSET_RECURSIVE(doc, [ '_id', '_key', 'foo', 'bar' ])
- *KEEP(document, attributename, ...)*: Keeps only the attributes *attributename*
(can be one or many) from *document*. All other attributes will be removed from the result.
Multiple attribute names can be specified by either passing multiple individual string argument
names, or by passing an array of attribute names:
KEEP(doc, 'firstname', 'name', 'likes')
KEEP(doc, [ 'firstname', 'name', 'likes' ])
- *PARSE_IDENTIFIER(document-handle)*: Parses the [document handle](../Glossary/README.md#document-handle) specified in
*document-handle* and returns the handle's individual parts as separate attributes.
This function can be used to easily determine the [collection name](../Glossary/README.md#collection-name) and key from a given document.
The *document-handle* can either be a regular document from a collection, or a document
identifier string (e.g. *_users/1234*). Passing either a non-string or a non-document or a
document without an *_id* attribute will result in an error.
/* { "collection" : "_users", "key" : "my-user" } */
PARSE_IDENTIFIER('_users/my-user')
/* { "collection" : "mycollection", "key" : "mykey" } */
PARSE_IDENTIFIER({ "_id" : "mycollection/mykey", "value" : "some value" })
- *IS_SAME_COLLECTION(collection, document)*: Returns *true* if *document* has the same
collection id as the collection specified in *collection*. *document* can either be
a [document handle](../Glossary/README.md#document-handle) string, or a document with
a [document handle](../../Users/Appendix/Glossary.html#document-handle) string, or a document with
an *_id* attribute. The function does not validate whether the collection actually
contains the specified document, but only compares the name of the specified collection
with the collection name part of the specified document.
If *document* is neither an object with an *_id* attribute nor a *string* value,
the function will return *null* and raise a warning.
/* true */
IS_SAME_COLLECTION('_users', '_users/my-user')
IS_SAME_COLLECTION('_users', { _id: '_users/my-user' })
- **collectionName** (string): the name of a collection as string
- **documentHandle** (string|object): a document identifier string (e.g. *_users/1234*)
or a regular document from a collection. Passing either a non-string or a non-document
or a document without an *_id* attribute will result in an error.
- returns **bool** (bool): return *true* if the collection of *documentHandle* is the same
as *collectionName*, otherwise *false*
/* false */
IS_SAME_COLLECTION('_users', 'foobar/baz')
IS_SAME_COLLECTION('_users', { _id: 'something/else' })
```js
// true
IS_SAME_COLLECTION( "_users", "_users/my-user" )
IS_SAME_COLLECTION( "_users", { _id: "_users/my-user" } )
// false
IS_SAME_COLLECTION( "_users", "foobar/baz")
IS_SAME_COLLECTION( "_users", { _id: "something/else" } )
```
!SUBSECTION KEEP()
`KEEP(document, attributeName1, attributeName2, ... attributeNameN) → doc`
Keep only the attributes *attributeName* to *attributeNameN* of *document*.
All other attributes will be removed from the result.
- **document** (object): a document / object
- **attributeNames** (string, *repeatable*): an arbitrary number of attribute
names as multiple arguments
- returns **doc** (object): a document with only the specified attributes on
the top-level
```js
KEEP(doc, "firstname", "name", "likes")
```
`KEEP(document, attributeNameArray) → doc`
- **document** (object): a document / object
- **attributeNameArray** (array): an array of attribute names as strings
- returns **doc** (object): a document with only the specified attributes on
the top-level
```js
KEEP(doc, [ "firstname", "name", "likes" ])
```
!SUBSECTION LENGTH()
`LENGTH(doc) → attrCount`
Determine the [number of elements](Array.md#length) in an array,
the number of attribute keys of an object / document,
the [amount of documents](Miscellaneous.md#length) in a collection,
or the [character length](String.md#length) of a string.
- **doc** (object): a document / object
- returns **attrCount** (number): the number of attribute keys in *doc*, regardless
of their values
!SUBSECTION MATCHES()
`MATCHES(document, examples, returnIndex) → match`
Compare the given *document* against each example document provided. The comparisons
will be started with the first example. All attributes of the example will be compared
against the attributes of *document*. If all attributes match, the comparison stops
and the result is returned. If there is a mismatch, the function will continue the
comparison with the next example until there are no more examples left.
The *examples* can be an array of 1..n example documents or a single document,
with any number of attributes each.
- **document** (object): document to determine whether it matches any example
- **examples** (object|array): a single document, or an array of documents to compare
against. Specifying an empty array is not allowed.
- **returnIndex** (bool): by setting this flag to *true*, the index of the example that
matched will be returned (starting at offset 0), or *-1* if there was no match.
The default is **false** and makes the function return a boolean.
- returns **match** (bool|number): if *document* matches one of the examples, *true* is
returned, otherwise *false*. A number is returned instead if *returnIndex* is used.
```js
LET doc = {
name: "jane",
age: 27,
active: true
}
RETURN MATCHES(doc, { age: 27, active: true } )
```
This will return **true**, because all attributes of the example are present in the document.
```js
RETURN MATCHES(
{ "test": 1 },
[
{ "test": 1, "foo": "bar" },
{ "foo": 1 },
{ "test": 1 }
], true)
```
This will return *2*, because the third example matches, and because the
*returnIndex* flag is set to *true*.
!SUBSECTION MERGE()
`MERGE(document1, document2, ... documentN) → mergedDocument`
Merge the documents *document1* to *documentN* into a single document.
If document attribute keys are ambiguous, the merged result will contain the values
of the documents contained later in the argument list.
Note that merging will only be done for top-level attributes. If you wish to
merge sub-attributes, use [MERGE_RECURSIVE()](#mergerecursive) instead.
- **documents** (object, *repeatable*): an arbitrary number of documents as
multiple arguments, at least 2
- returns **mergedDocument** (object): a combined document
For example, two documents with distinct attribute names can easily be merged into one:
```js
MERGE(
{ "user1": { "name": "Jane" } },
{ "user2": { "name": "Tom" } }
)
// { "user1": { "name": "Jane" }, "user2": { "name": "Tom" } }
```
When merging documents with identical attribute names, the attribute values of the
latter documents will be used in the end result:
```js
MERGE(
{ "users": { "name": "Jane" } },
{ "users": { "name": "Tom" } }
)
// { "users": { "name": "Tom" } }
```
`MERGE(docArray) → mergedDocument`
*MERGE* works with a single array parameter, too. This variant allows combining the
attributes of multiple objects in an array into a single object.
- **docArray** (array): an array of documents, as sole argument
- returns **mergedDocument** (object): a combined document
```js
MERGE(
[
{ foo: "bar" },
{ quux: "quetzalcoatl", ruled: true },
{ bar: "baz", foo: "done" }
]
)
```
This will now return:
```js
{
"foo": "done",
"quux": "quetzalcoatl",
"ruled": true,
"bar": "baz"
}
```
!SUBSECTION MERGE_RECURSIVE()
`MERGE_RECURSIVE(document1, document2, ... documentN) → mergedDocument`
Recursively merge the documents *document1* to *documentN* into a single document.
If document attribute keys are ambiguous, the merged result will contain the values
of the documents contained later in the argument list.
- **documents** (object, *repeatable*): an arbitrary number of documents as
multiple arguments, at least 2
- returns **mergedDocument** (object): a combined document
For example, two documents with distinct attribute names can easily be merged into one:
```js
MERGE_RECURSIVE(
{ "user-1": { "name": "Jane", "livesIn": { "city": "LA" } } },
{ "user-1": { "age": 42, "livesIn": { "state": "CA" } } }
)
// { "user-1": { "name": "Jane", "livesIn": { "city": "LA", "state": "CA" }, "age": 42 } }
```
*MERGE_RECURSIVE()* does not support the single array parameter variant that *MERGE* offers.
!SUBSECTION PARSE_IDENTIFIER()
`PARSE_IDENTIFIER(documentHandle) → parts`
Parse a [document handle](../../Users/Appendix/Glossary.html#document-handle) and return its
individual parts as separate attributes.
This function can be used to easily determine the
[collection name](../../Users/Appendix/Glossary.html#collection-name) and key of a given document.
- **documentHandle** (string|object): a document identifier string (e.g. *_users/1234*)
or a regular document from a collection. Passing either a non-string or a non-document
or a document without an *_id* attribute will result in an error.
- returns **parts** (object): an object with the attributes *collection* and *key*
```js
PARSE_IDENTIFIER("_users/my-user")
// { "collection": "_users", "key": "my-user" }
PARSE_IDENTIFIER( { "_id": "mycollection/mykey", "value": "some value" } )
// { "collection": "mycollection", "key": "mykey" }
```
!SUBSECTION TRANSLATE()
`TRANSLATE(value, lookupDocument, defaultValue) → mappedValue`
Look up the specified *value* in the *lookupDocument*. If *value* is a key in
*lookupDocument*, then *value* will be replaced with the lookup value found.
If *value* is not present in *lookupDocument*, then *defaultValue* will be returned
if specified. If no *defaultValue* is specified, *value* will be returned unchanged.
- **value** (string): the value to encode according to the mapping
- **lookupDocument** (object): a key/value mapping as document
- **defaultValue** (any, *optional*): a fallback value in case *value* is not found
- returns **mappedValue** (any): the encoded value, or the unaltered *value* or *defaultValue*
(if supplied) in case it couldn't be mapped
```js
TRANSLATE("FR", { US: "United States", UK: "United Kingdom", FR: "France" } )
// "France"
TRANSLATE(42, { foo: "bar", bar: "baz" } )
// 42
TRANSLATE(42, { foo: "bar", bar: "baz" }, "not found!")
// "not found!"
```
!SUBSECTION UNSET()
`UNSET(document, attributeName1, attributeName2, ... attributeNameN) → doc`
Remove the attributes *attributeName1* to *attributeNameN* from *document*.
All other attributes will be preserved.
- **document** (object): a document / object
- **attributeNames** (string, *repeatable*): an arbitrary number of attribute
names as multiple arguments, at least 1
- returns **doc** (object): *document* without the specified attributes on the
top-level
```js
UNSET( doc, "_id", "_key", "foo", "bar" )
```
`UNSET(document, attributeNameArray) → doc`
- **document** (object): a document / object
- **attributeNameArray** (array): an array of attribute names as strings
- returns **doc** (object): *document* without the specified attributes on the
top-level
```js
UNSET( doc, [ "_id", "_key", "foo", "bar" ] )
```
!SUBSECTION UNSET_RECURSIVE()
`UNSET_RECURSIVE(document, attributeName1, attributeName2, ... attributeNameN) → doc`
Recursively remove the attributes *attributeName1* to *attributeNameN* from
*document* and its sub-documents. All other attributes will be preserved.
- **document** (object): a document / object
- **attributeNames** (string, *repeatable*): an arbitrary number of attribute
names as multiple arguments, at least 1
- returns **doc** (object): *document* without the specified attributes on
all levels (top-level as well as nested objects)
```js
UNSET_RECURSIVE( doc, "_id", "_key", "foo", "bar" )
```
`UNSET_RECURSIVE(document, attributeNameArray) → doc`
- **document** (object): a document / object
- **attributeNameArray** (array): an array of attribute names as strings
- returns **doc** (object): *document* without the specified attributes on
all levels (top-level as well as nested objects)
```js
UNSET_RECURSIVE( doc, [ "_id", "_key", "foo", "bar" ] )
```
!SUBSECTION VALUES()
`VALUES(document, removeInternal) → anyArray`
Return the attribute values of the *document* as an array. Optionally omit
system attributes.
- **document** (object): a document / object
- **removeInternal** (bool, *optional*): if set to *true*, then all internal attributes
(such as *_id*, *_key* etc.) are removed from the result
- returns **anyArray** (array): the values of *document* returned in any order
```js
VALUES( { "_key": "users/jane", "name": "Jane", "age": 35 } )
// [ "Jane", 35, "users/jane" ]
VALUES( { "_key": "users/jane", "name": "Jane", "age": 35 }, true )
// [ "Jane", 35 ]
```
!SUBSECTION ZIP()
`ZIP(keys, values) → doc`
Return a document object assembled from the separate parameters *keys* and *values*.
*keys* and *values* must be arrays and have the same length.
- **keys** (array): an array of strings, to be used as attribute names in the result
- **values** (array): an array with elements of arbitrary types, to be used as
attribute values
- returns **doc** (object): a document with the keys and values assembled
```js
ZIP( [ "name", "active", "hobbies" ], [ "some user", true, [ "swimming", "riding" ] ] )
// { "name": "some user", "active": true, "hobbies": [ "swimming", "riding" ] }
```

View File

@ -1,60 +1,74 @@
!CHAPTER Fulltext functions
AQL offers the following functions to filter data based on [fulltext indexes](../IndexHandling/Fulltext.md):
AQL offers the following functions to filter data based on
[fulltext indexes](../../Users/Indexing/Fulltext.html):
- *FULLTEXT(collection, attribute, query, limit)*:
Returns all documents from collection *collection* for which the attribute *attribute*
matches the fulltext query *query*. The *limit* parameter is optional. If set to a non-zero
value, it will cap the result to at most this number of documents.
*query* is a comma-separated list of sought words (or prefixes of sought words). To
distinguish between prefix searches and complete-match searches, each word can optionally be
prefixed with either the *prefix:* or *complete:* qualifier. Different qualifiers can
be mixed in the same query. Not specifying a qualifier for a search word will implicitly
execute a complete-match search for the given word:
!SUBSECTION FULLTEXT()
- *FULLTEXT(emails, "body", "banana")* Will look for the word *banana* in the
attribute *body* of the collection *collection*.
`FULLTEXT(coll, attribute, query, limit) → docArray`
- *FULLTEXT(emails, "body", "banana,orange")* Will look for both words
*banana* and *orange* in the mentioned attribute. Only those documents will be
returned that contain both words.
Return all documents from collection *coll*, for which the attribute *attribute*
matches the fulltext search phrase *query*, optionally capped to *limit* results.
- *FULLTEXT(emails, "body", "prefix:head")* Will look for documents that contain any
words starting with the prefix *head*.
- *FULLTEXT(emails, "body", "prefix:head,complete:aspirin")* Will look for all
documents that contain a word starting with the prefix *head* and that also contain
the (complete) word *aspirin*. Note: specifying *complete* is optional here.
- *FULLTEXT(emails, "body", "prefix:cent,prefix:subst")* Will look for all documents
that contain a word starting with the prefix *cent* and that also contain a word
starting with the prefix *subst*.
If multiple search words (or prefixes) are given, then by default the results will be
AND-combined, meaning only the logical intersection of all searches will be returned.
It is also possible to combine partial results with a logical OR, and with a logical NOT:
- *FULLTEXT(emails, "body", "+this,+text,+document")* Will return all documents that
contain all the mentioned words. Note: specifying the *+* symbols is optional here.
- *FULLTEXT(emails, "body", "banana,|apple")* Will return all documents that contain
either (or both) words *banana* or *apple*.
- *FULLTEXT(emails, "body", "banana,-apple")* Will return all documents that contain
the word *banana* but do not contain the word *apple*.
- *FULLTEXT(emails, "body", "banana,pear,-cranberry")* Will return all documents that
contain both the words *banana* and *pear* but do not contain the word
*cranberry*.
No precedence of logical operators will be honored in a fulltext query. The query will simply
be evaluated from left to right.
**Note**: the *FULLTEXT* function requires the collection *collection* to have a
**Note**: the *FULLTEXT()* function requires the collection *coll* to have a
fulltext index on *attribute*. If no fulltext index is available, this function
will fail with an error. *FULLTEXT* is not meant to be used as an argument to *FILTER*
but rather to be used as the expression of the *FOR* statement:
will fail with an error.
FOR oneMail IN
FULLTEXT(emails, "body", "banana,-apple")
RETURN oneMail._id;
- **coll** (collection): a collection
- **attribute** (string): the attribute name of the attribute to search in
- **query** (string): a fulltext search expression as described below
- **limit** (number, *optional*): if set to a non-zero value, it will cap the result
to at most this number of documents
- returns **docArray** (array): an array of documents
*FULLTEXT()* is not meant to be used as an argument to *FILTER*,
but rather to be used as the expression of a *FOR* statement:
```js
FOR oneMail IN FULLTEXT(emails, "body", "banana,-apple")
RETURN oneMail._id
```
*query* is a comma-separated list of sought words (or prefixes of sought words). To
distinguish between prefix searches and complete-match searches, each word can optionally be
prefixed with either the *prefix:* or *complete:* qualifier. Different qualifiers can
be mixed in the same query. Not specifying a qualifier for a search word will implicitly
execute a complete-match search for the given word:
- *FULLTEXT(emails, "body", "banana")* Will look for the word *banana* in the
attribute *body* of the collection *collection*.
- *FULLTEXT(emails, "body", "banana,orange")* Will look for both words
*banana* and *orange* in the mentioned attribute. Only those documents will be
returned that contain both words.
- *FULLTEXT(emails, "body", "prefix:head")* Will look for documents that contain any
words starting with the prefix *head*.
- *FULLTEXT(emails, "body", "prefix:head,complete:aspirin")* Will look for all
documents that contain a word starting with the prefix *head* and that also contain
the (complete) word *aspirin*. Note: specifying *complete* is optional here.
- *FULLTEXT(emails, "body", "prefix:cent,prefix:subst")* Will look for all documents
that contain a word starting with the prefix *cent* and that also contain a word
starting with the prefix *subst*.
If multiple search words (or prefixes) are given, then by default the results will be
AND-combined, meaning only the logical intersection of all searches will be returned.
It is also possible to combine partial results with a logical OR, and with a logical NOT:
- *FULLTEXT(emails, "body", "+this,+text,+document")* Will return all documents that
contain all the mentioned words. Note: specifying the *+* symbols is optional here.
- *FULLTEXT(emails, "body", "banana,|apple")* Will return all documents that contain
either (or both) words *banana* or *apple*.
- *FULLTEXT(emails, "body", "banana,-apple")* Will return all documents that contain
the word *banana*, but do not contain the word *apple*.
- *FULLTEXT(emails, "body", "banana,pear,-cranberry")* Will return all documents that
contain both the words *banana* and *pear*, but do not contain the word
*cranberry*.
No precedence of logical operators will be honored in a fulltext query. The query will simply
be evaluated from left to right.

View File

@ -1,63 +1,114 @@
!CHAPTER Geo functions
AQL offers the following functions to filter data based on [geo indexes](../Glossary/README.md#geo-index):
!SECTION Geo index functions
- *NEAR(collection, latitude, longitude, limit, distancename)*:
Returns at most *limit* documents from collection *collection* that are near
*latitude* and *longitude*. The result contains at most *limit* documents, returned in
any order. If more than *limit* documents qualify, it is undefined which of the qualifying
documents are returned. Optionally, the distances between the specified coordinate
(*latitude* and *longitude*) and the document coordinates can be returned as well.
To make use of that, an attribute name for the distance result has to be specified in
the *distancename* argument. The result documents will contain the distance value in
an attribute of that name.
*limit* is an optional parameter since ArangoDB 1.3. If it is not specified or null, a limit
value of 100 will be applied.
- *WITHIN(collection, latitude, longitude, radius, distancename)*:
Returns all documents from collection *collection* that are within a radius of
*radius* around that specified coordinate (*latitude* and *longitude*). The order
in which the result documents are returned is undefined. Optionally, the distance between the
coordinate and the document coordinates can be returned as well.
To make use of that, an attribute name for the distance result has to be specified in
the *distancename* argument. The result documents will contain the distance value in
an attribute of that name.
* *WITHIN_RECTANGLE(collection, latitude1, longitude1, latitude2, longitude2)*:
Returns all documents from collection *collection* that are positioned inside the bounding
rectangle with the points (*latitude1*, *longitude1*) and (*latitude2*, *longitude2*).
Note: these functions require the collection *collection* to have at least
one geo index. If no geo index can be found, calling this function will fail
AQL offers the following functions to filter data based on
[geo indexes](../../Users/Indexing/Geo.html). These functions require the collection to have at
least one geo index. If no geo index can be found, calling this function will fail
with an error.
- *IS_IN_POLYGON(polygon, latitude, longitude)*:
Returns `true` if the point (*latitude*, *longitude*) is inside the polygon specified in the
*polygon* parameter. The result is undefined (may be `true` or `false`) if the specified point
is exactly on a boundary of the polygon.
!SUBSECTION NEAR()
*latitude* can alternatively be specified as an array with two values. By default,
the first array element will be interpreted as the latitude value and the second array element
as the longitude value. This can be changed by setting the 3rd parameter to `true`.
*polygon* needs to be an array of points, with each point being an array with two values. The
first value of each point is considered to be the latitude value and the second to be the
longitude value, unless the 3rd parameter has a value of `true`. That means latitude and
longitude need to be specified in the same order in the *points* parameter as they are in
the search coordinate.
`NEAR(coll, latitude, longitude, limit, distanceName) → docArray`
Examples:
Return at most *limit* documents from collection *coll* that are near *latitude*
and *longitude*. The result contains at most *limit* documents, returned in
any order. If more than *limit* documents qualify, it is undefined which of the qualifying
documents are returned. Optionally, the distances between the specified coordinate
(*latitude* and *longitude*) and the document coordinates can be returned as well.
To make use of that, the desired attribute name for the distance result has to be specified
in the *distanceName* argument. The result documents will contain the distance value in
an attribute of that name.
/* will check if the point (lat 4, lon 7) is contained inside the polygon */
RETURN IS_IN_POLYGON([ [ 0, 0 ], [ 0, 10 ], [ 10, 10 ], [ 10, 0 ] ], 4, 7)
- **coll** (collection): a collection
- **latitude** (number): the latitude portion of the search coordinate
- **longitude** (number): the longitude portion of the search coordinate
- **limit** (number, *optional*): cap the result to at most this number of documents.
The default is 100. If more documents than *limit* are found, it is undefined which
ones will be returned.
- **distanceName** (string, *optional*): include the distance to the search coordinate
in each document in the result (in meters), using the attribute name *distanceName*
- returns **docArray** (array): an array of documents, in random order
/* will check if the point (lat 4, lon 7) is contained inside the polygon */
RETURN IS_IN_POLYGON([ [ 0, 0 ], [ 0, 10 ], [ 10, 10 ], [ 10, 0 ] ], [ 4, 7 ])
/* will check if the point (lat 4, lon 7) is contained inside the polygon */
RETURN IS_IN_POLYGON([ [ 0, 0 ], [ 10, 0 ], [ 10, 10 ], [ 0, 10 ] ], [ 7, 4 ], true)
!SUBSECTION WITHIN()
`WITHIN(coll, latitude, longitude, radius, distanceName) → docArray`
!SUBSUBSECTION Related topics
Return all documents from collection *coll* that are within a radius of *radius*
around the specified coordinate (*latitude* and *longitude*). The order in which the
result documents are returned is undefined. Optionally, the distance between the
search coordinate and the document coordinates can be returned as well. To make use
of that, an attribute name for the distance result has to be specified in
the *distanceName* argument. The result documents will contain the distance value in
an attribute of that name.
ArangoDB geographic indexing feature is described in [Geo indexes](../IndexHandling/Geo.md)
- **coll** (collection): a collection
- **latitude** (number): the latitude portion of the search coordinate
- **longitude** (number): the longitude portion of the search coordinate
- **radius** (number): radius in meters
- **distanceName** (string, *optional*): include the distance to the search coordinate
in each document in the result (in meters), using the attribute name *distanceName*
- returns **docArray** (array): an array of documents, in random order
!SUBSECTION WITHIN_RECTANGLE()
`WITHIN_RECTANGLE(coll, latitude1, longitude1, latitude2, longitude2) → docArray`
Return all documents from collection *coll* that are positioned inside the bounding
rectangle with the points (*latitude1*, *longitude1*) and (*latitude2*, *longitude2*).
- **coll** (collection): a collection
- **latitude1** (number): the bottom-left latitude portion of the search coordinate
- **longitude1** (number): the bottom-left longitude portion of the search coordinate
- **latitude2** (number): the top-right latitude portion of the search coordinate
- **longitude2** (number): the top-right longitude portion of the search coordinate
- returns **docArray** (array): an array of documents, in random order
!SECTION Geo utility functions
The following helper functions do not use any geo index.
!SUBSECTION IS_IN_POLYGON()
Determine whether a coordinate is inside a polygon.
`IS_IN_POLYGON(polygon, latitude, longitude) → bool`
- **polygon** (array): an array of arrays with 2 elements each, representing the
points of the polygon in the format *[lat, lon]*
- **latitude** (number): the latitude portion of the search coordinate
- **longitude** (number): the longitude portion of the search coordinate
- returns **bool** (bool): *true* if the point (*latitude*, *longitude*) is inside the
*polygon* or *false* if it's not. The result is undefined (can be *true* or *false*)
if the specified point is exactly on a boundary of the polygon.
```js
// will check if the point (lat 4, lon 7) is contained inside the polygon
IS_IN_POLYGON( [ [ 0, 0 ], [ 0, 10 ], [ 10, 10 ], [ 10, 0 ] ], 4, 7 )
```
`IS_IN_POLYGON(polygon, coord, useLonLat) → bool`
The 2nd parameter can alternatively be specified as an array with two values.
By default, each array element in *polygon* is expected to be in the format *[lat, lon]*.
This can be changed by setting the 3rd parameter to *true* to interpret the points as
*[lon, lat]*. *coord* will then also be interpreted in the same way.
- **polygon** (array): an array of arrays with 2 elements each, representing the
points of the polygon
- **coord** (array): the search coordinate as a number array with two elements
- **useLonLat** (bool, *optional*): if set to *true*, the coordinates in *polygon* and
the search coordinate *coord* will be interpreted as *[lon, lat]*. The default is
*false* and the format *[lat, lon]* is expected.
- returns **bool** (bool): *true* if the point *coord* is inside the *polygon* or
*false* if it's not. The result is undefined (can be *true* or *false*) if the
specified point is exactly on a boundary of the polygon.
```js
// will check if the point (lat 4, lon 7) is contained inside the polygon
IS_IN_POLYGON( [ [ 0, 0 ], [ 0, 10 ], [ 10, 10 ], [ 10, 0 ] ], [ 4, 7 ] )
// will check if the point (lat 4, lon 7) is contained inside the polygon
IS_IN_POLYGON( [ [ 0, 0 ], [ 10, 0 ], [ 10, 10 ], [ 0, 10 ] ], [ 7, 4 ], true )
```

View File

@ -1,78 +1,208 @@
!CHAPTER Control flow functions
!CHAPTER Miscellaneous functions
AQL offers the following functions to let the user control the flow of operations:
!SECTION Control flow functions
- *NOT_NULL(alternative, ...)*: Returns the first alternative that is not *null*,
and *null* if all alternatives are *null* themselves
!SUBSECTION NOT_NULL()
- *FIRST_LIST(alternative, ...)*: Returns the first alternative that is an array, and
*null* if none of the alternatives is an array
`NOT_NULL(alternative, ...) → value`
- *FIRST_DOCUMENT(alternative, ...)*: Returns the first alternative that is a document,
and *null* if none of the alternatives is a document
Return the first alternative that is not *null*, and *null* if all alternatives
are *null* themselves.
!SECTION Miscellaneous functions
- **alternative** (any, *repeatable*): input of arbitrary type
- returns **value** (any):
Finally, AQL supports the following functions that do not belong to any of the other
function categories:
!SUBSECTION FIRST_LIST()
- *HASH(value)*: Calculates a hash value for *value*. *value* is not required to be a
string, but can have any data type. The calculated hash value will take the data type
of *value* into account, so for example the number *1* and the string *"1"* will have
different hash values. For arrays the hash values will be created if the arrays contain
exactly the same values (including value types) in the same order. For objects the same
hash values will be created if the objects have exactly the same attribute names and
values (including value types). The order in which attributes appear inside objects
is not important for hashing.
The hash value returned by this function is a number. The hash algorithm is not guaranteed
to remain the same in future versions of ArangoDB. The hash values should therefore be
used only for temporary calculations, e.g. to compare if two documents are the same, or
for grouping values in queries.
Return the first alternative that is an array, and *null* if none of the
alternatives is an array.
- *COLLECTIONS()*: Returns an array of collections. Each collection is returned as a document
with attributes *name* and *_id*
- **alternative** (any, *repeatable*): input of arbitrary type
- returns **list** (list|null): array / list or null
- *CURRENT_USER()*: Returns the name of the current user. The current user is the user
account name that was specified in the *Authorization* HTTP header of the request. It will
only be populated if authentication on the server is turned on, and if the query was executed
inside a request context. Otherwise, the return value of this function will be *null*.
!SUBSECTION FIRST_DOCUMENT()
- *DOCUMENT(collection, id)*: Returns the document which is uniquely identified by
the *id*. ArangoDB will try to find the document using the *_id* value of the document
in the specified collection. If there is a mismatch between the *collection* passed and
the collection specified in *id*, then *null* will be returned. Additionally, if the
*collection* matches the collection value specified in *id* but the document cannot be
found, *null* will be returned. This function also allows *id* to be an array of ids.
In this case, the function will return an array of all documents that could be found.
`FIRST_DOCUMENT(value) → doc`
*Examples*
DOCUMENT(users, "users/john")
DOCUMENT(users, "john")
Return the first alternative that is a document, and *null* if none of the
alternatives is a document.
DOCUMENT(users, [ "users/john", "users/amy" ])
DOCUMENT(users, [ "john", "amy" ])
- **alternative** (any, *repeatable*): input of arbitrary type
- returns **doc** (object|null): document / object or null
Note: The *DOCUMENT* function is polymorphic since ArangoDB 1.4. It can now be used with
a single parameter *id* as follows:
!SUBSECTION Ternary operator
- *DOCUMENT(id)*: In this case, *id* must either be a document handle string
(consisting of collection name and document key) or an array of document handle strings, e.g.
For conditional evaluation, check out the
[ternary operator](../Operators.md#ternary-operator).
DOCUMENT("users/john")
DOCUMENT([ "users/john", "users/amy" ])
!SECTION Database functions
- *CALL(function, arg1, ..., argN)*: Dynamically calls the function with name *function*
with the arguments specified. Both built-in and user-defined functions can be called.
Arguments are passed as separate parameters to the called function.
!SUBSECTION COLLECTION_COUNT()
/* "this" */
CALL('SUBSTRING', 'this is a test', 0, 4)
`COLLECTION_COUNT(coll) → count`
- *APPLY(function, arguments)*: Dynamically calls the function with name *function*
with the arguments specified. Both built-in and user-defined functions can be called.
Arguments are passed as separate parameters to the called function.
Determine the amount of documents in a collection. [LENGTH()](#length)
is preferred.
/* "this is" */
APPLY('SUBSTRING', [ 'this is a test', 0, 7 ])
!SUBSECTION COLLECTIONS()
`COLLECTIONS() → docArray`
Return an array of collections.
- returns **docArray** (array): each collection as a document with attributes
*name* and *_id* in an array
!SUBSECTION CURRENT_USER()
`CURRENT_USER() → userName`
Return the name of the current user.
The current user is the user account name that was specified in the
*Authorization* HTTP header of the request. It will only be populated if
authentication on the server is turned on, and if the query was executed inside
a request context. Otherwise, the return value of this function will be *null*.
- returns **userName** (string|null): the current user name, or *null* if
authentication is disabled
!SUBSECTION DOCUMENT()
`DOCUMENT(collection, id) → doc`
Return the document which is uniquely identified by its *id*. ArangoDB will
try to find the document using the *_id* value of the document in the specified
collection.
If there is a mismatch between the *collection* passed and the
collection specified in *id*, then *null* will be returned. Additionally,
if the *collection* matches the collection value specified in *id* but the
document cannot be found, *null* will be returned.
This function also allows *id* to be an array of ids. In this case, the
function will return an array of all documents that could be found.
It is also possible to specify a document key instead of an id, or an array
of keys to return all documents that can be found.
- **collection** (string): name of a collection
- **id** (string|array): a document handle string (consisting of collection
name and document key), a document key, or an array of both document handle
strings and document keys
- returns **doc** (document|array|null): the content of the found document,
an array of all found documents or *null* if nothing was found
```js
DOCUMENT( users, "users/john" )
DOCUMENT( users, "john" )
DOCUMENT( users, [ "users/john", "users/amy" ] )
DOCUMENT( users, [ "john", "amy" ] )
```
`DOCUMENT(id) → doc`
The function can also be used with a single parameter *id* as follows:
- **id** (string|array): either a document handle string (consisting of
collection name and document key) or an array of document handle strings
- returns **doc** (document|null): the content of the found document
or *null* if nothing was found
```js
DOCUMENT("users/john")
DOCUMENT( [ "users/john", "users/amy" ] )
```
!SUBSECTION LENGTH()
`LENGTH(coll) → documentCount`
Determine the [number of elements](Array.md#length) in an array,
the [number of attribute keys](Document.md#length) of an object / document,
the amount of documents in a collection,
or the [character length](String.md#length) of a string.
It calls [COLLECTION_COUNT()](#collectioncount) internally.
- **coll** (collection): a collection (not string)
- returns **documentCount** (number): the total amount of documents in *coll*
!SECTION Hash functions
`HASH(value) → hashNumber`
Calculate a hash value for *value*.
- **value** (any): an element of arbitrary type
- returns **hashNumber** (number): a hash value of *value*
*value* is not required to be a string, but can have any data type. The calculated
hash value will take the data type of *value* into account, so for example the
number *1* and the string *"1"* will have different hash values. For arrays the
hash values will be created if the arrays contain exactly the same values
(including value types) in the same order. For objects the same hash values will
be created if the objects have exactly the same attribute names and values
(including value types). The order in which attributes appear inside objects
is not important for hashing.
The hash value returned by this function is a number. The hash algorithm is not
guaranteed to remain the same in future versions of ArangoDB. The hash values
should therefore be used only for temporary calculations, e.g. to compare if two
documents are the same, or for grouping values in queries.
!SECTION Function calling
!SUBSECTION APPLY()
`APPLY(funcName, arguments) → retVal`
Dynamically call the function *funcName* with the arguments specified.
Arguments are given as array and are passed as separate parameters to
the called function.
Both built-in and user-defined functions can be called.
- **funcName** (string): a function name
- **arguments** (array, *optional*): an array with elements of arbitrary type
- returns **retVal** (any): the return value of the called function
```js
APPLY( "SUBSTRING", [ "this is a test", 0, 7 ] )
// "this is"
```
!SUBSECTION CALL()
`CALL(funcName, arg1, arg2, ... argN) → retVal`
Dynamically call the function *funcName* with the arguments specified.
Arguments are given as multiple parameters and passed as separate
parameters to the called function.
Both built-in and user-defined functions can be called.
- **funcName** (string): a function name
- **args** (any, *repeatable*): an arbitrary number of elements as
multiple arguments, can be omitted
- returns **retVal** (any): the return value of the called function
```js
CALL( "SUBSTRING", "this is a test", 0, 4 )
// "this"
```
!SECTION Internal functions
!SUBSECTION FAIL()
`FAIL(reason)`
!SUBSECTION NOOP()
`NOOP(value) → retVal`
!SUBSECTION V8()
`V8(value) → retVal`

View File

@ -3,71 +3,307 @@
AQL offers some numeric functions for calculations. The following functions are
supported:
- *FLOOR(value)*: Returns the integer closest but not greater to *value*
!SUBSECTION ABS()
- *CEIL(value)*: Returns the integer closest but not less than *value*
`ABS(value) → unsignedValue`
- *ROUND(value)*: Returns the integer closest to *value*
Return the absolute part of *value*.
Rounding towards zero, also known as `trunc()` in C/C++, can be achieved with
a combination of the [ternary operator](../Operators.md#ternary-operator),
`CEIL()` and `FLOOR()`:
```
LET rounded = value >= 0 ? FLOOR(value) : CEIL(value)
```
- **value** (number): any number, positive or negative
- returns **unsignedValue** (number): the number without + or - sign
- *ABS(value)*: Returns the absolute part of *value*
```js
ABS(-5) // 5
ABS(+5) // 5
ABS(3.5) // 3.5
```
- *SQRT(value)*: Returns the square root of *value*
!SUBSECTION AVERAGE()
- *POW(base, exp)*: Returns the *base* to the exponent *exp*
`AVERAGE(numArray) → mean`
- *RAND()*: Returns a pseudo-random number between 0 and 1
Return the average (arithmetic mean) of the values in *array*.
- *MIN(array)*: Returns the smallest element of *array*. *null* values
are ignored. If the array is empty or only *null* values are contained in the array, the
function will return *null*.
- **numArray** (array): an array of numbers, *null* values are ignored
- returns **mean** (number|null): the average value of *numArray*. If the array is
empty or contains *null* values only, *null* will be returned.
- *MAX(array)*: Returns the greatest element of *array*. *null* values
are ignored. If the array is empty or only *null* values are contained in the array, the
function will return *null*.
!SUBSECTION CEIL()
- *AVERAGE(array)*: Returns the average (arithmetic mean) of the values in *array*.
This requires the elements in *array* to be numbers. *null* values are ignored.
If the array is empty or only *null* values are contained in the array, the function
will return *null*.
`CEIL(value) → roundedValue`
- *SUM(array)*: Returns the sum of the values in *array*. This
requires the elements in *array* to be numbers. *null* values are ignored.
Return the integer closest but not less than *value*.
- *MEDIAN(array)*: Returns the median value of the values in *array*. This
requires the elements in *array* to be numbers. *null* values are ignored. If the
array is empty or only *null* values are contained in the array, the function will return
*null*.
- **value** (number): any number
- returns **roundedValue** (number): the value rounded to the ceiling
- *PERCENTILE(array, n, method)*: Returns the *n*th percentile of the values in *array*.
This requires the elements in *array* to be numbers. *null* values are ignored. *n* must
be between 0 (excluded) and 100 (included). *method* can be *rank* or *interpolation*.
The function will return null if the array is empty or only *null* values are contained
in it or the percentile cannot be calculated.
```js
CEIL(2.49) // 3
CEIL(2.50) // 3
CEIL(-2.50) // -2
CEIL(-2.51) // -2
```
- *VARIANCE_POPULATION(array)*: Returns the population variance of the values in
*array*. This requires the elements in *array* to be numbers. *null* values
are ignored. If the array is empty or only *null* values are contained in the array,
the function will return *null*.
!SUBSECTION FLOOR()
- *VARIANCE_SAMPLE(array)*: Returns the sample variance of the values in
*array*. This requires the elements in *array* to be numbers. *null* values
are ignored. If the array is empty or only *null* values are contained in the array,
the function will return *null*.
`FLOOR(value) → roundedValue`
- *STDDEV_POPULATION(array)*: Returns the population standard deviation of the
values in *array*. This requires the elements in *array* to be numbers. *null*
values are ignored. If the array is empty or only *null* values are contained in the array,
the function will return *null*.
Return the integer closest but not greater than *value*.
- *STDDEV_SAMPLE(array)*: Returns the sample standard deviation of the values in
*array*. This requires the elements in *array* to be numbers. *null* values
are ignored. If the array is empty or only *null* values are contained in the array,
the function will return *null*.
- **value** (number): any number
- returns **roundedValue** (number): the value rounded to the floor
```js
FLOOR(2.49) // 2
FLOOR(2.50) // 2
FLOOR(-2.50) // -3
FLOOR(-2.51) // -3
```
!SUBSECTION MAX()
`MAX(anyArray) → max`
Return the greatest element of *anyArray*. The array is not limited to numbers.
Also see [type and value order](../Fundamentals/TypeValueOrder.md).
- **anyArray** (array): an array of elements of arbitrary type, *null* values are ignored
- returns **max** (any|null): the element with the greatest value. If the array is
empty or contains *null* values only, the function will return *null*.
```js
MAX( [5, 9, -2, null, 1] ) // 9
```
!SUBSECTION MEDIAN()
`MEDIAN(numArray) → median`
Return the median value of the values in *array*.
The array is sorted and the element in the middle is returned. If the array has an
even length of elements, the two center-most elements are interpolated by calculating
the average value (arithmetic mean).
- **numArray** (array): an array of numbers, *null* values are ignored
- returns **median** (number|null): the median of *numArray*. If the array is
empty or contains *null* values only, the function will return *null*.
```js
MEDIAN( [ 1, 2, 3] ) // 2
MEDIAN( [ 1, 2, 3, 4 ] ) // 2.5
MEDIAN( [ 4, 2, 3, 1 ] ) // 2.5
MEDIAN( [ 999, 80, 4, 4, 4, 3, 3, 3 ] ) // 4
```
!SUBSECTION MIN()
`MIN(anyArray) → min`
Return the smallest element of *anyArray*. The array is not limited to numbers.
Also see [type and value order](../Fundamentals/TypeValueOrder.md).
- **anyArray** (array): an array of elements of arbitrary type, *null* values are ignored
- returns **min** (any|null): the element with the smallest value. If the array is
empty or contains *null* values only, the function will return *null*.
```js
MIN( [5, 9, -2, null, 1] ) // -2
```
!SUBSECTION PERCENTILE()
`PERCENTILE(numArray, n, method) → percentile`
Return the *n*th percentile of the values in *numArray*.
- **numArray** (array): an array of numbers, *null* values are ignored
- **n** (number): must be between 0 (excluded) and 100 (included)
- **method** (string, *optional*): "rank" (default) or "interpolation"
- returns **percentile** (number|null): the *n*th percentile, or *null* if the
array is empty or only *null* values are contained in it or the percentile
cannot be calculated
```js
PERCENTILE( [1, 2, 3, 4], 50 ) // 2
PERCENTILE( [1, 2, 3, 4], 50, "rank" ) // 2
PERCENTILE( [1, 2, 3, 4], 50, "interpolation" ) // 2.5
```
!SUBSECTION POW()
`POW(base, exp) → num`
Return the *base* to the exponent *exp*.
- **base** (number): the base value
- **exp** (number): the exponent value
- returns **num** (number): the exponentiated value
```js
POW( 2, 4 ) // 16
POW( 5, -1 ) // 0.2
POW( 5, 0 ) // 1
```
!SUBSECTION RAND()
`RAND() → randomNumber`
Return a pseudo-random number between 0 and 1.
- returns **randomNumber** (number): a number greater than 0 and less than 1
```js
RAND() // 0.3503170117504508
RAND() // 0.6138226173882478
```
Complex example:
```js
LET coinFlips = (
FOR i IN 1..100000
RETURN RAND() > 0.5 ? "heads" : "tails"
)
RETURN MERGE(
FOR flip IN coinFlips
COLLECT f = flip WITH COUNT INTO count
RETURN { [f]: count }
)
```
Result:
```json
[
{
"heads": 49902,
"tails": 50098
}
]
```
!SUBSECTION ROUND()
`ROUND(value) → roundedValue`
Return the integer closest to *value*.
- **value** (number): any number
- returns **roundedValue** (number): the value rounded to the closest integer
```js
ROUND(2.49) // 2
ROUND(2.50) // 3
ROUND(-2.50) // -2
ROUND(-2.51) // -3
```
Rounding towards zero, also known as `trunc()` in C/C++, can be achieved with
a combination of the [ternary operator](../Operators.md#ternary-operator),
[CEIL()](#ceil) and [FLOOR()](#floor):
```js
LET rounded = value >= 0 ? FLOOR(value) : CEIL(value)
```
!SUBSECTION SQRT()
`SQRT(value) → squareRoot`
Return the square root of *value*.
- **value** (number): a number
- returns **squareRoot** (number): the square root of *value*
```js
SQRT(9) // 3
SQRT(2) // 1.4142135623730951
```
Other roots can be calculated with [POW()](#pow) like `POW(value, 1/n)`:
```js
// 4th root of 8 * 8 * 8 * 8 = 4096
POW(4096, 1/4) // 8
// cube root of 3 * 3 * 3 = 27
POW(27, 1/3) // 3
// square root of 3 * 3 = 9
POW(9, 1/2) // 3
```
!SUBSECTION STDDEV_POPULATION()
`STDDEV_POPULATION(numArray) → num`
Return the population standard deviation of the values in *array*.
- **numArray** (array): an array of numbers, *null* values are ignored
- returns **num** (number|null): the population standard deviation of *numArray*.
If the array is empty or only *null* values are contained in the array,
*null* will be returned.
```js
STDDEV_POPULATION( [ 1, 3, 6, 5, 2 ] ) // 1.854723699099141
```
!SUBSECTION STDDEV_SAMPLE()
`STDDEV_SAMPLE(numArray) → num`
Return the sample standard deviation of the values in *array*.
- **numArray** (array): an array of numbers, *null* values are ignored
- returns **num** (number|null): the sample standard deviation of *numArray*.
If the array is empty or only *null* values are contained in the array,
*null* will be returned.
```js
STDDEV_SAMPLE( [ 1, 3, 6, 5, 2 ] ) // 2.0736441353327724
```
!SUBSECTION SUM()
`SUM(numArray) → sum`
Return the sum of the values in *array*.
- **numArray** (array): an array of numbers, *null* values are ignored
- returns **sum** (number): the total of all values in *numArray*
```js
SUM( [1, 2, 3, 4] ) // 10
```
!SUBSECTION VARIANCE_POPULATION()
`VARIANCE_POPULATION(numArray) → num`
Return the population variance of the values in *array*.
- **numArray** (array): an array of numbers, *null* values are ignored
- returns **num** (number|null): the population variance of *numArray*.
If the array is empty or only *null* values are contained in the array,
*null* will be returned.
```js
VARIANCE_POPULATION( [ 1, 3, 6, 5, 2 ] ) // 3.4400000000000004
```
!SUBSECTION VARIANCE_SAMPLE()
`VARIANCE_SAMPLE(numArray) → num`
Return the sample variance of the values in *array*.
- **numArray** (array): an array of numbers, *null* values are ignored
- returns **num** (number|null): the sample variance of *numArray*.
If the array is empty or only *null* values are contained in the array,
*null* will be returned.
```js
VARIANCE_SAMPLE( [ 1, 3, 6, 5, 2 ] ) // 4.300000000000001
```

View File

@ -4,7 +4,9 @@ AQL supports functions to allow more complex computations. Functions can be
called at any query position where an expression is allowed. The general
function call syntax is:
FUNCTIONNAME(arguments)
```js
FUNCTIONNAME(arguments)
```
where *FUNCTIONNAME* is the name of the function to be called, and *arguments*
is a comma-separated list of function arguments. If a function does not need any
@ -14,9 +16,11 @@ calls distinguishable from variable names.
Some example function calls:
HAS(user, "name")
LENGTH(friends)
COLLECTIONS()
```js
HAS(user, "name")
LENGTH(friends)
COLLECTIONS()
```
In contrast to collection and variable names, function names are case-insensitive,
i.e. *LENGTH(foo)* and *length(foo)* are equivalent.
@ -27,7 +31,7 @@ Since ArangoDB 1.3, it is possible to extend AQL with user-defined functions.
These functions need to be written in JavaScript, and be registered before usage
in a query.
Please refer to [Extending AQL](../AqlExtending/README.md) for more details on this.
Please refer to [Extending AQL](../Extending/index.html) for more details on this.
By default, any function used in an AQL query will be sought in the built-in
function namespace *_aql*. This is the default namespace that contains all AQL
@ -36,8 +40,9 @@ To refer to a user-defined AQL function, the function name must be fully qualifi
to also include the user-defined namespace. The *::* symbol is used as the namespace
separator:
MYGROUP::MYFUNC()
```js
MYGROUP::MYFUNC()
MYFUNCTIONS::MATH::RANDOM()
```
MYFUNCTIONS::MATH::RANDOM()
As all AQL function names, user function names are also case-insensitive.

View File

@ -2,161 +2,460 @@
For string processing, AQL offers the following functions:
- *CONCAT(value1, value2, ... valueN)*: Concatenate the strings
passed as in *value1* to *valueN*. *null* values are ignored. Array value arguments
!SUBSECTION CHAR_LENGTH()
`CHAR_LENGTH(value) → length`
Return the number of characters in *value* (not byte length).
This is a synonym for [LENGTH()](#length).
!SUBSECTION CONCAT()
`CONCAT(value1, value2, ... valueN) → joinedString`
Concatenate the strings passed as *value1* to *valueN*.
- **values** (string|array, *repeatable*): strings or arrays of strings as multiple
arguments, at least one
- returns **joinedString** (string): a concatenated string of the elements. *null*
values are ignored. Array value arguments are expanded automatically, and their
individual members will be concatenated. Nested arrays will be expanded too, but
with their elements separated by commas if they have more than a single element.
```js
CONCAT("foo", "bar", "baz")
// "foobarbaz"
CONCAT([ "foo", "bar", "baz" ])
// "foobarbaz"
CONCAT([ "he", ["ll"] ], "o!")
// "hello!"
CONCAT([ "he", ["l", "l"] ], "o!")
// "hel,lo!"
```
!SUBSECTION CONCAT_SEPARATOR()
`CONCAT_SEPARATOR(separator, value1, value2, ... valueN) → joinedString`
Concatenate the strings passed as arguments *value1* to *valueN* using the
*separator* string.
- **separator** (string): an arbitrary separator string
- **values** (string|array, *repeatable*): strings or arrays of strings as multiple
arguments, at least one
- returns **joinedString** (string): a concatenated string of the elements, using
*separator* as separator string. *null* values are ignored. Array value arguments
are expanded automatically, and their individual members will be concatenated.
Nested arrays will be expanded too, but with their elements separated by commas
if they have more than a single element.
/* "foobarbaz" */
CONCAT('foo', 'bar', 'baz')
```js
CONCAT_SEPARATOR(", ", "foo", "bar", "baz")
// "foo, bar, baz"
/* "foobarbaz" */
CONCAT([ 'foo', 'bar', 'baz' ])
/* "hello!" */
CONCAT([ 'he', ['ll'] ], 'o!')
CONCAT_SEPARATOR(", ", [ "foo", "bar", "baz" ])
// "foo, bar, baz"
- *CONCAT_SEPARATOR(separator, value1, value2, ... valueN)*:
Concatenate the strings passed as arguments *value1* to *valueN* using the
*separator* string. *null* values are ignored. Array value arguments
are expanded automatically, and their individual members will be concatenated.
CONCAT_SEPARATOR(", ", [ "foo", [ "b", "a", "r" ], "baz" ])
// "foo, b,a,r, baz"
/* "foo, bar, baz" */
CONCAT_SEPARATOR(', ', 'foo', 'bar', 'baz')
/* "1-2-3-4-5" */
CONCAT_SEPARATOR("-", [1, 2, 3, null], [4, null, 5])
```
/* "foo, bar, baz" */
CONCAT_SEPARATOR(', ', [ 'foo', 'bar', 'baz' ])
!SUBSECTION CONTAINS()
/* "1-2-3-4-5" */
CONCAT_SEPARATOR('-', [1, 2, 3, null], [4, null, 5])
`CONTAINS(text, search, returnIndex) → match`
- *CHAR_LENGTH(value)*: Return the number of characters in *value* (not byte length).
This is a synonym for *LENGTH(value)*.
Check whether the string *search* is contained in the string *text*.
The string matching performed by *CONTAINS* is case-sensitive.
- *LOWER(value)*: Convert upper-case letters in *value* to their lower-case
counterparts. All other characters are returned unchanged.
- **text** (string): the haystack
- **search** (string): the needle
- **returnIndex** (bool, *optional*): if set to *true*, the character position
  of the match is returned instead of a boolean. The default is *false*.
- returns **match** (bool|number): by default, *true* is returned if *search*
is contained in *text*, and *false* otherwise. With *returnIndex* set to *true*,
the position of the first occurrence of *search* within *text* is returned
(starting at offset 0), or *-1* if *search* is not contained in *text*.
- *UPPER(value)*: Convert lower-case letters in *value* to their upper-case
counterparts. All other characters are returned unchanged.
```js
CONTAINS("foobarbaz", "bar") // true
CONTAINS("foobarbaz", "horse") // false
CONTAINS("foobarbaz", "ba", true) // 3
CONTAINS("foobarbaz", "horse", true) // -1
```
- *SUBSTITUTE(value, search, replace, limit)*: Replaces search values in the string
*value*. If *search* is a string, all occurrences of *search* will be replaced in
*value*. If *search* is a list, each occurrence of a value contained in *search*
will be replaced by the corresponding list item in *replace*. If *replace* has less
list items than *search*, occurrences of unmapped *search* items will be replaced
by an empty string. The number of replacements can optionally be limited using the
*limit* parameter. If the *limit* is reached, no further occurrences of the search
values will be replaced.
!SUBSECTION FIND_FIRST()
/* "the lazy brown foxx" */
SUBSTITUTE("the quick brown foxx", "quick", "lazy")
`FIND_FIRST(text, search, start, end) → position`
/* "the slow brown dog" */
SUBSTITUTE("the quick brown foxx", [ "quick", "foxx" ], [ "slow", "dog" ])
Return the position of the first occurrence of the string *search* inside the
string *text*. Positions start at 0.
/* "A VOID! brown " */
SUBSTITUTE("the quick brown foxx", [ "the", "quick", "foxx" ], [ "A", "VOID!" ])
- **text** (string): the haystack
- **search** (string): the needle
- **start** (number, *optional*): limit the search to a subset of the text,
beginning at *start*
- **end** (number, *optional*): limit the search to a subset of the text,
ending at *end*
- returns **position** (number): the character position of the match. If *search*
is not contained in *text*, -1 is returned.
/* "the xx brown xx" */
SUBSTITUTE("the quick brown foxx", [ "quick", "foxx" ], "xx" )
```js
FIND_FIRST("foobarbaz", "ba") // 3
FIND_FIRST("foobarbaz", "ba", 4) // 6
FIND_FIRST("foobarbaz", "ba", 0, 3) // -1
```
Alternatively, *search* and *replace* can be specified in a combined value:
!SUBSECTION FIND_LAST()
/* "the small slow ant" */
SUBSTITUTE("the quick brown foxx", {
"quick" : "small",
"brown" : "slow",
"foxx" : "ant"
})
`FIND_LAST(text, search, start, end) → position`
- *SUBSTRING(value, offset, length)*: Return a substring of *value*,
starting at *offset* and with a maximum length of *length* characters. Offsets
start at position 0. Length is optional and if omitted the substring from *offset*
to the end of the string will be returned.
Return the position of the last occurrence of the string *search* inside the
string *text*. Positions start at 0.
- *LEFT(value, LENGTH)*: Returns the *LENGTH* leftmost characters of
the string *value*
- **text** (string): the haystack
- **search** (string): the needle
- **start** (number, *optional*): limit the search to a subset of the text,
beginning at *start*
- **end** (number, *optional*): limit the search to a subset of the text,
ending at *end*
- returns **position** (number): the character position of the match. If *search*
is not contained in *text*, -1 is returned.
- *RIGHT(value, LENGTH)*: Returns the *LENGTH* rightmost characters of
the string *value*
```js
FIND_LAST("foobarbaz", "ba") // 6
FIND_LAST("foobarbaz", "ba", 7) // -1
FIND_LAST("foobarbaz", "ba", 0, 4) // 3
```
- *TRIM(value, type)*: Returns the string *value* with whitespace stripped
from the start and/or end. The optional *type* parameter specifies from which parts
of the string the whitespace is stripped:
!SUBSECTION LEFT()
- *type* 0 will strip whitespace from the start and end of the string
- *type* 1 will strip whitespace from the start of the string only
- *type* 2 will strip whitespace from the end of the string only
`LEFT(value, length) → substring`
- *TRIM(value, chars)*: Returns the string *value* with whitespace stripped
from the start and end. The optional *chars* parameter can be used to override the
characters that should be removed from the string. It defaults to `\r\n \t`
(i.e. `0x0d`, `0x0a`, `0x20` and `0x09`).
Return the *length* leftmost characters of the string *value*.
/* "foobar" */
TRIM(" foobar\t \r\n ")
- **value** (string): a string
- **length** (number): how many characters to return
- returns **substring** (string): at most *length* characters of *value*,
starting on the left-hand side of the string
/* "foo;bar;baz" */
TRIM(";foo;bar;baz, ", ",; ")
```js
LEFT("foobar", 3) // "foo"
LEFT("foobar", 10) // "foobar"
```
- *LTRIM(value, chars)*: Returns the string *value* with whitespace stripped
from the start only. The optional *chars* parameter can be used to override the
characters that should be removed from the string. It defaults to `\r\n\t `
(i.e. `0x0d`, `0x0a`, `0x09` and `0x20`).
!SUBSECTION LENGTH()
- *RTRIM(value, chars)*: Returns the string *value* with whitespace stripped
from the end only. The optional *chars* parameter can be used to override the
characters that should be removed from the string. It defaults to `\r\n\t `
(i.e. `0x0d`, `0x0a`, `0x09` and `0x20`).
`LENGTH(str) → length`
- *SPLIT(value, separator, limit)*: Splits the given string *value* into a list of
strings, using the *separator*. The *separator* can either be a string or a
list of strings. If the *separator* is the empty string, *value* will be split
into a list of characters. If no *separator* is specified, *value* will be
returned inside a list.
The optional parameter *limit* can be used to limit the number of split values in
the result. If no *limit* is given, the number of splits returned is not bounded.
Determine the [number of elements](Array.md#length) in an array,
the [number of attribute keys](Document.md#length) of an object / document,
the [number of documents](Miscellaneous.md#length) in a collection,
or the character length of a string.
- *REVERSE(value)*: Returns the reverse of the string *value*
- **str** (string): a string. If a number is passed, it will be cast to string first.
- returns **length** (number): the character length of *str* (not byte length)
- *CONTAINS(text, search, return-index)*: Checks whether the string
*search* is contained in the string *text*. By default, this function returns
*true* if *search* is contained in *text*, and *false* otherwise. By
passing *true* as the third function parameter *return-index*, the function
will return the position of the first occurrence of *search* within *text*,
starting at offset 0, or *-1* if *search* is not contained in *text*.
```js
LENGTH("foobar") // 6
LENGTH("电脑坏了") // 4
```
The string matching performed by *CONTAINS* is case-sensitive.
!SUBSECTION LIKE()
* *FIND_FIRST(text, search, start, end)*: Returns the position of the first
occurrence of the string *search* inside the string *text*. Positions start at
zero. If *search* is not contained in *text*, -1 is returned. The search can
optionally be limited to a subset of *text* using the *start* and *end* arguments.
`LIKE(text, search, caseInsensitive) → bool`
* *FIND_LAST(text, search, start, end)*: Returns the position of the last
occurrence of the string *search* inside the string *text*. Positions start at
zero. If *search* is not contained in *text*, -1 is returned. The search can
optionally be limited to a subset of *text* using the *start* and *end* arguments.
Check whether the pattern *search* is contained in the string *text*,
using wildcard matching.
- *LIKE(text, search, case-insensitive)*: Checks whether the pattern
*search* is contained in the string *text*, using wildcard matching.
Returns *true* if the pattern is contained in *text*, and *false* otherwise.
The *pattern* string can contain the wildcard characters *%* (meaning any
sequence of characters) and *_* (any single character).
- **text** (string): a string
- **search** (string): a search pattern that can contain the wildcard characters
*%* (meaning any sequence of characters, including none) and *_* (any single
character). Literal *%* and *_* must be escaped with two backslashes.
*search* cannot be a variable or a document attribute. The actual value must
be present at query parse time already.
- **caseInsensitive** (bool, *optional*): if set to *true*, the matching will be
case-insensitive. The default is *false*.
- returns **bool** (bool): *true* if the pattern is contained in *text*,
and *false* otherwise
The string matching performed by *LIKE* is case-sensitive by default, but by
passing *true* as the third parameter, the matching will be case-insensitive.
!SUBSECTION LOWER()
The value for *search* cannot be a variable or a document attribute. The actual
value must be present at query parse time already.
`LOWER(value) → lowerCaseString`
- *MD5(text)*: calculates the MD5 checksum for *text* and returns it in a
hexadecimal string representation.
Convert upper-case letters in *value* to their lower-case counterparts.
All other characters are returned unchanged.
- *SHA1(text)*: calculates the SHA1 checksum for *text* and returns it in a
hexadecimal string representation.
- **value** (string): a string
- returns **lowerCaseString** (string): *value* with upper-case characters converted
to lower-case characters
- *RANDOM_TOKEN(length)*: generates a pseudo-random token string with the
specified length. *length* must be greater than zero and at most 65536. The
generated token may consist of lower and uppercase letters and numbers. The
algorithm for token generation should be treated as opaque.
!SUBSECTION LTRIM()
`LTRIM(value, chars) → strippedString`
Return the string *value* with whitespace stripped from the start only.
- **value** (string): a string
- **chars** (string, *optional*): override the characters that should
be removed from the string. It defaults to `\r\n \t` (i.e. `0x0d`, `0x0a`,
`0x20` and `0x09`).
- returns **strippedString** (string): *value* without *chars* at the
left-hand side
```js
LTRIM("foo bar") // "foo bar"
LTRIM(" foo bar ") // "foo bar "
LTRIM("--==[foo-bar]==--", "-=[]") // "foo-bar]==--"
```
!SUBSECTION MD5()
`MD5(text) → hash`
Calculate the MD5 checksum for *text* and return it in a hexadecimal
string representation.
- **text** (string): a string
- returns **hash** (string): MD5 checksum as hex string
```js
MD5("foobar") // "3858f62230ac3c915f300c664312c63f"
```
!SUBSECTION RANDOM_TOKEN()
`RANDOM_TOKEN(length) → randomString`
Generate a pseudo-random token string with the specified length.
The algorithm for token generation should be treated as opaque.
- **length** (number): desired string length for the token. It must be greater
than 0 and at most 65536.
- returns **randomString** (string): a generated token consisting of lowercase
letters, uppercase letters and numbers
```js
RANDOM_TOKEN(8) // "zGl09z42"
RANDOM_TOKEN(8) // "m9w50Ft9"
```
!SUBSECTION REVERSE()
`REVERSE(value) → reversedString`
Return the reverse of the string *value*.
- **value** (string): a string
- returns **reversedString** (string): a new string with the characters in
reverse order
```js
REVERSE("foobar") // "raboof"
REVERSE("电脑坏了") // "了坏脑电"
```
!SUBSECTION RIGHT()
`RIGHT(value, length) → substring`
Return the *length* rightmost characters of the string *value*.
- **value** (string): a string
- **length** (number): how many characters to return
- returns **substring** (string): at most *length* characters of *value*,
starting on the right-hand side of the string
```js
RIGHT("foobar", 3) // "bar"
RIGHT("foobar", 10) // "foobar"
```
!SUBSECTION RTRIM()
`RTRIM(value, chars) → strippedString`
Return the string *value* with whitespace stripped from the end only.
- **value** (string): a string
- **chars** (string, *optional*): override the characters that should
be removed from the string. It defaults to `\r\n \t` (i.e. `0x0d`, `0x0a`,
`0x20` and `0x09`).
- returns **strippedString** (string): *value* without *chars* at the
right-hand side
```js
RTRIM("foo bar") // "foo bar"
RTRIM(" foo bar ") // " foo bar"
RTRIM("--==[foo-bar]==--", "-=[]") // "--==[foo-bar"
```
!SUBSECTION SHA1()
`SHA1(text) → hash`
Calculate the SHA1 checksum for *text* and return it in a hexadecimal
string representation.
- **text** (string): a string
- returns **hash** (string): SHA1 checksum as hex string
```js
SHA1("foobar") // "8843d7f92416211de9ebb963ff4ce28125932878"
```
!SUBSECTION SPLIT()
`SPLIT(value, separator, limit) → strArray`
Split the given string *value* into a list of strings, using the *separator*.
- **value** (string): a string
- **separator** (string): either a string or a list of strings. If *separator* is
an empty string, *value* will be split into a list of characters. If no *separator*
is specified, *value* will be returned as array.
- **limit** (number, *optional*): limit the number of split values in the result.
If no *limit* is given, the number of splits returned is not bounded.
- returns **strArray** (array): an array of strings
```js
SPLIT( "foo-bar-baz", "-" ) // [ "foo", "bar", "baz" ]
SPLIT( "foo-bar-baz", "-", 1 ) // [ "foo", "bar-baz" ]
SPLIT( "foo, bar & baz", [ ", ", " & " ] ) // [ "foo", "bar", "baz" ]
```
!SUBSECTION SUBSTITUTE()
`SUBSTITUTE(value, search, replace, limit) → substitutedString`
Replace search values in the string *value*.
- **value** (string): a string
- **search** (string|array): if *search* is a string, all occurrences of
*search* will be replaced in *value*. If *search* is an array of strings,
each occurrence of a value contained in *search* will be replaced by the
  corresponding array element in *replace*. If *replace* has fewer items
than *search*, occurrences of unmapped *search* items will be replaced by an
empty string.
- **replace** (string|array, *optional*): a replacement string, or an array of
  strings to replace the corresponding elements of *search* with. Can have fewer
  elements than *search* or be left out to remove matches. If *search* is an array
but *replace* is a string, then all matches will be replaced with *replace*.
- **limit** (number, *optional*): cap the number of replacements to this value
- returns **substitutedString** (string): a new string with matches replaced
(or removed)
```js
SUBSTITUTE( "the quick brown foxx", "quick", "lazy" )
// "the lazy brown foxx"
SUBSTITUTE( "the quick brown foxx", [ "quick", "foxx" ], [ "slow", "dog" ] )
// "the slow brown dog"
SUBSTITUTE( "the quick brown foxx", [ "the", "foxx" ], [ "that", "dog" ], 1 )
// "that quick brown foxx"
SUBSTITUTE( "the quick brown foxx", [ "the", "quick", "foxx" ], [ "A", "VOID!" ] )
// "A VOID! brown "
SUBSTITUTE( "the quick brown foxx", [ "quick", "foxx" ], "xx" )
// "the xx brown xx"
```
`SUBSTITUTE(value, mapping, limit) → substitutedString`
Alternatively, *search* and *replace* can be specified in a combined value.
- **value** (string): a string
- **mapping** (object): a lookup map with search strings as keys and replacement
strings as values. Empty strings and *null* as values remove matches.
- **limit** (number, *optional*): cap the number of replacements to this value
- returns **substitutedString** (string): a new string with matches replaced
(or removed)
```js
SUBSTITUTE("the quick brown foxx", {
"quick": "small",
"brown": "slow",
"foxx": "ant"
})
// "the small slow ant"
SUBSTITUTE("the quick brown foxx", {
"quick": "",
"brown": null,
"foxx": "ant"
})
// "the ant"
SUBSTITUTE("the quick brown foxx", {
"quick": "small",
"brown": "slow",
"foxx": "ant"
}, 2)
// "the small slow foxx"
```
!SUBSECTION SUBSTRING()
`SUBSTRING(value, offset, length) → substring`
Return a substring of *value*.
- **value** (string): a string
- **offset** (number): start at *offset*, offsets start at position 0
- **length** (number, *optional*): at most *length* characters, omit to get the
substring from *offset* to the end of the string
- returns **substring** (string): a substring of *value*
!SUBSECTION TRIM()
`TRIM(value, type) → strippedString`
Return the string *value* with whitespace stripped from the start and/or end.
The optional *type* parameter specifies from which parts of the string the
whitespace is stripped. [LTRIM()](#ltrim) and [RTRIM()](#rtrim) are preferred
however.
- **value** (string): a string
- **type** (number, *optional*): strip whitespace from the
- 0 start and end of the string
- 1 start of the string only
- 2 end of the string only
The default is 0.
`TRIM(value, chars) → strippedString`
Return the string *value* with whitespace stripped from the start and end.
- **value** (string): a string
- **chars** (string, *optional*): override the characters that should
be removed from the string. It defaults to `\r\n \t` (i.e. `0x0d`, `0x0a`,
`0x20` and `0x09`).
- returns **strippedString** (string): *value* without *chars* on both sides
```js
TRIM("foo bar") // "foo bar"
TRIM(" foo bar ") // "foo bar"
TRIM("--==[foo-bar]==--", "-=[]") // "foo-bar"
TRIM(" foobar\t \r\n ") // "foobar"
TRIM(";foo;bar;baz, ", ",; ") // "foo;bar;baz"
```
!SUBSECTION UPPER()
`UPPER(value) → upperCaseString`
Convert lower-case letters in *value* to their upper-case counterparts.
All other characters are returned unchanged.
- **value** (string): a string
- returns **upperCaseString** (string): *value* with lower-case characters converted
to upper-case characters

View File

@ -11,36 +11,48 @@ In an AQL query, type casts are performed only upon request and not implicitly.
This helps avoiding unexpected results. All type casts have to be performed by
invoking a type cast function. AQL offers several type cast functions for this
task. Each of these functions takes an operand of any data type and returns
a result value of type corresponding to the function name (e.g. *TO_NUMBER()*
a result value of type corresponding to the function name (e.g. `TO_NUMBER()`
will return a numeric value):
- *TO_BOOL(value)*: Takes an input *value* of any type and converts it
into the appropriate boolean value as follows:
- *null* is converted to *false*.
- Numbers are converted to *true*, except for 0, which is converted to *false*.
- Strings are converted to *true* if they are non-empty, and to *false* otherwise.
- Arrays are always converted to *true*.
- Objects / documents are always converted to *true*.
It's also possible to use double negation to cast to boolean:
```js
!SUBSECTION TO_BOOL()
`TO_BOOL(value) → bool`
Take an input *value* of any type and convert it into the appropriate
boolean value.
- **value** (any): input of arbitrary type
- returns **bool** (boolean):
- *null* is converted to *false*
- Numbers are converted to *true*, except for 0, which is converted to *false*
- Strings are converted to *true* if they are non-empty, and to *false* otherwise
- Arrays are always converted to *true* (even if empty)
- Objects / documents are always converted to *true*
It's also possible to use double negation to cast to boolean:
```js
!!1 // true
!!0 // false
!!-0.0 // false
not not 1 // true
!!'non-empty string' // true
!!'' // false
```
`TO_BOOL()` is preferred however, because it states the intention clearer.
!!"non-empty string" // true
!!"" // false
```
- *TO_NUMBER(value)*: Takes an input *value* of any type and converts it
into a numeric value as follows:
- *null* and *false* are converted to the value *0*.
- *true* is converted to *1*.
- Numbers keep their original value.
`TO_BOOL()` is preferred however, because it states the intention clearer.
!SUBSECTION TO_NUMBER()
`TO_NUMBER(value) → number`
Take an input *value* of any type and convert it into a numeric value.
- **value** (any): input of arbitrary type
- returns **number** (number):
- *null* and *false* are converted to the value *0*
- *true* is converted to *1*
- Numbers keep their original value
- Strings are converted to their numeric equivalent if the string contains a
valid representation of a number. Whitespace at the start and end of the string
is allowed. String values that do not contain any valid representation of a number
@ -71,29 +83,45 @@ not not 1 // true
- *false* is converted to the string *"false"*, *true* to the string *"true"*
- Numbers are converted to their string representations. This can also be a
scientific notation: `TO_STRING(0.0000002) // "2e-7"`
- An empty array is converted to the empty string. An array with one member is converted
to the result of `TO_STRING()` for its sole member. An array with two or more members
is converted to a comma-separated array with the string representation of its members:
`TO_STRING([1,2,3]) // "1,2,3"`
- An object / document is converted to the string *[object Object]*.
- An empty array is converted to the empty string. An array with one member is
converted to the result of `TO_STRING()` for its sole member. An array with
two or more members is converted to a comma-separated array with the string
representation of its members: `TO_STRING([1,2,3]) // "1,2,3"`
- An object / document is converted to the string `"[object Object]"`.
`CONCAT(value)` behaves identical if a single parameter is passed only.
- *TO_ARRAY(value)*: Takes an input *value* of any type and converts it
into an array value as follows:
- *null* is converted to an empty array.
- Boolean values, numbers and strings are converted to an array containing the original
value as its single element.
- Arrays keep their original value.
- Objects / documents are converted to an array containing their attribute **values**
as array elements:
!SUBSECTION TO_ARRAY()
```js
`TO_ARRAY(value) → array`
Take an input *value* of any type and convert it into an array value.
- **value** (any): input of arbitrary type
- returns **array** (array):
- *null* is converted to an empty array
- Boolean values, numbers and strings are converted to an array containing
the original value as its single element
- Arrays keep their original value
- Objects / documents are converted to an array containing their attribute
**values** as array elements, just like [VALUES()](Document.md#values)
```js
TO_ARRAY(null) // []
TO_ARRAY(false) // [false]
TO_ARRAY(true) // [true]
TO_ARRAY(5) // [5]
TO_ARRAY("foo") // ["foo"]
TO_ARRAY([1, 2, "foo"]) // [1, 2, "foo"]
TO_ARRAY({foo: 1, bar: 2, baz: [3, 4, 5]}) // [1, 2, [3, 4, 5]]
```
```
- *TO_LIST(value)*: This is an alias for *TO_ARRAY*.
!SUBSECTION TO_LIST()
!SECTION Type check functions
`TO_LIST(value) → array`
This is an alias for [TO_ARRAY()](#toarray).
!CHAPTER Type check functions
AQL also offers functions to check the data type of a value at runtime. The
following type check functions are available. Each of these functions takes an
@ -102,26 +130,27 @@ checked for, and false otherwise.
The following type check functions are available:
- *IS_NULL(value)*: Checks whether *value* is a *null* value
- `IS_NULL(value) → bool`: Check whether *value* is a *null* value
- *IS_BOOL(value)*: Checks whether *value* is a *boolean* value
- `IS_BOOL(value) → bool`: Check whether *value* is a *boolean* value
- *IS_NUMBER(value)*: Checks whether *value* is a *numeric* value
- `IS_NUMBER(value) → bool`: Check whether *value* is a *numeric* value
- *IS_STRING(value)*: Checks whether *value* is a *string* value
- `IS_STRING(value) → bool`: Check whether *value* is a *string* value
- *IS_ARRAY(value)*: Checks whether *value* is an *array* value
- `IS_ARRAY(value) → bool`: Check whether *value* is an *array* value
- *IS_LIST(value)*: This is an alias for *IS_ARRAY*
- `IS_LIST(value) → bool`: This is an alias for *IS_ARRAY()*
- *IS_OBJECT(value)*: Checks whether *value* is an *object* / *document* value
- `IS_OBJECT(value) → bool`: Check whether *value* is an *object* /
*document* value
- *IS_DOCUMENT(value)*: This is an alias for *IS_OBJECT*
- `IS_DOCUMENT(value) → bool`: This is an alias for *IS_OBJECT()*
- *IS_DATESTRING(value)*: Checks whether *value* is a string that can be used
- `IS_DATESTRING(value) → bool`: Check whether *value* is a string that can be used
in a date function. This includes partial dates such as *2015* or *2015-10* and
strings containing invalid dates such as *2015-02-31*. The function will return
false for all non-string values, even if some of them may be usable in date functions.
- *TYPENAME(value)*: Returns the data type name of *value*. The data type name can
be either *null*, *bool*, *number*, *string*, *array* or *object*.
- `TYPENAME(value) → typeName`: Return the data type name of *value*. The data type
name can be either *"null"*, *"bool"*, *"number"*, *"string"*, *"array"* or *"object"*.

View File

@ -24,7 +24,7 @@ letter, digit or the underscore symbol.
Bind variables represent a value like a string, and must not be put in quotes.
If you need to do string processing (concatenation, etc.) in the AQL query, you need
[to use string functions to do so](../Aql/StringFunctions.md):
[to use string functions to do so](../Functions/String.md):
FOR u IN users
FILTER u.id == CONCAT('prefix', @id, 'suffix') && u.name == @name
@ -40,5 +40,8 @@ when using the bind parameter in a query, two *@* symbols must be used).
RETURN u
Specific information about parameters binding can also be found in [Aql With Web Interface](AqlWithWebInterface.md) and [Aql With Arangosh](AqlWithArangosh.md), and [HTTP Interface for AQL Queries](../HttpAqlQuery/README.md)
Specific information about parameters binding can also be found in
[Aql With Web Interface](../Invocation/WithWebInterface.md) and
[Aql With Arangosh](../Invocation/WithArangosh.md), and
[HTTP Interface for AQL Queries](../../HTTP/AqlQuery/index.html)

View File

@ -12,53 +12,61 @@ available:
- array: Sequence of values, referred to by their positions
- object / document: Sequence of values, referred to by their names
!SUBSUBSECTION Numeric literals
!SECTION Primitive types
!SUBSECTION Numeric literals
Numeric literals can be integers or real values. They can optionally be signed
using the *+* or *-* symbols. The scientific notation is also supported.
1
42
-1
-42
1.23
-99.99
0.1
-4.87e103
```
1
42
-1
-42
1.23
-99.99
0.1
-4.87e103
```
All numeric values are treated as 64-bit double-precision values internally.
The internal format used is IEEE 754.
!SUBSUBSECTION String literals
!SUBSECTION String literals
String literals must be enclosed in single or double quotes. If the used quote
character is to be used itself within the string literal, it must be escaped
using the backslash symbol. Backslash literals themselves must also be escaped using
a backslash.
"yikes!"
"don't know"
"this is a \"quoted\" word"
"this is a longer string."
"the path separator on Windows is \\"
```
"yikes!"
"don't know"
"this is a \"quoted\" word"
"this is a longer string."
"the path separator on Windows is \\"
'yikes!'
'don\'t know'
'this is a longer string."
'the path separator on Windows is \\'
'yikes!'
'don\'t know'
'this is a longer string.'
'the path separator on Windows is \\'
```
All string literals must be UTF-8 encoded. It is currently not possible to use
arbitrary binary data if it is not UTF-8 encoded. A workaround to use binary
data is to encode the data using base64 or other algorithms on the application
side before storing, and decoding it on application side after retrieval.
!SUBSUBSECTION Arrays
!SECTION Compound types
AQL supports two compound types:
- arrays: A composition of unnamed values, each accessible by their positions
- objects / documents: A composition of named values, each accessible by their names
!SUBSECTION Arrays / Lists
The first supported compound type is the array type. Arrays are effectively
sequences of (unnamed / anonymous) values. Individual array elements can be
accessed by their positions. The order of elements in an array is important.
@ -69,35 +77,41 @@ other with the *,* symbol.
In the easiest case, an array is empty and thus looks like:
[ ]
```json
[ ]
```
Array elements can be any legal *expression* values. Nesting of arrays is
supported.
[ 1, 2, 3 ]
[ -99, "yikes!", [ true, [ "no"], [ ] ], 1 ]
[ [ "fox", "marshal" ] ]
```json
[ 1, 2, 3 ]
[ -99, "yikes!", [ true, [ "no"], [ ] ], 1 ]
[ [ "fox", "marshal" ] ]
```
Individual array values can later be accesses bd their positions using the *[]*
Individual array values can later be accessed by their positions using the *[]*
accessor. The position of the accessed element must be a numeric
value. Positions start at 0. It is also possible to use negative index values
value. Positions start at 0. It is also possible to use negative index values
to access array values starting from the end of the array. This is convenient if
the length of the array is unknown and access to elements at the end of the array
is required.
// access 1st array element (element start at index 0)
u.friends[0]
```js
// access 1st array element (elements start at index 0)
u.friends[0]
// access 3rd array element
u.friends[2]
// access 3rd array element
u.friends[2]
// access last array element
u.friends[-1]
// access last array element
u.friends[-1]
// access second last array element
u.friends[-2]
// access second to last array element
u.friends[-2]
```
!SUBSUBSECTION Objects / Documents
!SUBSECTION Objects / Documents
The other supported compound type is the object (or document) type. Objects are a
composition of zero to many attributes. Each attribute is a name/value pair.
@ -108,48 +122,79 @@ object contains zero to many attribute declarations, separated from each other
with the *,* symbol. In the simplest case, an object is empty. Its
declaration would then be:
{ }
```json
{ }
```
Each attribute in an object is a name / value pair. Name and value of an
attribute are separated using the *:* symbol.
The attribute name is mandatory and must be specified as a quoted or unquoted
string. If a keyword is used as an attribute name, the attribute name must be quoted:
{ return : 1 } /* won't work */
{ "return" : 1 } /* works ! */
{ `return` : 1 } /* works, too! */
```js
{ return : 1 } /* won't work */
{ "return" : 1 } /* works ! */
{ `return` : 1 } /* works, too! */
```
Since ArangoDB 2.6, object attribute names can be computed using dynamic expressions, too.
To disambiguate regular attribute names from attribute name expressions, computed
attribute names must be enclosed in *[* and *]*:
{ [ CONCAT("test/", "bar") ] : "someValue" }
```js
{ [ CONCAT("test/", "bar") ] : "someValue" }
```
Since ArangoDB 2.7, there is also shorthand notation for attributes which is handy for
returning existing variables easily:
LET name = "Peter"
LET age = 42
RETURN { name, age }
```js
LET name = "Peter"
LET age = 42
RETURN { name, age }
```
The above is the shorthand equivalent for the generic form:
LET name = "Peter"
LET age = 42
RETURN { name : name, age : age }
```js
LET name = "Peter"
LET age = 42
RETURN { name : name, age : age }
```
Any valid expression can be used as an attribute value. That also means nested
objects can be used as attribute values:
{ name : "Peter" }
{ "name" : "Vanessa", "age" : 15 }
{ "name" : "John", likes : [ "Swimming", "Skiing" ], "address" : { "street" : "Cucumber lane", "zip" : "94242" } }
```json
{ name : "Peter" }
{ "name" : "Vanessa", "age" : 15 }
{ "name" : "John", likes : [ "Swimming", "Skiing" ], "address" : { "street" : "Cucumber lane", "zip" : "94242" } }
```
Individual object attributes can later be accessed by their names using the
*.* accessor. If a non-existing attribute is accessed, the result is *null*.
*.* accessor:
u.address.city.name
u.friends[0].name.first
```js
u.address.city.name
u.friends[0].name.first
```
Attributes can also be accessed using the *[]* accessor:
```js
u["address"]["city"]["name"]
u["friends"][0]["name"]["first"]
```
In contrast to the dot accessor, the square brackets allow for expressions:
```js
LET attr1 = "friends"
LET attr2 = "name"
u[attr1][0][attr2][ CONCAT("fir", "st") ]
```
Note that if a non-existing attribute is accessed in one or the other way,
the result will be *null*, without error or warning.

View File

@ -26,6 +26,6 @@ examples that will cause run-time errors are:
includes unary (logical not/negation), binary (logical and, logical or), and
the ternary operators
Please refer to the [Arango Errors](../ErrorCodes/README.md) page for a list of error codes and
meanings.
Please refer to the [Arango Errors](../../Users/Appendix/ErrorCodes.html) page
for a list of error codes and meanings.

View File

@ -11,34 +11,47 @@ For example, when returning data from a collection with inhomogeneous documents
without modification, the result values will as well have an inhomogeneous
structure. Each result value itself is a document:
FOR u IN users
RETURN u
[ { "id" : 1, "name" : "John", "active" : false },
{ "age" : 32, "id" : 2, "name" : "Vanessa" },
{ "friends" : [ "John", "Vanessa" ], "id" : 3, "name" : "Amy" } ]
```js
FOR u IN users
RETURN u
```
```json
[ { "id": 1, "name": "John", "active": false },
{ "age": 32, "id": 2, "name": "Vanessa" },
{ "friends": [ "John", "Vanessa" ], "id": 3, "name": "Amy" } ]
```
However, if a fixed set of attributes from the collection is queried, then the
query result values will have a homogeneous structure. Each result value is
still a document:
FOR u IN users
RETURN { "id" : u.id, "name" : u.name }
[ { "id" : 1, "name" : "John" },
{ "id" : 2, "name" : "Vanessa" },
{ "id" : 3, "name" : "Amy" } ]
```js
FOR u IN users
RETURN { "id": u.id, "name": u.name }
```
```json
[ { "id": 1, "name": "John" },
{ "id": 2, "name": "Vanessa" },
{ "id": 3, "name": "Amy" } ]
```
It is also possible to query just scalar values. In this case, the result set
is an array of scalars, and each result value is a scalar value:
FOR u IN users
RETURN u.id
[ 1, 2, 3 ]
```js
FOR u IN users
RETURN u.id
```
```json
[ 1, 2, 3 ]
```
If a query does not produce any results because no matching data can be
found, it will produce an empty result array:
[ ]
```json
[ ]
```

View File

@ -86,43 +86,45 @@ Keywords are case-insensitive, meaning they can be specified in lower, upper, or
mixed case in queries. In this documentation, all keywords are written in upper
case to make them distinguishable from other query parts.
In addition to the higher-level operations keywords, there are other keywords.
The current list of keywords is:
There are a few more keywords in addition to the higher-level operation keywords.
Additional keywords may be added in future versions of ArangoDB.
The complete list of keywords is currently:
- FOR
- RETURN
- FILTER
- SORT
- LIMIT
- LET
- COLLECT
- INSERT
- UPDATE
- REPLACE
- REMOVE
- UPSERT
- WITH
- ASC
- DESC
- IN
- INTO
- NOT
- AND
- OR
- LIKE
- NULL
- TRUE
- FALSE
- DISTINCT
- GRAPH
- OUTBOUND
- INBOUND
- ANY
- ALL
- NONE
- AGGREGATE
Additional keywords may be added in future versions of ArangoDB.
<div class="columns-3">
<ul>
<li>AGGREGATE</li>
<li>ALL</li>
<li>AND</li>
<li>ANY</li>
<li>ASC</li>
<li>COLLECT</li>
<li>DESC</li>
<li>DISTINCT</li>
<li>FALSE</li>
<li>FILTER</li>
<li>FOR</li>
<li>GRAPH</li>
<li>IN</li>
<li>INBOUND</li>
<li>INSERT</li>
<li>INTO</li>
<li>LET</li>
<li>LIMIT</li>
<li>NONE</li>
<li>NOT</li>
<li>NULL</li>
<li>OR</li>
<li>OUTBOUND</li>
<li>REMOVE</li>
<li>REPLACE</li>
<li>RETURN</li>
<li>SORT</li>
<li>TRUE</li>
<li>UPDATE</li>
<li>UPSERT</li>
<li>WITH</li>
</ul>
</div>
!SUBSECTION Names
@ -154,8 +156,8 @@ The example can alternatively written as:
Collection names can be used in queries as they are. If a collection happens to
have the same name as a keyword, the name must be enclosed in backticks.
Please refer to the [Naming Conventions in ArangoDB](../NamingConventions/CollectionNames.md) about collection naming
conventions.
Please refer to the [Naming Conventions in ArangoDB](../../Users/DataModeling/NamingConventions/CollectionNames.html)
about collection naming conventions.
!SUBSUBSECTION Attribute names
@ -164,8 +166,8 @@ attribute name must be used. This is because multiple collections with ambiguous
attribute names may be used in a query. To avoid any ambiguity, it is not
allowed to refer to an unqualified attribute name.
Please refer to the [Naming Conventions in ArangoDB](../NamingConventions/AttributeNames.md) for more information about the
attribute naming conventions.
Please refer to the [Naming Conventions in ArangoDB](../../Users/DataModeling/NamingConventions/AttributeNames.html)
for more information about the attribute naming conventions.
FOR u IN users
FOR f IN friends

View File

@ -9,7 +9,7 @@ data values if the operands have the same data types.
The following type order is used when comparing data types:
null < bool < number < string < array < object / document
null < bool < number < string < array/list < object/document
This means *null* is the smallest type in AQL and *document* is the type with
the highest order. If the compared operands have a different type, then the

View File

@ -2,7 +2,7 @@
AQL has the following functions to traverse loosely-structured graphs consisting of
an edge collection and plus the vertices connected by it. If you have created a graph
using the general-graph module you may want to use its more specific [Graph operations](../Aql/GraphOperations.md)
using the general-graph module you may want to use its more specific [Graph operations](Operations.md)
instead.
@ -74,8 +74,8 @@ with the option *includeVertices* set to true.
!SECTION General Purpose Traversals
General purpose traversals with its extendability by visitor functions offer more possibilities over the newer [AQL graph traversals](../Aql/GraphTraversals.md),
however unless you need some of these features you should [prefer AQL graph traversals](../Aql/GraphTraversals.md).
General purpose traversals with their extensibility by visitor functions offer more possibilities than the newer [AQL graph traversals](Traversals.md),
however unless you need some of these features you should [prefer AQL graph traversals](Traversals.md).
!SUBSECTION Traversal
*TRAVERSAL(vertexcollection, edgecollection, startVertex, direction, options)*:
@ -516,7 +516,7 @@ path is a document consisting of the following attributes:
RETURN p.vertices[*].name
!SECTION Graph consistency
When [using the graph management functions to remove vertices](../GeneralGraphs/Management.md#remove-a-vertex)
When [using the graph management functions to remove vertices](Management.md#remove-a-vertex)
you have the guarantee that all referencing edges are also removed.
However, if you use document features alone to remove vertices, no edge collections will be adjusted.
This results in an edge with its `_from` or `_to` attribute referring to vanished vertices.
@ -532,7 +532,7 @@ In order to keep the result set consistent between *includeData* enabled or disa
When using any of AQL's general purpose traversal functions, please make sure that the graph
does not contain cycles, or that you at least specify some maximum depth or uniqueness
criteria for a traversal. In contrast [AQL graph traversals](../Aql/GraphTraversals.md) won't trap on cycles.
criteria for a traversal. In contrast [AQL graph traversals](Traversals.md) won't trap on cycles.
If no bounds are set, a traversal may run into an endless loop in a cyclic graph or sub-graph.
Even in a non-cyclic graph, traversing far into the graph may consume a lot of processing

View File

@ -6,12 +6,12 @@ This chapter describes graph related AQL functions.
A lot of the following functions accept a vertex (or edge) example as parameter. This can contain the following:
* {} : Returns all possible vertices for this graph
* *idString* : Returns the vertex/edge with the id *idString*
* [*idString1*, *idString2* ...] : Returns the vertices/edges with the ids matching the given strings.
* {*key1* : *value1*, *key2* : *value2*} : Returns the vertices/edges that match this example, which means that both have *key1* and *key2* with the corresponding attributes
* {*key1.key2* : *value1*, *key3* : *value2*} : It is possible to chain keys, which means that a document *{key1 : {key2 : value1}, key3 : value2}* would be a match
* [{*key1* : *value1*}, {*key2* : *value2*}] : Returns the vertices/edges that match one of the examples, which means that either *key1* or *key2* are set with the corresponding value
- {}: Returns all possible vertices for this graph
- *idString*: Returns the vertex/edge with the id *idString*
- [*idString1*, *idString2* ...]: Returns the vertices/edges with the ids matching the given strings.
- {*key1*: *value1*, *key2*: *value2*}: Returns the vertices/edges that match this example, which means that both have *key1* and *key2* with the corresponding attributes
- {*key1.key2*: *value1*, *key3*: *value2*}: It is possible to chain keys, which means that a document *{key1: {key2: value1}, key3: value2}* would be a match
- [{*key1*: *value1*}, {*key2*: *value2*}]: Returns the vertices/edges that match one of the examples, which means that either *key1* or *key2* are set with the corresponding value
!SUBSECTION The complexity of the shortest path algorithms
@ -24,9 +24,9 @@ the amount of vertices in the graph, *x* the amount of start vertices and *y* th
target vertices. Hence a suggestion may be to use Dijkstra when x\*y < n and the function supports choosing your algorithm.
!SUBSECTION Example Graph
All examples in this chapter will use [this simple city graph](../Graphs/README.md#the-city-graph):
All examples in this chapter will use [this simple city graph](../../Users/Graphs/index.html#the-city-graph):
![Cities Example Graph](../Graphs/cities_graph.png)
![Cities Example Graph](../../Users/Graphs/cities_graph.png)
!SUBSECTION Edges and Vertices related functions
@ -48,19 +48,19 @@ Hence the default call would have a complexity of **O(n\*m)**;
*Parameters*
* *graphName* : The name of the graph as a string.
* *vertexExample* : An example for the desired
vertices (see [example](#short-explanation-of-the-example-parameter)).
* *options* (optional) : An object containing the following options:
* *direction* : The direction of the edges as a string. Possible values are *outbound*, *inbound* and *any* (default).
* *edgeCollectionRestriction* : One or multiple edge collection names. Only edges from these collections will be considered for the path.
* *startVertexCollectionRestriction* : One or multiple vertex collection names. Only vertices from these collections will be considered as start vertex of a path.
* *endVertexCollectionRestriction* : One or multiple vertex collection names. Only vertices from these collections will be considered as end vertex of a path.
* *edgeExamples* : A filter example for the edges (see [example](#short-explanation-of-the-example-parameter)).
* *minDepth* : Defines the minimal length of a path from an edge to a vertex (default is 1, which means only the edges directly connected to a vertex would be returned).
* *maxDepth* : Defines the maximal length of a path from an edge to a vertex (default is 1, which means only the edges directly connected to a vertex would be returned).
* *maxIterations*: the maximum number of iterations that the traversal is allowed to perform. It is sensible to set this number so unbounded traversals will terminate.
* *includeData*: Defines if the result should contain only ids (false) or if all documents should be fully extracted (true). By default this parameter is set to false, so only ids will be returned.
- *graphName*: The name of the graph as a string.
- *vertexExample*: An example for the desired
vertices (see [example](#short-explanation-of-the-example-parameter)).
- *options* (optional): An object containing the following options:
- *direction*: The direction of the edges as a string. Possible values are *outbound*, *inbound* and *any* (default).
- *edgeCollectionRestriction*: One or multiple edge collection names. Only edges from these collections will be considered for the path.
- *startVertexCollectionRestriction*: One or multiple vertex collection names. Only vertices from these collections will be considered as start vertex of a path.
- *endVertexCollectionRestriction*: One or multiple vertex collection names. Only vertices from these collections will be considered as end vertex of a path.
- *edgeExamples*: A filter example for the edges (see [example](#short-explanation-of-the-example-parameter)).
- *minDepth*: Defines the minimal length of a path from an edge to a vertex (default is 1, which means only the edges directly connected to a vertex would be returned).
- *maxDepth*: Defines the maximal length of a path from an edge to a vertex (default is 1, which means only the edges directly connected to a vertex would be returned).
- *maxIterations*: the maximum number of iterations that the traversal is allowed to perform. It is sensible to set this number so unbounded traversals will terminate.
- *includeData*: Defines if the result should contain only ids (false) or if all documents should be fully extracted (true). By default this parameter is set to false, so only ids will be returned.
**Examples**
@ -73,20 +73,20 @@ A route planner example, all edges to locations with a distance of either 700 or
var examples = require("@arangodb/graph-examples/example-graph.js");
var g = examples.loadGraph("routeplanner");
| db._query("FOR e IN GRAPH_EDGES("
| +"'routeplanner', {}, {edgeExamples : [{distance: 600}, {distance: 700}]}) RETURN e"
| +"'routeplanner', {}, {edgeExamples: [{distance: 600}, {distance: 700}]}) RETURN e"
).toArray();
~ examples.dropGraph("routeplanner");
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock generalGraphEdges1
A route planner example, all outbound edges of Hamburg with a maximal depth of 2 :
A route planner example, all outbound edges of Hamburg with a maximal depth of 2:
@startDocuBlockInline generalGraphEdges2
@EXAMPLE_ARANGOSH_OUTPUT{generalGraphEdges2}
var examples = require("@arangodb/graph-examples/example-graph.js");
var g = examples.loadGraph("routeplanner");
| db._query("FOR e IN GRAPH_EDGES("
| +"'routeplanner', 'germanCity/Hamburg', {direction : 'outbound', maxDepth : 2}) RETURN e"
| +"'routeplanner', 'germanCity/Hamburg', {direction: 'outbound', maxDepth: 2}) RETURN e"
).toArray();
~ examples.dropGraph("routeplanner");
@END_EXAMPLE_ARANGOSH_OUTPUT
@ -99,8 +99,8 @@ Including the data:
var examples = require("@arangodb/graph-examples/example-graph.js");
var g = examples.loadGraph("routeplanner");
| db._query("FOR e IN GRAPH_EDGES("
| + "'routeplanner', 'germanCity/Hamburg', {direction : 'outbound',"
| + "maxDepth : 2, includeData: true}) RETURN e"
| + "'routeplanner', 'germanCity/Hamburg', {direction: 'outbound',"
| + "maxDepth: 2, includeData: true}) RETURN e"
).toArray();
~ examples.dropGraph("routeplanner");
@END_EXAMPLE_ARANGOSH_OUTPUT
@ -121,11 +121,11 @@ outbound, inbound or any (default) edges.
*Parameters*
* *graphName* : The name of the graph as a string.
* *vertexExample* : An example for the desired vertices (see [example](#short-explanation-of-the-example-parameter)).
* *options* (optional) : An object containing the following options:
* *direction* : The direction of the edges as a string. Possible values are *outbound*, *inbound* and *any* (default).
* *vertexCollectionRestriction* : One or multiple vertex collections that should be considered.
- *graphName*: The name of the graph as a string.
- *vertexExample*: An example for the desired vertices (see [example](#short-explanation-of-the-example-parameter)).
- *options* (optional): An object containing the following options:
- *direction*: The direction of the edges as a string. Possible values are *outbound*, *inbound* and *any* (default).
- *vertexCollectionRestriction*: One or multiple vertex collections that should be considered.
**Examples**
@ -150,7 +150,7 @@ A route planner example, all vertices from collection *germanCity*.
var examples = require("@arangodb/graph-examples/example-graph.js");
var g = examples.loadGraph("routeplanner");
| db._query("FOR e IN GRAPH_VERTICES("
| +"'routeplanner', {}, {direction : 'any', vertexCollectionRestriction" +
| +"'routeplanner', {}, {direction: 'any', vertexCollectionRestriction" +
" : 'germanCity'}) RETURN e").toArray();
~ examples.dropGraph("routeplanner");
@END_EXAMPLE_ARANGOSH_OUTPUT
@ -175,18 +175,18 @@ Hence the default call would have a complexity of **O(n\*m)**;
*Parameters*
* *graphName* : The name of the graph as a string.
* *vertexExample* : An example for the desired vertices (see [example](#short-explanation-of-the-example-parameter)).
* *options* : An object containing the following options:
* *direction* : The direction of the edges. Possible values are *outbound*, *inbound* and *any* (default).
* *edgeExamples* : A filter example for the edges to the neighbors (see [example](#short-explanation-of-the-example-parameter)).
* *neighborExamples* : An example for the desired neighbors (see [example](#short-explanation-of-the-example-parameter)).
* *edgeCollectionRestriction* : One or multiple edge collection names. Only edges from these collections will be considered for the path.
* *vertexCollectionRestriction* : One or multiple vertex collection names. Only vertices from these collections will be contained in the result. This does not effect vertices on the path.
* *minDepth* : Defines the minimal depth a path to a neighbor must have to be returned (default is 1).
* *maxDepth* : Defines the maximal depth a path to a neighbor must have to be returned (default is 1).
* *maxIterations*: the maximum number of iterations that the traversal is allowed to perform. It is sensible to set this number so unbounded traversals will terminate at some point.
* *includeData* is a boolean value to define if the returned documents should be extracted instead of returning their ids only. The default is *false*.
- *graphName*: The name of the graph as a string.
- *vertexExample*: An example for the desired vertices (see [example](#short-explanation-of-the-example-parameter)).
- *options*: An object containing the following options:
- *direction*: The direction of the edges. Possible values are *outbound*, *inbound* and *any* (default).
- *edgeExamples*: A filter example for the edges to the neighbors (see [example](#short-explanation-of-the-example-parameter)).
- *neighborExamples*: An example for the desired neighbors (see [example](#short-explanation-of-the-example-parameter)).
- *edgeCollectionRestriction*: One or multiple edge collection names. Only edges from these collections will be considered for the path.
- *vertexCollectionRestriction*: One or multiple vertex collection names. Only vertices from these collections will be contained in the result. This does not affect vertices on the path.
- *minDepth*: Defines the minimal depth a path to a neighbor must have to be returned (default is 1).
- *maxDepth*: Defines the maximal depth a path to a neighbor must have to be returned (default is 1).
- *maxIterations*: the maximum number of iterations that the traversal is allowed to perform. It is sensible to set this number so unbounded traversals will terminate at some point.
- *includeData* is a boolean value to define if the returned documents should be extracted instead of returning their ids only. The default is *false*.
Note: in ArangoDB versions prior to 2.6 *NEIGHBORS* returned the array of neighbor vertices with
all attributes and not just the vertex ids. To return to the same behavior, set the *includeData*
@ -204,20 +204,20 @@ A route planner example, all neighbors of locations with a distance of either
var examples = require("@arangodb/graph-examples/example-graph.js");
var g = examples.loadGraph("routeplanner");
| db._query("FOR e IN GRAPH_NEIGHBORS("
| +"'routeplanner', {}, {edgeExamples : [{distance: 600}, {distance: 700}]}) RETURN e"
| +"'routeplanner', {}, {edgeExamples: [{distance: 600}, {distance: 700}]}) RETURN e"
).toArray();
~ examples.dropGraph("routeplanner");
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock generalGraphNeighbors1
A route planner example, all outbound neighbors of Hamburg with a maximal depth of 2 :
A route planner example, all outbound neighbors of Hamburg with a maximal depth of 2:
@startDocuBlockInline generalGraphNeighbors2
@EXAMPLE_ARANGOSH_OUTPUT{generalGraphNeighbors2}
var examples = require("@arangodb/graph-examples/example-graph.js");
var g = examples.loadGraph("routeplanner");
| db._query("FOR e IN GRAPH_NEIGHBORS("
| +"'routeplanner', 'germanCity/Hamburg', {direction : 'outbound', maxDepth : 2}) RETURN e"
| +"'routeplanner', 'germanCity/Hamburg', {direction: 'outbound', maxDepth: 2}) RETURN e"
).toArray();
~ examples.dropGraph("routeplanner");
@END_EXAMPLE_ARANGOSH_OUTPUT
@ -257,7 +257,7 @@ A route planner example, all common neighbors of capitals.
var examples = require("@arangodb/graph-examples/example-graph.js");
var g = examples.loadGraph("routeplanner");
| db._query("FOR e IN GRAPH_COMMON_NEIGHBORS("
| +"'routeplanner', {isCapital : true}, {isCapital : true}) RETURN e"
| +"'routeplanner', {isCapital: true}, {isCapital: true}) RETURN e"
).toArray();
~ examples.dropGraph("routeplanner");
@END_EXAMPLE_ARANGOSH_OUTPUT
@ -271,8 +271,8 @@ which have a maximal depth of 2:
var examples = require("@arangodb/graph-examples/example-graph.js");
var g = examples.loadGraph("routeplanner");
| db._query("FOR e IN GRAPH_COMMON_NEIGHBORS("
| +"'routeplanner', 'germanCity/Hamburg', {}, {direction : 'outbound', maxDepth : 2}, "+
| "{direction : 'outbound', maxDepth : 2}) RETURN e"
| +"'routeplanner', 'germanCity/Hamburg', {}, {direction: 'outbound', maxDepth: 2}, "+
| "{direction: 'outbound', maxDepth: 2}) RETURN e"
).toArray();
~ examples.dropGraph("routeplanner");
@END_EXAMPLE_ARANGOSH_OUTPUT
@ -298,13 +298,13 @@ defined by the parameters vertexExamples.
*Parameters*
* *graphName* : The name of the graph as a string.
* *vertex1Example* : An example for the desired vertices (see [example](#short-explanation-of-the-example-parameter)).
* *vertex2Example* : An example for the desired vertices (see [example](#short-explanation-of-the-example-parameter)).
* *options* (optional) : An object containing the following options:
* *vertex1CollectionRestriction* : One or multiple vertex collection names. Only vertices from these collections will be considered.
* *vertex2CollectionRestriction* : One or multiple vertex collection names. Only vertices from these collections will be considered.
* *ignoreProperties* : One or multiple attributes of a document that should be ignored, either a string or an array..
- *graphName*: The name of the graph as a string.
- *vertex1Example*: An example for the desired vertices (see [example](#short-explanation-of-the-example-parameter)).
- *vertex2Example*: An example for the desired vertices (see [example](#short-explanation-of-the-example-parameter)).
- *options* (optional): An object containing the following options:
- *vertex1CollectionRestriction*: One or multiple vertex collection names. Only vertices from these collections will be considered.
- *vertex2CollectionRestriction*: One or multiple vertex collection names. Only vertices from these collections will be considered.
- *ignoreProperties*: One or multiple attributes of a document that should be ignored, either a string or an array.
**Examples**
@ -357,12 +357,12 @@ the graph and *m* the average amount of connected edges;
*Parameters*
* *graphName* : The name of the graph as a string.
* *options* : An object containing the following options:
* *direction* : The direction of the edges. Possible values are *any*, *inbound* and *outbound* (default).
* *followCycles* (optional) : If set to *true* the query follows cycles in the graph, default is false.
* *minLength* (optional) : Defines the minimal length a path must have to be returned (default is 0).
* *maxLength* (optional) : Defines the maximal length a path must have to be returned (default is 10).
- *graphName*: The name of the graph as a string.
- *options*: An object containing the following options:
- *direction*: The direction of the edges. Possible values are *any*, *inbound* and *outbound* (default).
- *followCycles* (optional): If set to *true* the query follows cycles in the graph, default is false.
- *minLength* (optional): Defines the minimal length a path must have to be returned (default is 0).
- *maxLength* (optional): Defines the maximal length a path must have to be returned (default is 10).
**Examples**
@ -387,7 +387,7 @@ length of 1 and a minimal length of 2:
var examples = require("@arangodb/graph-examples/example-graph.js");
var g = examples.loadGraph("social");
| db._query(
| "RETURN GRAPH_PATHS('social', {direction : 'inbound', minLength : 1, maxLength : 2})"
| "RETURN GRAPH_PATHS('social', {direction: 'inbound', minLength: 1, maxLength: 2})"
).toArray();
~ examples.dropGraph("social");
@END_EXAMPLE_ARANGOSH_OUTPUT
@ -420,20 +420,20 @@ The complexity of the function is described
*Parameters*
* *graphName* : The name of the graph as a string.
* *startVertexExample* : An example for the desired start Vertices (see [example](#short-explanation-of-the-example-parameter)).
* *endVertexExample* : An example for the desired end Vertices (see [example](#short-explanation-of-the-example-parameter)).
* *options* (optional) : An object containing the following options:
* *direction* : The direction of the edges as a string. Possible values are *outbound*, *inbound* and *any* (default).
* *edgeCollectionRestriction* : One or multiple edge collection names. Only edges from these collections will be considered for the path.
* *startVertexCollectionRestriction* : One or multiple vertex collection names. Only vertices from these collections will be considered as start vertex of a path.
* *endVertexCollectionRestriction* : One or multiple vertex collection names. Only vertices from these collections will be considered as end vertex of a path.
* *edgeExamples* : A filter example for the edges in the shortest paths (see [example](#short-explanation-of-the-example-parameter)).
* *algorithm* : The algorithm to calculate the shortest paths. If both start and end vertex examples are empty [Floyd-Warshall](http://en.wikipedia.org/wiki/Floyd%E2%80%93Warshall_algorithm) is used, otherwise the default is [Dijkstra](http://en.wikipedia.org/wiki/Dijkstra's_algorithm).
* *weight* : The name of the attribute of the edges containing the length as a string.
* *defaultWeight* : Only used with the option *weight*. If an edge does not have the attribute named as defined in option *weight* this default is used as length. If no default is supplied the default would be positive Infinity so the path could not be calculated.
* *stopAtFirstMatch* : Only useful if targetVertices is an example that matches to more than one vertex. If so only the shortest path to the closest of these target vertices is returned. This flag is of special use if you have target pattern and you want to know which vertex with this pattern is matched first.
* *includeData* : Triggers if only *_id*'s are returned (*false*, default) or if data is included for all objects as well (*true*) This will modify the content of *vertex*, *path.vertices* and *path.edges*.
- *graphName*: The name of the graph as a string.
- *startVertexExample*: An example for the desired start Vertices (see [example](#short-explanation-of-the-example-parameter)).
- *endVertexExample*: An example for the desired end Vertices (see [example](#short-explanation-of-the-example-parameter)).
- *options* (optional): An object containing the following options:
- *direction*: The direction of the edges as a string. Possible values are *outbound*, *inbound* and *any* (default).
- *edgeCollectionRestriction*: One or multiple edge collection names. Only edges from these collections will be considered for the path.
- *startVertexCollectionRestriction*: One or multiple vertex collection names. Only vertices from these collections will be considered as start vertex of a path.
- *endVertexCollectionRestriction*: One or multiple vertex collection names. Only vertices from these collections will be considered as end vertex of a path.
- *edgeExamples*: A filter example for the edges in the shortest paths (see [example](#short-explanation-of-the-example-parameter)).
- *algorithm*: The algorithm to calculate the shortest paths. If both start and end vertex examples are empty [Floyd-Warshall](http://en.wikipedia.org/wiki/Floyd%E2%80%93Warshall_algorithm) is used, otherwise the default is [Dijkstra](http://en.wikipedia.org/wiki/Dijkstra's_algorithm).
- *weight*: The name of the attribute of the edges containing the length as a string.
- *defaultWeight*: Only used with the option *weight*. If an edge does not have the attribute named as defined in option *weight* this default is used as length. If no default is supplied the default would be positive Infinity so the path could not be calculated.
- *stopAtFirstMatch*: Only useful if targetVertices is an example that matches to more than one vertex. If so only the shortest path to the closest of these target vertices is returned. This flag is of special use if you have a target pattern and you want to know which vertex with this pattern is matched first.
- *includeData*: Triggers if only *_id*'s are returned (*false*, default) or if data is included for all objects as well (*true*) This will modify the content of *vertex*, *path.vertices* and *path.edges*.
NOTE: Since version 2.6 we have included a new optional parameter *includeData*.
This parameter triggers if the result contains the real data object *true* or
@ -452,9 +452,9 @@ A route planner example, shortest distance from all german to all french cities:
var g = examples.loadGraph("routeplanner");
| db._query("FOR e IN GRAPH_SHORTEST_PATH("
| + "'routeplanner', {}, {}, {" +
| "weight : 'distance', endVertexCollectionRestriction : 'frenchCity', " +
| "weight: 'distance', endVertexCollectionRestriction: 'frenchCity', " +
| "includeData: true, " +
| "startVertexCollectionRestriction : 'germanCity'}) RETURN [e.startVertex, e.vertex._id, " +
| "startVertexCollectionRestriction: 'germanCity'}) RETURN [e.startVertex, e.vertex._id, " +
| "e.distance, LENGTH(e.paths)]"
).toArray();
~ examples.dropGraph("routeplanner");
@ -470,7 +470,7 @@ A route planner example, shortest distance from Hamburg and Cologne to Lyon:
| db._query("FOR e IN GRAPH_SHORTEST_PATH("
| +"'routeplanner', [{_id: 'germanCity/Cologne'},{_id: 'germanCity/Munich'}], " +
| "'frenchCity/Lyon', " +
| "{weight : 'distance'}) RETURN [e.startVertex, e.vertex, e.distance, LENGTH(e.paths)]"
| "{weight: 'distance'}) RETURN [e.startVertex, e.vertex, e.distance, LENGTH(e.paths)]"
).toArray();
~ examples.dropGraph("routeplanner");
@END_EXAMPLE_ARANGOSH_OUTPUT
@ -492,46 +492,39 @@ This function performs traversals on the given graph.
The complexity of this function strongly depends on the usage.
*Parameters*
* *graphName* : The name of the graph as a string.
* *startVertexExample* : An example for the desired vertices (see [example](#short-explanation-of-the-example-parameter)).
* *direction* : The direction of the edges as a string. Possible values are *outbound*, *inbound* and *any* (default).
* *options*: Object containing optional options.
*Options*:
* *strategy*: determines the visitation strategy. Possible values are *depthfirst* and *breadthfirst*. Default is *breadthfirst*.
* *order*: determines the visitation order. Possible values are *preorder* and *postorder*.
* *itemOrder*: determines the order in which connections returned by the expander will be processed. Possible values are *forward* and *backward*.
* *maxDepth*: if set to a value greater than *0*, this will limit the traversal to this maximum depth.
* *minDepth*: if set to a value greater than *0*, all vertices found on a level below the *minDepth* level will not be included in the result.
* *maxIterations*: the maximum number of iterations that the traversal is allowed to perform. It is sensible to set this number so unbounded traversals will terminate at some point.
* *uniqueness*: an object that defines how repeated visitations of vertices should be handled. The *uniqueness* object can have a sub-attribute *vertices*, and a sub-attribute *edges*. Each sub-attribute can have one of the following values:
* *"none"*: no uniqueness constraints
* *"path"*: element is excluded if it is already contained in the current path. This setting may be sensible for graphs that contain cycles (e.g. A -> B -> C -> A).
* *"global"*: element is excluded if it was already found/visited at any point during the traversal.
* *filterVertices* An optional array of example vertex documents that the traversal will treat specially. If no examples are given, the traversal will handle all encountered vertices equally. If one or many vertex examples are given, the traversal will exclude any non-matching vertex from the result and/or not descend into it. Optionally, filterVertices can contain a string containing the name of a user-defined AQL function that should be responsible for filtering. If so, the AQL function is expected to have the following signature:
`function (config, vertex, path)`
If a custom AQL function is used for filterVertices, it is expected to return one of the following values:
* [ ]: Include the vertex in the result and descend into its connected edges
* [ "prune" ]: Will include the vertex in the result but not descend into its connected edges
* [ "exclude" ]: Will not include the vertex in the result but descend into its connected edges
* [ "prune", "exclude" ]: Will completely ignore the vertex and its connected edges
* *vertexFilterMethod:* Only useful in conjunction with filterVertices and if no user-defined AQL function is used.
If specified, it will influence how vertices are handled that don't match the examples in filterVertices:
* [ "prune" ]: Will include non-matching vertices in the result but not descend into them
* [ "exclude" ]: Will not include non-matching vertices in the result but descend into them
* [ "prune", "exclude" ]: Will completely ignore the vertex and its connected edges
- *graphName*: The name of the graph as a string.
- *startVertexExample*: An example for the desired vertices (see [example](#short-explanation-of-the-example-parameter)).
- *direction*: The direction of the edges as a string. Possible values are *outbound*, *inbound* and *any* (default).
- *options*: Object containing optional options.
- *strategy*: determines the visitation strategy. Possible values are *depthfirst* and *breadthfirst*. Default is *breadthfirst*.
- *order*: determines the visitation order. Possible values are *preorder* and *postorder*.
- *itemOrder*: determines the order in which connections returned by the expander will be processed. Possible values are *forward* and *backward*.
- *maxDepth*: if set to a value greater than *0*, this will limit the traversal to this maximum depth.
- *minDepth*: if set to a value greater than *0*, all vertices found on a level below the *minDepth* level will not be included in the result.
- *maxIterations*: the maximum number of iterations that the traversal is allowed to perform. It is sensible to set this number so unbounded traversals will terminate at some point.
- *uniqueness*: an object that defines how repeated visitations of vertices should be handled. The *uniqueness* object can have a sub-attribute *vertices*, and a sub-attribute *edges*. Each sub-attribute can have one of the following values:
- *"none"*: no uniqueness constraints
- *"path"*: element is excluded if it is already contained in the current path. This setting may be sensible for graphs that contain cycles (e.g. A -> B -> C -> A).
- *"global"*: element is excluded if it was already found/visited at any point during the traversal.
- *filterVertices* An optional array of example vertex documents that the traversal will treat specially. If no examples are given, the traversal will handle all encountered vertices equally. If one or many vertex examples are given, the traversal will exclude any non-matching vertex from the result and/or not descend into it. Optionally, filterVertices can contain a string containing the name of a user-defined AQL function that should be responsible for filtering. If so, the AQL function is expected to have the following signature: `function (config, vertex, path)`
If a custom AQL function is used for filterVertices, it is expected to return one of the following values:
- [ ]: Include the vertex in the result and descend into its connected edges
- [ "prune" ]: Will include the vertex in the result but not descend into its connected edges
- [ "exclude" ]: Will not include the vertex in the result but descend into its connected edges
- [ "prune", "exclude" ]: Will completely ignore the vertex and its connected edges
- *vertexFilterMethod:* Only useful in conjunction with filterVertices and if no user-defined AQL function is used.
If specified, it will influence how vertices are handled that don't match the examples in filterVertices:
- [ "prune" ]: Will include non-matching vertices in the result but not descend into them
- [ "exclude" ]: Will not include non-matching vertices in the result but descend into them
- [ "prune", "exclude" ]: Will completely ignore the vertex and its connected edges
**Examples**
A route planner example, start a traversal from Hamburg :
A route planner example, start a traversal from Hamburg:
@startDocuBlockInline generalGraphTraversal1
@EXAMPLE_ARANGOSH_OUTPUT{generalGraphTraversal1}
@ -552,7 +545,7 @@ so only the direct neighbors of Hamburg are returned:
var examples = require("@arangodb/graph-examples/example-graph.js");
var g = examples.loadGraph("routeplanner");
| db._query("FOR e IN GRAPH_TRAVERSAL('routeplanner', 'germanCity/Hamburg'," +
| " 'outbound', {maxDepth : 1}) RETURN e"
| " 'outbound', {maxDepth: 1}) RETURN e"
).toArray();
~ examples.dropGraph("routeplanner");
@END_EXAMPLE_ARANGOSH_OUTPUT
@ -575,21 +568,21 @@ The complexity of this function strongly depends on the usage.
*Parameters*
* *graphName* : The name of the graph as a string.
* *startVertexExample* : An example for the desired
vertices (see [example](#short-explanation-of-the-example-parameter)).
* *direction* : The direction of the edges as a string.
Possible values are *outbound*, *inbound* and *any* (default).
* *connectName* : The result attribute which
contains the connection.
* *options* (optional) : An object containing options, see
[Graph Traversals](../Aql/GraphOperations.md#graphtraversal):
- *graphName*: The name of the graph as a string.
- *startVertexExample*: An example for the desired
vertices (see [example](#short-explanation-of-the-example-parameter)).
- *direction*: The direction of the edges as a string.
Possible values are *outbound*, *inbound* and *any* (default).
- *connectName*: The result attribute which
contains the connection.
- *options* (optional): An object containing options, see
[Graph Traversals](#graphtraversal):
**Examples**
A route planner example, start a traversal from Hamburg :
A route planner example, start a traversal from Hamburg:
@startDocuBlockInline generalGraphTraversalTree1
@EXAMPLE_ARANGOSH_OUTPUT{generalGraphTraversalTree1}
@ -610,7 +603,7 @@ A route planner example, start a traversal from Hamburg with a max depth of 1 so
var examples = require("@arangodb/graph-examples/example-graph.js");
var g = examples.loadGraph("routeplanner");
| db._query("FOR e IN GRAPH_TRAVERSAL_TREE('routeplanner', 'germanCity/Hamburg',"+
| " 'outbound', 'connection', {maxDepth : 1}) RETURN e"
| " 'outbound', 'connection', {maxDepth: 1}) RETURN e"
).toArray();
~ examples.dropGraph("routeplanner");
@END_EXAMPLE_ARANGOSH_OUTPUT
@ -642,8 +635,8 @@ A route planner example, distance from all french to all german cities:
var g = examples.loadGraph("routeplanner");
| db._query("FOR e IN GRAPH_DISTANCE_TO("
| +"'routeplanner', {}, {}, { " +
| " weight : 'distance', endVertexCollectionRestriction : 'germanCity', " +
| "startVertexCollectionRestriction : 'frenchCity'}) RETURN [e.startVertex, e.vertex, " +
| " weight: 'distance', endVertexCollectionRestriction: 'germanCity', " +
| "startVertexCollectionRestriction: 'frenchCity'}) RETURN [e.startVertex, e.vertex, " +
| "e.distance]"
).toArray();
~ examples.dropGraph("routeplanner");
@ -659,7 +652,7 @@ A route planner example, distance from Hamburg and Cologne to Lyon:
| db._query("FOR e IN GRAPH_DISTANCE_TO("
| + "'routeplanner', [{_id: 'germanCity/Cologne'},{_id: 'germanCity/Hamburg'}], " +
| "'frenchCity/Lyon', " +
| "{weight : 'distance'}) RETURN [e.startVertex, e.vertex, e.distance]"
| "{weight: 'distance'}) RETURN [e.startVertex, e.vertex, e.distance]"
).toArray();
~ examples.dropGraph("routeplanner");
@END_EXAMPLE_ARANGOSH_OUTPUT
@ -689,18 +682,18 @@ The complexity of the function is described
*Parameters*
* *graphName* : The name of the graph as a string.
* *vertexExample* : An example for the desired
vertices (see [example](#short-explanation-of-the-example-parameter)).
* *options* (optional) : An object containing the following options:
* *direction* : The direction of the edges as a string. Possible values are *outbound*, *inbound* and *any* (default).
* *edgeCollectionRestriction* : One or multiple edge collection names. Only edges from these collections will be considered for the path.
* *startVertexCollectionRestriction* : One or multiple vertex collection names. Only vertices from these collections will be considered as start vertex of a path.
* *endVertexCollectionRestriction* : One or multiple vertex collection names. Only vertices from these collections will be considered as end vertex of a path.
* *edgeExamples* : A filter example for the edges in the shortest paths (see [example](#short-explanation-of-the-example-parameter)).
* *algorithm* : The algorithm to calculate the shortest paths as a string. If vertex example is empty [Floyd-Warshall](http://en.wikipedia.org/wiki/Floyd%E2%80%93Warshall_algorithm) is used as default, otherwise the default is [Dijkstra](http://en.wikipedia.org/wiki/Dijkstra's_algorithm)
* *weight* : The name of the attribute of the edges containing the length as a string.
* *defaultWeight* : Only used with the option *weight*.
- *graphName*: The name of the graph as a string.
- *vertexExample*: An example for the desired
vertices (see [example](#short-explanation-of-the-example-parameter)).
- *options* (optional): An object containing the following options:
- *direction*: The direction of the edges as a string. Possible values are *outbound*, *inbound* and *any* (default).
- *edgeCollectionRestriction*: One or multiple edge collection names. Only edges from these collections will be considered for the path.
- *startVertexCollectionRestriction*: One or multiple vertex collection names. Only vertices from these collections will be considered as start vertex of a path.
- *endVertexCollectionRestriction*: One or multiple vertex collection names. Only vertices from these collections will be considered as end vertex of a path.
- *edgeExamples*: A filter example for the edges in the shortest paths (see [example](#short-explanation-of-the-example-parameter)).
- *algorithm*: The algorithm to calculate the shortest paths as a string. If vertex example is empty [Floyd-Warshall](http://en.wikipedia.org/wiki/Floyd%E2%80%93Warshall_algorithm) is used as default, otherwise the default is [Dijkstra](http://en.wikipedia.org/wiki/Dijkstra's_algorithm)
- *weight*: The name of the attribute of the edges containing the length as a string.
- *defaultWeight*: Only used with the option *weight*.
If an edge does not have the attribute named as defined in option *weight* this default is used as length.
If no default is supplied the default would be positive Infinity so the path and hence the eccentricity can not be calculated.
@ -728,7 +721,7 @@ This considers the actual distances.
var examples = require("@arangodb/graph-examples/example-graph.js");
var g = examples.loadGraph("routeplanner");
| db._query("RETURN GRAPH_ABSOLUTE_ECCENTRICITY("
+"'routeplanner', {}, {weight : 'distance'})").toArray();
+"'routeplanner', {}, {weight: 'distance'})").toArray();
~ examples.dropGraph("routeplanner");
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock generalGraphAbsEccentricity2
@ -741,8 +734,8 @@ outbound paths.
var examples = require("@arangodb/graph-examples/example-graph.js");
var g = examples.loadGraph("routeplanner");
| db._query("RETURN GRAPH_ABSOLUTE_ECCENTRICITY("
| + "'routeplanner', {}, {startVertexCollectionRestriction : 'germanCity', " +
"direction : 'outbound', weight : 'distance'})").toArray();
| + "'routeplanner', {}, {startVertexCollectionRestriction: 'germanCity', " +
"direction: 'outbound', weight: 'distance'})").toArray();
~ examples.dropGraph("routeplanner");
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock generalGraphAbsEccentricity3
@ -766,12 +759,12 @@ The complexity of the function is described
*Parameters*
* *graphName* : The name of the graph as a string.
* *options* (optional) : An object containing the following options:
* *direction* : The direction of the edges as a string. Possible values are *outbound*, *inbound* and *any* (default).
* *algorithm* : The algorithm to calculate the shortest paths as a string. Possible values are [Floyd-Warshall](http://en.wikipedia.org/wiki/Floyd%E2%80%93Warshall_algorithm) (default) and [Dijkstra](http://en.wikipedia.org/wiki/Dijkstra's_algorithm).
* *weight* : The name of the attribute of the edges containing the length as a string.
* *defaultWeight* : Only used with the option *weight*.
- *graphName*: The name of the graph as a string.
- *options* (optional): An object containing the following options:
- *direction*: The direction of the edges as a string. Possible values are *outbound*, *inbound* and *any* (default).
- *algorithm*: The algorithm to calculate the shortest paths as a string. Possible values are [Floyd-Warshall](http://en.wikipedia.org/wiki/Floyd%E2%80%93Warshall_algorithm) (default) and [Dijkstra](http://en.wikipedia.org/wiki/Dijkstra's_algorithm).
- *weight*: The name of the attribute of the edges containing the length as a string.
- *defaultWeight*: Only used with the option *weight*.
If an edge does not have the attribute named as defined in option *weight* this default is used as length.
If no default is supplied the default would be positive Infinity so the path and
hence the eccentricity can not be calculated.
@ -798,7 +791,7 @@ This considers the actual distances.
@EXAMPLE_ARANGOSH_OUTPUT{generalGraphEccentricity2}
var examples = require("@arangodb/graph-examples/example-graph.js");
var g = examples.loadGraph("routeplanner");
| db._query("RETURN GRAPH_ECCENTRICITY('routeplanner', {weight : 'distance'})"
| db._query("RETURN GRAPH_ECCENTRICITY('routeplanner', {weight: 'distance'})"
).toArray();
~ examples.dropGraph("routeplanner");
@END_EXAMPLE_ARANGOSH_OUTPUT
@ -823,18 +816,18 @@ The complexity of the function is described
*Parameters*
* *graphName* : The name of the graph as a string.
* *vertexExample* : An example for the desired
vertices (see [example](#short-explanation-of-the-example-parameter)).
* *options* : An object containing the following options:
* *direction* : The direction of the edges. Possible values are *outbound*, *inbound* and *any* (default).
* *edgeCollectionRestriction* : One or multiple edge collection names. Only edges from these collections will be considered for the path.
* *startVertexCollectionRestriction* : One or multiple vertex collection names. Only vertices from these collections will be considered as start vertex of a path.
* *endVertexCollectionRestriction* : One or multiple vertex collection names. Only vertices from these collections will be considered as end vertex of a path.
* *edgeExamples* : A filter example for the edges in the shortest paths (see [example](#short-explanation-of-the-example-parameter)).
* *algorithm* : The algorithm to calculate the shortest paths. Possible values are [Floyd-Warshall](http://en.wikipedia.org/wiki/Floyd%E2%80%93Warshall_algorithm) (default) and [Dijkstra](http://en.wikipedia.org/wiki/Dijkstra's_algorithm).
* *weight* : The name of the attribute of the edges containing the length.
* *defaultWeight* : Only used with the option *weight*.
- *graphName*: The name of the graph as a string.
- *vertexExample*: An example for the desired
vertices (see [example](#short-explanation-of-the-example-parameter)).
- *options*: An object containing the following options:
- *direction*: The direction of the edges. Possible values are *outbound*, *inbound* and *any* (default).
- *edgeCollectionRestriction*: One or multiple edge collection names. Only edges from these collections will be considered for the path.
- *startVertexCollectionRestriction*: One or multiple vertex collection names. Only vertices from these collections will be considered as start vertex of a path.
- *endVertexCollectionRestriction*: One or multiple vertex collection names. Only vertices from these collections will be considered as end vertex of a path.
- *edgeExamples*: A filter example for the edges in the shortest paths (see [example](#short-explanation-of-the-example-parameter)).
- *algorithm*: The algorithm to calculate the shortest paths. Possible values are [Floyd-Warshall](http://en.wikipedia.org/wiki/Floyd%E2%80%93Warshall_algorithm) (default) and [Dijkstra](http://en.wikipedia.org/wiki/Dijkstra's_algorithm).
- *weight*: The name of the attribute of the edges containing the length.
- *defaultWeight*: Only used with the option *weight*.
If an edge does not have the attribute named as defined in option *weight* this default is used as length.
If no default is supplied the default would be positive Infinity so the path and
hence the eccentricity can not be calculated.
@ -862,7 +855,7 @@ This considers the actual distances.
var examples = require("@arangodb/graph-examples/example-graph.js");
var g = examples.loadGraph("routeplanner");
| db._query("RETURN GRAPH_ABSOLUTE_CLOSENESS("
+"'routeplanner', {}, {weight : 'distance'})").toArray();
+"'routeplanner', {}, {weight: 'distance'})").toArray();
~ examples.dropGraph("routeplanner");
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock generalGraphAbsCloseness2
@ -875,8 +868,8 @@ outbound paths.
var examples = require("@arangodb/graph-examples/example-graph.js");
var g = examples.loadGraph("routeplanner");
| db._query("RETURN GRAPH_ABSOLUTE_CLOSENESS("
| + "'routeplanner', {}, {startVertexCollectionRestriction : 'germanCity', " +
"direction : 'outbound', weight : 'distance'})").toArray();
| + "'routeplanner', {}, {startVertexCollectionRestriction: 'germanCity', " +
"direction: 'outbound', weight: 'distance'})").toArray();
~ examples.dropGraph("routeplanner");
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock generalGraphAbsCloseness3
@ -900,12 +893,12 @@ The complexity of the function is described
*Parameters*
* *graphName* : The name of the graph as a string.
* *options* : An object containing the following options:
* *direction* : The direction of the edges. Possible values are *outbound*, *inbound* and *any* (default).
* *algorithm* : The algorithm to calculate the shortest paths. Possible values are [Floyd-Warshall](http://en.wikipedia.org/wiki/Floyd%E2%80%93Warshall_algorithm) (default) and [Dijkstra](http://en.wikipedia.org/wiki/Dijkstra's_algorithm).
* *weight* : The name of the attribute of the edges containing the length.
* *defaultWeight* : Only used with the option *weight*.
- *graphName*: The name of the graph as a string.
- *options*: An object containing the following options:
- *direction*: The direction of the edges. Possible values are *outbound*, *inbound* and *any* (default).
- *algorithm*: The algorithm to calculate the shortest paths. Possible values are [Floyd-Warshall](http://en.wikipedia.org/wiki/Floyd%E2%80%93Warshall_algorithm) (default) and [Dijkstra](http://en.wikipedia.org/wiki/Dijkstra's_algorithm).
- *weight*: The name of the attribute of the edges containing the length.
- *defaultWeight*: Only used with the option *weight*.
If an edge does not have the attribute named as defined in option *weight* this default is used as length.
If no default is supplied the default would be positive Infinity so the path and
hence the eccentricity can not be calculated.
@ -933,7 +926,7 @@ This considers the actual distances.
var examples = require("@arangodb/graph-examples/example-graph.js");
var g = examples.loadGraph("routeplanner");
| db._query("RETURN GRAPH_CLOSENESS("
+"'routeplanner', {weight : 'distance'})").toArray();
+"'routeplanner', {weight: 'distance'})").toArray();
~ examples.dropGraph("routeplanner");
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock generalGraphCloseness2
@ -946,7 +939,7 @@ outbound paths.
var examples = require("@arangodb/graph-examples/example-graph.js");
var g = examples.loadGraph("routeplanner");
| db._query("RETURN GRAPH_CLOSENESS("
| + "'routeplanner',{direction : 'outbound', weight : 'distance'})"
| + "'routeplanner',{direction: 'outbound', weight: 'distance'})"
).toArray();
~ examples.dropGraph("routeplanner");
@END_EXAMPLE_ARANGOSH_OUTPUT
@ -970,11 +963,11 @@ The complexity of the function is described
[here](#the-complexity-of-the-shortest-path-algorithms).
* *graphName* : The name of the graph as a string.
* *options* : An object containing the following options:
* *direction* : The direction of the edges. Possible values are *outbound*, *inbound* and *any* (default).
* *weight* : The name of the attribute of the edges containing the length.
* *defaultWeight* : Only used with the option *weight*.
- *graphName*: The name of the graph as a string.
- *options*: An object containing the following options:
- *direction*: The direction of the edges. Possible values are *outbound*, *inbound* and *any* (default).
- *weight*: The name of the attribute of the edges containing the length.
- *defaultWeight*: Only used with the option *weight*.
If an edge does not have the attribute named as defined in option *weight* this default is used as length.
If no default is supplied the default would be positive Infinity so the path and
hence the betweenness can not be calculated.
@ -1002,7 +995,7 @@ This considers the actual distances.
var examples = require("@arangodb/graph-examples/example-graph.js");
var g = examples.loadGraph("routeplanner");
| db._query("RETURN GRAPH_ABSOLUTE_BETWEENNESS("
+"'routeplanner', {weight : 'distance'})").toArray();
+"'routeplanner', {weight: 'distance'})").toArray();
~ examples.dropGraph("routeplanner");
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock generalGraphAbsBetweenness2
@ -1015,7 +1008,7 @@ outbound paths.
var examples = require("@arangodb/graph-examples/example-graph.js");
var g = examples.loadGraph("routeplanner");
| db._query("RETURN GRAPH_ABSOLUTE_BETWEENNESS("
| + "'routeplanner', {direction : 'outbound', weight : 'distance'})"
| + "'routeplanner', {direction: 'outbound', weight: 'distance'})"
).toArray();
~ examples.dropGraph("routeplanner");
@END_EXAMPLE_ARANGOSH_OUTPUT
@ -1040,11 +1033,11 @@ The complexity of the function is described
*Parameters*
* *graphName* : The name of the graph as a string.
* *options* : An object containing the following options:
* *direction* : The direction of the edges. Possible values are *outbound*, *inbound* and *any* (default).
* *weight* : The name of the attribute of the edges containing the length.
* *defaultWeight* : Only used with the option *weight*.
- *graphName*: The name of the graph as a string.
- *options*: An object containing the following options:
- *direction*: The direction of the edges. Possible values are *outbound*, *inbound* and *any* (default).
- *weight*: The name of the attribute of the edges containing the length.
- *defaultWeight*: Only used with the option *weight*.
If an edge does not have the attribute named as defined in option *weight* this default is used as length.
If no default is supplied the default would be positive Infinity so the path and
hence the eccentricity can not be calculated.
@ -1071,7 +1064,7 @@ This considers the actual distances.
@EXAMPLE_ARANGOSH_OUTPUT{generalGraphBetweenness2}
var examples = require("@arangodb/graph-examples/example-graph.js");
var g = examples.loadGraph("routeplanner");
db._query("RETURN GRAPH_BETWEENNESS('routeplanner', {weight : 'distance'})").toArray();
db._query("RETURN GRAPH_BETWEENNESS('routeplanner', {weight: 'distance'})").toArray();
~ examples.dropGraph("routeplanner");
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock generalGraphBetweenness2
@ -1084,7 +1077,7 @@ outbound paths.
var examples = require("@arangodb/graph-examples/example-graph.js");
var g = examples.loadGraph("routeplanner");
| db._query("RETURN GRAPH_BETWEENNESS("
+ "'routeplanner', {direction : 'outbound', weight : 'distance'})").toArray();
+ "'routeplanner', {direction: 'outbound', weight: 'distance'})").toArray();
~ examples.dropGraph("routeplanner");
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock generalGraphBetweenness3
@ -1099,19 +1092,18 @@ outbound paths.
`GRAPH_RADIUS (graphName, options)`
*The GRAPH\_RADIUS function returns the
[radius](http://en.wikipedia.org/wiki/Eccentricity_%28graph_theory%29)
of a graph.*
The GRAPH_RADIUS function returns the
[radius](http://en.wikipedia.org/wiki/Eccentricity_%28graph_theory%29) of a graph.
The complexity of the function is described
[here](#the-complexity-of-the-shortest-path-algorithms).
* *graphName* : The name of the graph as a string.
* *options* : An object containing the following options:
* *direction* : The direction of the edges. Possible values are *outbound*, *inbound* and *any* (default).
* *algorithm* : The algorithm to calculate the shortest paths as a string. Possible values are [Floyd-Warshall](http://en.wikipedia.org/wiki/Floyd%E2%80%93Warshall_algorithm) (default) and [Dijkstra](http://en.wikipedia.org/wiki/Dijkstra's_algorithm).
* *weight* : The name of the attribute of the edges containing the length.
* *defaultWeight* : Only used with the option *weight*.
- *graphName*: The name of the graph as a string.
- *options*: An object containing the following options:
- *direction*: The direction of the edges. Possible values are *outbound*, *inbound* and *any* (default).
- *algorithm*: The algorithm to calculate the shortest paths as a string. Possible values are [Floyd-Warshall](http://en.wikipedia.org/wiki/Floyd%E2%80%93Warshall_algorithm) (default) and [Dijkstra](http://en.wikipedia.org/wiki/Dijkstra's_algorithm).
- *weight*: The name of the attribute of the edges containing the length.
- *defaultWeight*: Only used with the option *weight*.
If an edge does not have the attribute named as defined in option *weight* this default is used as length.
If no default is supplied the default would be positive Infinity so the path and
hence the eccentricity can not be calculated.
@ -1138,7 +1130,7 @@ This considers the actual distances.
@EXAMPLE_ARANGOSH_OUTPUT{generalGraphRadius2}
var examples = require("@arangodb/graph-examples/example-graph.js");
var g = examples.loadGraph("routeplanner");
db._query("RETURN GRAPH_RADIUS('routeplanner', {weight : 'distance'})").toArray();
db._query("RETURN GRAPH_RADIUS('routeplanner', {weight: 'distance'})").toArray();
~ examples.dropGraph("routeplanner");
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock generalGraphRadius2
@ -1151,7 +1143,7 @@ outbound paths.
var examples = require("@arangodb/graph-examples/example-graph.js");
var g = examples.loadGraph("routeplanner");
| db._query("RETURN GRAPH_RADIUS("
| + "'routeplanner', {direction : 'outbound', weight : 'distance'})"
| + "'routeplanner', {direction: 'outbound', weight: 'distance'})"
).toArray();
~ examples.dropGraph("routeplanner");
@END_EXAMPLE_ARANGOSH_OUTPUT
@ -1176,12 +1168,12 @@ The complexity of the function is described
*Parameters*
* *graphName* : The name of the graph as a string.
* *options* : An object containing the following options:
* *direction* : The direction of the edges. Possible values are *outbound*, *inbound* and *any* (default).
* *algorithm* : The algorithm to calculate the shortest paths as a string. Possible values are [Floyd-Warshall](http://en.wikipedia.org/wiki/Floyd%E2%80%93Warshall_algorithm) (default) and [Dijkstra](http://en.wikipedia.org/wiki/Dijkstra's_algorithm).
* *weight* : The name of the attribute of the edges containing the length.
* *defaultWeight* : Only used with the option *weight*.
- *graphName*: The name of the graph as a string.
- *options*: An object containing the following options:
- *direction*: The direction of the edges. Possible values are *outbound*, *inbound* and *any* (default).
- *algorithm*: The algorithm to calculate the shortest paths as a string. Possible values are [Floyd-Warshall](http://en.wikipedia.org/wiki/Floyd%E2%80%93Warshall_algorithm) (default) and [Dijkstra](http://en.wikipedia.org/wiki/Dijkstra's_algorithm).
- *weight*: The name of the attribute of the edges containing the length.
- *defaultWeight*: Only used with the option *weight*.
If an edge does not have the attribute named as defined in option *weight* this default is used as length.
If no default is supplied the default would be positive Infinity so the path and
hence the eccentricity can not be calculated.
@ -1208,7 +1200,7 @@ This considers the actual distances.
@EXAMPLE_ARANGOSH_OUTPUT{generalGraphDiameter2}
var examples = require("@arangodb/graph-examples/example-graph.js");
var g = examples.loadGraph("routeplanner");
db._query("RETURN GRAPH_DIAMETER('routeplanner', {weight : 'distance'})").toArray();
db._query("RETURN GRAPH_DIAMETER('routeplanner', {weight: 'distance'})").toArray();
~ examples.dropGraph("routeplanner");
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock generalGraphDiameter2
@ -1221,11 +1213,8 @@ outbound paths.
var examples = require("@arangodb/graph-examples/example-graph.js");
var g = examples.loadGraph("routeplanner");
| db._query("RETURN GRAPH_DIAMETER("
| + "'routeplanner', {direction : 'outbound', weight : 'distance'})"
| + "'routeplanner', {direction: 'outbound', weight: 'distance'})"
).toArray();
~ examples.dropGraph("routeplanner");
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock generalGraphDiameter3

View File

@ -1,15 +1,15 @@
!CHAPTER Graphs in AQL
As you already [read about graphs in ArangoDB](../Graphs/README.md) you can have several views on graphs.
As you already [read about graphs in ArangoDB](../../Users/Graphs/index.html) you can have several views on graphs.
There are also several ways to work with graphs in AQL.
You can use named graphs where ArangoDB manages the collections involved in one graph.
You can also use graph functions on a combination of document and edge collections.
named graphs are defined through the [graph-module](../GeneralGraphs/README.md), which contains the name of the graph, and the vertex and edge collections involved.
named graphs are defined through the [graph-module](../../Users/Graphs/GeneralGraphs/index.html), which contains the name of the graph, and the vertex and edge collections involved.
Since the management functions are layered on top of simple sets of document and edge collections, you can also use regular AQL functions to work with them.
In AQL you can reach several graphing functions:
* [AQL Traversals](GraphTraversals.md) is making full use of optimisations and therefore best performance is to be expected. It can work on named graphs and loosely coupled collection sets (aka anonymous graphs). You can use AQL filter conditions on traversals.
* [Named graph Operations](GraphOperations.md) work on named graphs; offer a versatile range of parameters.
* [Other graph functions](GraphFunctions.md) work on single edge collection (which may also be part of named graphs).
* [AQL Traversals](Traversals.md) is making full use of optimisations and therefore best performance is to be expected. It can work on named graphs and loosely coupled collection sets (aka anonymous graphs). You can use AQL filter conditions on traversals.
* [Named graph Operations](Operations.md) work on named graphs; offer a versatile range of parameters.
* [Other graph functions](Functions.md) work on single edge collection (which may also be part of named graphs).

View File

@ -18,7 +18,7 @@ For all vertices that where visited during this process in the range between *mi
Let's take a look at a simple example to explain how it works.
This is the graph that we are going to traverse:
![traversal graph](../Graphs/traversal_graph.png)
![traversal graph](../../Users/Graphs/traversal_graph.png)
Now we use the following parameters for our query:
@ -27,36 +27,36 @@ Now we use the following parameters for our query:
3. We use a *max-depth* of 2.
4. We follow only *outbound* direction of edges
![traversal graph step 1](../Graphs/traversal_graph1.png)
![traversal graph step 1](../../Users/Graphs/traversal_graph1.png)
Now it walks to one of the direct neighbors of **A**, say **B** (NOTE: ordering is not guaranteed):
![traversal graph step 2](../Graphs/traversal_graph2.png)
![traversal graph step 2](../../Users/Graphs/traversal_graph2.png)
The query will remember the state (red circle) and will emit the first result **A** -> **B** (black box).
This also prevents the traverser from being trapped in cycles.
Now again it will visit one of the direct neighbors of **B**, say **E**:
![traversal graph step 3](../Graphs/traversal_graph3.png)
![traversal graph step 3](../../Users/Graphs/traversal_graph3.png)
We have limited the query to a *max-depth* of *2*, so it will not pick any neighbor of **E**, as the path from **A** to **E** already requires *2* steps.
Instead we will go back one level to **B** and continue with any other direct neighbor there:
![traversal graph step 4](../Graphs/traversal_graph4.png)
![traversal graph step 4](../../Users/Graphs/traversal_graph4.png)
Again after we produced this result we will step back to **B**.
But there is no neighbor of **B** left that we have not yet visited.
Hence we go another step back to **A** and continue with any other neighbor there.
![traversal graph step 5](../Graphs/traversal_graph5.png)
![traversal graph step 5](../../Users/Graphs/traversal_graph5.png)
And identical to the iterations before we will visit **H**:
![traversal graph step 6](../Graphs/traversal_graph6.png)
![traversal graph step 6](../../Users/Graphs/traversal_graph6.png)
And **J**:
![traversal graph step 7](../Graphs/traversal_graph7.png)
![traversal graph step 7](../../Users/Graphs/traversal_graph7.png)
And after these steps there is no further result left.
So all together this query has returned the following paths:
@ -72,7 +72,7 @@ So all together this query has returned the following paths:
!SUBSECTION Syntax
Now let's see how we can write a query that follows this schema.
You have two options here, you can either use a named graph (see [the graphs chapter](../Graphs/README.md) on how to create it) or anonymous graphs.
You have two options here, you can either use a named graph (see [the graphs chapter](index.html) on how to create it) or anonymous graphs.
!SUBSUBSECTION Working on named graphs:
@ -81,18 +81,18 @@ You have two options here, you can either use a named graph (see [the graphs cha
`OUTBOUND|INBOUND|ANY` startVertex
`GRAPH` graphName
- `FOR` - emits up to three variables:
- **vertex**: the current vertex in a traversal
- **edge**: *(optional)* the current edge in a traversal
- **path**: *(optional, requires edge to be present)* an object representing the current path with two members:
- `vertices`: an array of all vertices on this path.
- `edges`: an array of all edges on this path.
- `IN` `MIN`..`MAX` `OUTBOUND` startVertex `GRAPH` graphName
- `OUTBOUND|INBOUND|ANY` traversal will be done for outbound / inbound / inbound+outbound pointing edges
- **startVertex**: one vertex where the traversal will originate from, this can be specified in the form of an id string or in the form of a document with the attribute `_id`. All other values will lead to a warning and an empty result. If the specified id does not exist, the result is empty as well and there is no warning.
- **graphName**: the name identifying the named graph. Its vertex and edge collections will be looked up.
- `MIN`: edges and vertices returned by this query will start at the traversal depth of `MIN` (thus edges and vertices below will not be returned). If not specified, defaults to `1`, which is the minimal possible value.
- `MAX`: up to `MAX` length paths are traversed. If omitted in the query, `MAX` equals `MIN`. Thus only the vertices and edges in the range of `MIN` are returned.
- `FOR` - emits up to three variables:
- **vertex**: the current vertex in a traversal
- **edge**: *(optional)* the current edge in a traversal
- **path**: *(optional, requires edge to be present)* an object representing the current path with two members:
- `vertices`: an array of all vertices on this path.
- `edges`: an array of all edges on this path.
- `IN` `MIN`..`MAX` `OUTBOUND` startVertex `GRAPH` graphName
- `OUTBOUND|INBOUND|ANY` traversal will be done for outbound / inbound / inbound+outbound pointing edges
- **startVertex**: one vertex where the traversal will originate from, this can be specified in the form of an id string or in the form of a document with the attribute `_id`. All other values will lead to a warning and an empty result. If the specified id does not exist, the result is empty as well and there is no warning.
- **graphName**: the name identifying the named graph. Its vertex and edge collections will be looked up.
- `MIN`: edges and vertices returned by this query will start at the traversal depth of `MIN` (thus edges and vertices below will not be returned). If not specified, defaults to `1`, which is the minimal possible value.
- `MAX`: up to `MAX` length paths are traversed. If omitted in the query, `MAX` equals `MIN`. Thus only the vertices and edges in the range of `MIN` are returned.
!SUBSUBSECTION Working on collection sets:
@ -171,7 +171,7 @@ This is because for all results in depth `1` the second edge does not exist and
!SUBSUBSECTION Examples
We will create a simple symmetric traversal demonstration graph:
![traversal graph](../Graphs/traversal_graph.png)
![traversal graph](../../Users/Graphs/traversal_graph.png)
@startDocuBlockInline GRAPHTRAV_01_create_graph
@EXAMPLE_ARANGOSH_OUTPUT{GRAPHTRAV_01_create_graph}
@ -260,7 +260,8 @@ Due to this we will see duplicate nodes in the result.
!SUBSUBSECTION Use the AQL explainer for optimizations
Now lets have a look what the optimizer does behind the curtains and inspect traversal queries using [the explainer](Optimizer.md):
Now let's have a look at what the optimizer does behind the curtains and inspect traversal queries using
[the explainer](../ExecutionAndPerformance/Optimizer.md):
@startDocuBlockInline GRAPHTRAV_07_traverse_7
@ -284,6 +285,6 @@ And finally clean it up again:
@endDocuBlock GRAPHTRAV_99_drop_graph
If this traversal is not powerful enough for your needs, so you cannot describe your conditions as AQL filter statements you might want to look at [manually crafted traverser](../Traversals/README.md).
If this traversal is not powerful enough for your needs, so you cannot describe your conditions as AQL filter statements you might want to look at [manually crafted traverser](../../Users/Graphs/Traversals/index.html).
[See here for more traversal examples](../AqlExamples/CombiningGraphTraversals.md).
[See here for more traversal examples](../Examples/CombiningGraphTraversals.md).

View File

@ -1,11 +1,11 @@
!CHAPTER How to invoke AQL
You can run AQL queries from your application via the HTTP REST API. The full
API description is available at [HTTP Interface for AQL Query Cursors](../HttpAqlQueryCursor/README.md).
API description is available at [HTTP Interface for AQL Query Cursors](../../HTTP/AqlQueryCursor/index.html).
The ArangoDB Web Interface has a [specific tab for AQL queries execution](AqlWithWebInterface.md).
The ArangoDB Web Interface has a [specific tab for AQL queries execution](../Invocation/WithWebInterface.md).
You can run [AQL queries from the ArangoDB Shell](AqlWithArangosh.md) with the *_query* and *_createStatement* methods of the *db* object.
You can run [AQL queries from the ArangoDB Shell](../Invocation/WithArangosh.md) with the *_query* and *_createStatement* methods of the *db* object.
This chapter also describes how to use bind parameters, statistics, counting and cursors with arangosh.

View File

@ -88,7 +88,7 @@ It is always possible to retrieve statistics for a query with the *getExtra* met
@END_EXAMPLE_ARANGOSH_OUTPUT
@endDocuBlock 03_workWithAQL_getExtra
The meaning of the statistics values is described in [Execution statistics](QueryStatistics.md).
The meaning of the statistics values is described in [Execution statistics](../ExecutionAndPerformance/QueryStatistics.md).
You will also find warnings here; if you're designing queries on the shell, be sure to also look at them.
!SUBSECTION with _createStatement (ArangoStatement)

View File

@ -12,18 +12,22 @@ They are defined as JSON values, the same format that is used for bind parameter
Here is an example:
for doc in @@collection
FILTER CONTAINS(LOWER(doc.author), @search, false)
return {"name":doc.name, "descr": doc.description, "author":doc.author}
```js
FOR doc IN @@collection
FILTER CONTAINS(LOWER(doc.author), @search, false)
RETURN { "name": doc.name, "descr": doc.description, "author": doc.author }
```
Bind parameter:
{
"@collection":"_apps",
"search":"arango"
}
```js
{
"@collection": "_apps",
"search": "arango"
}
```
An overview of Bind Parameters may be found in [Aql Fundamentals](BindParameters.md).
An overview of Bind Parameters may be found in [AQL Fundamentals](../Fundamentals/BindParameters.md).
Queries can also be saved in the AQL editor along with their bind parameter values for later reuse. This data is stored in the user profile in the current database (in the *_users* system table).

View File

@ -34,7 +34,7 In the above example, all array elements from *users* that have
an attribute *active* with value *true* and an attribute *age* with a
value less than *39* (including *null* ones) will be included in the result.
All other elements from *users* will be skipped and not be included in the result produced by *RETURN*.
You may refer to the chapter [Accessing Data from Collections](../Aql/DocumentData.md) for
You may refer to the chapter [Accessing Data from Collections](../Fundamentals/DocumentData.md) for
a description of the impact of non-existent or null attributes.

View File

@ -14,7 +14,7 @@ There also is a special case for graph traversals:
FOR vertex-variable-name, edge-variable-name, path-variable-name IN traversal-expression
```
For this special case see [the graph traversals chapter](../Aql/GraphTraversals.md).
For this special case see [the graph traversals chapter](../Graphs/Traversals.md).
For all other cases read on:
Each array element returned by *expression* is visited exactly once. It is

View File

@ -5,7 +5,7 @@ single server, an insert operation is executed transactionally in an all-or-noth
fashion. For sharded collections, the entire insert operation is not transactional.
Each *INSERT* operation is restricted to a single collection, and the
[collection name](../Glossary/README.md#collection-name) must not be dynamic.
[collection name](../../Users/Appendix/Glossary.html#collection-name) must not be dynamic.
Only a single *INSERT* statement per collection is allowed per AQL query, and
it cannot be followed by read operations that access the same collection, by
traversal operations, or AQL functions that can read documents.
@ -29,7 +29,7 @@ FOR i IN 1..100
INSERT { value: i } IN numbers
```
When inserting into an [edge collection](../Glossary/README.md#edge-collection), it is mandatory to specify the attributes
When inserting into an [edge collection](../../Users/Appendix/Glossary.html#edge-collection), it is mandatory to specify the attributes
*_from* and *_to* in document:
```

View File

@ -2,16 +2,16 @@
The following high-level operations are described here after:
* [FOR](../AqlOperations/For.md) : to iterate over all elements of an array.
* [RETURN](../AqlOperations/Return.md) : to produce the result of a query.
* [FILTER](../AqlOperations/Filter.md) : to restrict the results to elements that match an arbitrary logical condition.
* [SORT](../AqlOperations/Sort.md) : to force a sort of the array of already produced intermediate results.
* [LIMIT](../AqlOperations/Limit.md) : to reduce the number of elements in the result to at most the specified number.
* [LET](../AqlOperations/Let.md) : to assign an arbitrary value to a variable.
* [COLLECT](../AqlOperations/Collect.md) : to group an array by one or multiple group criteria.
* [REMOVE](../AqlOperations/Remove.md) : to remove documents from a collection.
* [UPDATE](../AqlOperations/Update.md) : to partially update documents in a collection.
* [REPLACE](../AqlOperations/Replace.md) : to completely replace documents in a collection.
* [INSERT](../AqlOperations/Insert.md) : to insert new documents into a collection.
* [UPSERT](../AqlOperations/Upsert.md) : to update an existing document, or create it in the case it does not exist.
* [FOR](For.md) : to iterate over all elements of an array.
* [RETURN](Return.md) : to produce the result of a query.
* [FILTER](Filter.md) : to restrict the results to elements that match an arbitrary logical condition.
* [SORT](Sort.md) : to force a sort of the array of already produced intermediate results.
* [LIMIT](Limit.md) : to reduce the number of elements in the result to at most the specified number.
* [LET](Let.md) : to assign an arbitrary value to a variable.
* [COLLECT](Collect.md) : to group an array by one or multiple group criteria.
* [REMOVE](Remove.md) : to remove documents from a collection.
* [UPDATE](Update.md) : to partially update documents in a collection.
* [REPLACE](Replace.md) : to completely replace documents in a collection.
* [INSERT](Insert.md) : to insert new documents into a collection.
* [UPSERT](Upsert.md) : to update an existing document, or create it in the case it does not exist.

View File

@ -7,7 +7,7 @@ all-or-nothing fashion. For sharded collections, the entire remove operation
is not transactional.
Each *REMOVE* operation is restricted to a single collection, and the
[collection name](../Glossary/README.md#collection-name) must not be dynamic.
[collection name](../../Users/Appendix/Glossary.html#collection-name) must not be dynamic.
Only a single *REMOVE* statement per collection is allowed per AQL query, and
it cannot be followed by read operations that access the same collection, by
traversal operations, or AQL functions that can read documents.
@ -20,7 +20,8 @@ REMOVE key-expression IN collection options
*collection* must contain the name of the collection to remove the documents
from. *key-expression* must be an expression that contains the document identification.
This can either be a string (which must then contain the [document key](../Glossary/README.md#document-key)) or a
This can either be a string (which must then contain the
[document key](../../Users/Appendix/Glossary.html#document-key)) or a
document, which must contain a *_key* attribute.
The following queries are thus equivalent:

View File

@ -5,7 +5,7 @@ single server, the replace operation is executed transactionally in an all-or-no
fashion. For sharded collections, the entire replace operation is not transactional.
Each *REPLACE* operation is restricted to a single collection, and the
[collection name](../Glossary/README.md#collection-name) must not be dynamic.
[collection name](../../Users/Appendix/Glossary.html#collection-name) must not be dynamic.
Only a single *REPLACE* statement per collection is allowed per AQL query, and
it cannot be followed by read operations that access the same collection, by
traversal operations, or AQL functions that can read documents.

View File

@ -5,7 +5,7 @@ single server, updates are executed transactionally in an all-or-nothing fashion
For sharded collections, the entire update operation is not transactional.
Each *UPDATE* operation is restricted to a single collection, and the
[collection name](../Glossary/README.md#collection-name) must not be dynamic.
[collection name](../../Users/Appendix/Glossary.html#collection-name) must not be dynamic.
Only a single *UPDATE* statement per collection is allowed per AQL query, and
it cannot be followed by read operations that access the same collection, by
traversal operations, or AQL functions that can read documents.

View File

@ -6,7 +6,7 @@ On a single server, upserts are executed transactionally in an all-or-nothing fa
For sharded collections, the entire update operation is not transactional.
Each *UPSERT* operation is restricted to a single collection, and the
[collection name](../Glossary/README.md#collection-name) must not be dynamic.
[collection name](../../Users/Appendix/Glossary.html#collection-name) must not be dynamic.
Only a single *UPSERT* statement per collection is allowed per AQL query, and
it cannot be followed by read operations that access the same collection, by
traversal operations, or AQL functions that can read documents.

View File

@ -264,7 +264,8 @@ will produce the following result:
!SUBSUBSECTION Array operators
AQL provides [array operators](ArrayOperators.md) <i>[\*]</i> for array variable expansion and <i>[\*\*]</i> for array contraction.
AQL provides [array operators](Advanced/ArrayOperators.md) <i>[\*]</i> for
array variable expansion and <i>[\*\*]</i> for array contraction.
!SUBSUBSECTION Operator precedence

View File

@ -26,5 +26,5 @@ The syntax of AQL queries is different to SQL, even if some keywords overlap.
Nevertheless, AQL should be easy to understand for anyone with an SQL background.
For some example queries, please refer to the pages [Data Modification Queries](DataModification.md) and
[Usual query patterns](../AqlExamples/README.md).
[Usual query patterns](Examples/README.md).

View File

@ -26,7 +26,6 @@
* [Modifying](Collection/Modifying.md)
* [Indexes](Indexes/README.md)
* [Working with Indexes](Indexes/WorkingWith.md)
* [Cap Constraints](Indexes/Cap.md)
* [Hash](Indexes/Hash.md)
* [Skiplist](Indexes/Skiplist.md)
* [Geo](Indexes/Geo.md)

View File

@ -32,16 +32,6 @@ Sharding only should be used by developers!
@startDocuBlock JSF_cluster_test_HEAD
<!-- js/actions/api-cluster.js -->
@startDocuBlock JSF_cluster_planner_POST
<!-- js/actions/api-cluster.js -->
@startDocuBlock JSF_cluster_dispatcher_POST
<!-- js/actions/api-cluster.js -->
@startDocuBlock JSF_cluster_check_port_GET

View File

@ -27,7 +27,7 @@ or use authentication, you can use the following command-line options:
* *--server.disable-authentication <bool>*: whether or not to use authentication
Here's an example of dumping data from a non-standard endpoint, using a dedicated
[database name](../Glossary/README.md#database-name):
[database name](../Appendix/Glossary.md#database-name):
unix> arangodump --server.endpoint tcp://192.168.173.13:8531 --server.username backup --server.database mydb --output-directory "dump"

View File

@ -7,8 +7,8 @@ The most convenient method to import a lot of data into ArangoDB is to use the
*arangoimp* command-line tool. It allows you to import data records from a file
into an existing database collection.
It is possible to import [document keys](../Glossary/README.md#document-key) with the documents using the *_key*
attribute. When importing into an [edge collection](../Glossary/README.md#edge-collection), it is mandatory that all
It is possible to import [document keys](../Appendix/Glossary.md#document-key) with the documents using the *_key*
attribute. When importing into an [edge collection](../Appendix/Glossary.md#edge-collection), it is mandatory that all
imported documents have the *_from* and *_to* attributes, and that they contain
valid references.

View File

@ -33,7 +33,7 @@ target database, the username and passwords passed to _arangorestore_ (in option
new database.
Here's an example of reloading data to a non-standard endpoint, using a dedicated
[database name](../Glossary/README.md#database-name):
[database name](../Appendix/Glossary.md#database-name):
unix> arangorestore --server.endpoint tcp://192.168.173.13:8531 --server.username backup --server.database mydb --input-directory "dump"
@ -94,7 +94,7 @@ It can be specified multiple times if required:
unix> arangorestore --collection myusers --collection myvalues --input-directory "dump"
Collections will be processed in alphabetical order by _arangorestore_, with all document
collections being processed before all [edge collection](../Glossary/README.md#edge-collection)s. This is to ensure that reloading
collections being processed before all [edge collection](../Appendix/Glossary.md#edge-collection)s. This is to ensure that reloading
data into edge collections will have the document collections linked in edges (*_from* and
*_to* attributes) loaded.
@ -113,7 +113,7 @@ Note that setting *--recycle-ids* to *true* will also cause collections to be (r
the target database with the exact same collection id as in the input directory. Any potentially
existing collection in the target database with the same collection id will then be dropped.
Setting *--recycle-ids* to *false* or omitting it will only use the [collection name](../Glossary/README.md#collection-name) from the
Setting *--recycle-ids* to *false* or omitting it will only use the [collection name](../Appendix/Glossary.md#collection-name) from the
input directory and allow the target database to create the collection with a different id
(though with the same name) than in the input directory.

View File

@ -3,7 +3,7 @@
The ArangoDB server can listen for incoming requests on multiple *endpoints*.
The endpoints are normally specified either in ArangoDB's configuration file or on
the command-line, using the ["--server.endpoint"](../ConfigureArango/Arangod.md) option.
the command-line, using the ["--server.endpoint"](../Configuration/Arangod.md) option.
The default endpoint for ArangoDB is *tcp://127.0.0.1:8529* or *tcp://localhost:8529*.
ArangoDB can also do a so called *broadcast bind* using *tcp://0.0.0.0:8529*. This way
it will be reachable on all interfaces of the host. This may be useful
@ -47,7 +47,7 @@ When not in the default database, you must first switch to it using the
returns a list of all endpoints
`db._listEndpoints()`
`db._endpoints()`
Returns a list of all endpoints and their mapped databases.

View File

@ -6,7 +6,7 @@ and the *replication applier*. Since ArangoDB 2.2, the replication logger has no
special purpose anymore and is available for downwards-compatibility only.
The replication applier can be administered via the command line or a REST API
(see [HTTP Interface for Replication](../HttpReplications/README.md)).
(see [HTTP Interface for Replication](../../../HTTP/Replications/index.html)).
As replication is configured on a per-database level and there can be multiple
databases inside one ArangoDB instance, there can be multiple replication appliers
@ -72,7 +72,8 @@ operation that was written to the server's write-ahead log. It can be used to de
operations were logged, and is also used by the replication applier for incremental
fetching of data.
**Note**: The replication logger state can also be queried via the [HTTP API](../HttpReplications/README.md).
**Note**: The replication logger state can also be queried via the
[HTTP API](../../../HTTP/Replications/index.html).
To query which data ranges are still available for replication clients to fetch,
the logger provides the *firstTick* and *tickRanges* functions:
@ -198,7 +199,7 @@ Here is an example of the state after the replication applier terminated itself
```
**Note**: the state of a database's replication applier is queryable via the HTTP API, too.
Please refer to [HTTP Interface for Replication](../HttpReplications/README.md) for more details.
Please refer to [HTTP Interface for Replication](../../../HTTP/Replications/index.html) for more details.
!SUBSUBSECTION All-in-one setup

View File

@ -226,5 +226,5 @@ Note that while a slave has only partly executed a transaction from the master,
a write lock on the collections involved in the transaction.
You may also want to check the master and slave states via the HTTP APIs
(see [HTTP Interface for Replication](../HttpReplications/README.md)).
(see [HTTP Interface for Replication](../../../HTTP/Replications/index.html)).

View File

@ -18,8 +18,7 @@ removed in later versions of ArangoDB:
return all write operations for user-defined collections, but it will exclude write
operations for certain system collections. The following collections are excluded
intentionally from replication: *_apps*, *_trx*, *_replication*, *_configuration*,
*_cluster_kickstart_plans*, *_jobs*, *_queues*, *_sessions*, *_foxxlog* and all
statistics collections.
*_jobs*, *_queues*, *_sessions*, *_foxxlog* and all statistics collections.
Write operations for the following system collections can be queried from a master:
*_aqlfunctions*, *_graphs*, *_users*.
* Foxx applications consist of database entries and application scripts in the file system.

View File

@ -1,224 +0,0 @@
!CHAPTER How to try it out
In this text we assume that you are working with a standard installation
of ArangoDB with at least a version number of 2.0. This means that everything
is compiled for cluster operation, that *etcd* is compiled and
the executable is installed in the location mentioned in the
configuration file. The first step is to switch on the dispatcher
functionality in your configuration of *arangod*. In order to do this, change
the *cluster.disable-dispatcher-kickstarter* and
*cluster.disable-dispatcher-interface* options in *arangod.conf* both
to *false*.
**Note**: Once you switch *cluster.disable-dispatcher-interface* to
*false*, the usual web front end is automatically replaced with the
web front end for cluster planning. Therefore you can simply point
your browser to *http://localhost:8529* (if you are running on the
standard port) and you are guided through the planning and launching of
a cluster with a graphical user interface. Alternatively, you can follow
the instructions below to do the same on the command line interface.
We will first plan and launch a cluster, such that all your servers run
on the local machine.
Start up a regular ArangoDB, either in console mode or connect to it with
the Arango shell *arangosh*. Then you can ask it to plan a cluster for
you:
```js
arangodb> var Planner = require("@arangodb/cluster").Planner;
arangodb> p = new Planner({numberOfDBservers:3, numberOfCoordinators:2});
[object Object]
```
If you are curious you can look at the plan of your cluster:
```
arangodb> p.getPlan();
```
This will show you a huge JSON document. More interestingly, some further
components tell you more about the layout of your cluster:
```js
arangodb> p.DBservers;
[
{
"id" : "Pavel",
"dispatcher" : "me",
"port" : 8629
},
{
"id" : "Perry",
"dispatcher" : "me",
"port" : 8630
},
{
"id" : "Pancho",
"dispatcher" : "me",
"port" : 8631
}
]
arangodb> p.coordinators;
[
{
"id" : "Claus",
"dispatcher" : "me",
"port" : 8530
},
{
"id" : "Chantalle",
"dispatcher" : "me",
"port" : 8531
}
]
```
This tells you the ports on which your ArangoDB processes will listen.
We will need the 8530 (or whatever appears on your machine) for the
coordinators below.
More interesting is that such a cluster plan document can be used to
start up the cluster conveniently using a *Kickstarter* object. Please
note that the *launch* method of the kickstarter shown below initializes
all data directories and log files, so if you have previously used the
same cluster plan you will lose all your data. Use the *relaunch* method
described below instead in that case.
```js
arangodb> var Kickstarter = require("@arangodb/cluster").Kickstarter;
arangodb> k = new Kickstarter(p.getPlan());
arangodb> k.launch();
```
That is all you have to do, to fire up your first cluster. You will see some
output, which you can safely ignore (as long as no error happens).
From that point on, you can contact one of the coordinators and use the cluster
as if it were a single ArangoDB instance (use the port number from above
instead of 8530, if you get a different one) (probably from another
shell window):
```js
$ arangosh --server.endpoint tcp://localhost:8530
[... some output omitted]
arangosh [_system]> db._databases();
[
"_system"
]
```
This for example, lists the cluster wide databases.
Now, let us create a sharded collection. Note, that we only have to specify
the number of shards to use in addition to the usual command.
The shards are automatically distributed among your DBservers:
```js
arangosh [_system]> example = db._create("example",{numberOfShards:6});
[ArangoCollection 1000001, "example" (type document, status loaded)]
arangosh [_system]> x = example.save({"name":"Hans", "age":44});
{
"error" : false,
"_id" : "example/1000008",
"_rev" : "13460426",
"_key" : "1000008"
}
arangosh [_system]> example.document(x._key);
{
"age" : 44,
"name" : "Hans",
"_id" : "example/1000008",
"_rev" : "13460426",
"_key" : "1000008"
}
```
You can shut down your cluster by using the following Kickstarter
method (in the ArangoDB console):
```js
arangodb> k.shutdown();
```
If you want to start your cluster again without losing data you have
previously stored in it, you can use the *relaunch* method in exactly the
same way as you previously used the *launch* method:
```js
arangodb> k.relaunch();
```
**Note**: If you have destroyed the object *k* for example because you
have shutdown the ArangoDB instance in which you planned the cluster,
then you can reproduce it for a *relaunch* operation, provided you have
kept the cluster plan object provided by the *getPlan* method. If you
had for example done:
```js
arangodb> var plan = p.getPlan();
arangodb> require("fs").write("saved_plan.json",JSON.stringify(plan));
```
Then you can later do (in another session):
```js
arangodb> var plan = require("fs").read("saved_plan.json");
arangodb> plan = JSON.parse(plan);
arangodb> var Kickstarter = require("@arangodb/cluster").Kickstarter;
arangodb> var k = new Kickstarter(plan);
arangodb> k.relaunch();
```
to start the existing cluster anew.
You can check, whether or not, all your cluster processes are still
running, by issuing:
```js
arangodb> k.isHealthy();
```
This will show you the status of all processes in the cluster. You
should see "RUNNING" there, in all the relevant places.
Finally, to clean up the whole cluster (losing all the data stored in
it), do:
```js
arangodb> k.shutdown();
arangodb> k.cleanup();
```
We conclude this section with another example using two machines, which
will act as two dispatchers. We start from scratch using two machines,
running on the network addresses *tcp://192.168.173.78:8529* and
*tcp://192.168.173.13:6789*. Both need to have a regular ArangoDB
instance installed and running. Please make sure, that both bind to
all network devices, so that they can talk to each other. Also enable
the dispatcher functionality on both of them, as described above.
```js
arangodb> var Planner = require("@arangodb/cluster").Planner;
arangodb> var p = new Planner({
dispatchers: {"me":{"endpoint":"tcp://192.168.173.78:8529"},
"theother":{"endpoint":"tcp://192.168.173.13:6789"}},
"numberOfCoordinators":2, "numberOfDBservers": 2});
```
With these commands, you create a cluster plan involving two machines.
The planner will put one DBserver and one Coordinator on each machine.
You can now launch this cluster exactly as explained earlier:
```js
arangodb> var Kickstarter = require("@arangodb/cluster").Kickstarter;
arangodb> k = new Kickstarter(p.getPlan());
arangodb> k.launch();
```
Likewise, the methods *shutdown*, *relaunch*, *isHealthy* and *cleanup*
work exactly as in the single server case.
See [the corresponding chapter of the reference manual](../ModulePlanner/README.md)
for detailed information about the *Planner* and *Kickstarter* classes.

View File

@ -14,48 +14,3 @@ coordinators exactly as they would talk to a single ArangoDB instance
via the REST interface. The coordinators know about the configuration of
the cluster and automatically forward the incoming requests to the
right DBservers.
As a central highly available service to hold the cluster configuration
and to synchronize reconfiguration and fail-over operations we currently
use an external program called *etcd* (see [GitHub
page](https://github.com/coreos/etcd)). It provides a hierarchical
key value store with strong consistency and reliability promises.
This is called the "agency" and its processes are called "agents".
All this is admittedly a relatively complicated setup and involves a lot
of steps for the startup and shutdown of clusters. Therefore we have created
convenience functionality to plan, setup, start and shutdown clusters.
The whole process works in two phases, first the "planning" phase and
then the "running" phase. In the planning phase it is decided which
processes with which roles run on which machine, which ports they use,
where the central agency resides and what ports its agents use. The
result of the planning phase is a "cluster plan", which is just a
relatively big data structure in JSON format. You can then use this
cluster plan to startup, shutdown, check and cleanup your cluster.
This latter phase uses so-called "dispatchers". A dispatcher is yet another
ArangoDB instance and you have to install exactly one such instance on
every machine that will take part in your cluster. No special
configuration whatsoever is needed and you can organize authentication
exactly as you would in a normal ArangoDB instance. You only have
to activate the dispatcher functionality in the configuration file
(see options *cluster.disable-dispatcher-kickstarter* and
*cluster.disable-dispatcher-interface*, which are both initially
set to *true* in the standard setup we ship).
However, you can use any of these dispatchers to plan and start your
cluster. In the planning phase, you have to tell the planner about all
dispatchers in your cluster and it will automatically distribute your
agency, DBserver and coordinator processes amongst the dispatchers. The
result is the cluster plan which you feed into the kickstarter. The
kickstarter is a program that actually uses the dispatchers to
manipulate the processes in your cluster. It runs on one of the
dispatchers, which analyses the cluster plan and executes those actions,
for which it is personally responsible, and forwards all other actions
to the corresponding dispatchers. This is possible, because the cluster
plan incorporates the information about all dispatchers.
We also offer a graphical user interface to the cluster planner and
dispatcher.

View File

@ -78,7 +78,7 @@ road map):
maintenance and scaling of a cluster. However, in version 2.0 the
cluster layout is static and no redistribution of data between the
DBservers or moving of shards between servers is possible.
* At this stage the sharding of an [edge collection](../Glossary/README.md#edge-collection) is independent of
* At this stage the sharding of an [edge collection](../Appendix/Glossary.md#edge-collection) is independent of
the sharding of the corresponding vertex collection in a graph.
For version 2.2 we plan to synchronize the two, to allow for more
efficient graph traversal functions in large, sharded graphs. We
@ -137,16 +137,6 @@ to implement efficiently:
to be the revision of the latest inserted document. Again,
maintaining a global revision number over all shards is very
difficult to maintain efficiently.
* The methods *db.<collection>.first()* and *db.<collection>.last()* are
unsupported for collections with more than one shard. The reason for
this, is that temporal order in a highly parallelized environment
like a cluster is difficult or even impossible to achieve
efficiently. In a cluster it is entirely possible that two
different coordinators add two different documents to two
different shards *at the same time*. In such a situation it is not
even well-defined which of the two documents is "later". The only
way to overcome this fundamental problem would again be a central
locking mechanism, which is not desirable for performance reasons.
* Contrary to the situation in a single instance, objects representing
sharded collections are broken after their database is dropped.
In a future version they might report that they are broken, but

View File

@ -10,7 +10,7 @@ To upgrade an existing ArangoDB database to a newer version of ArangoDB
(e.g. 3.0 to 3.1, or 3.3 to 3.4), the following method is recommended:
* Check the *CHANGELOG* and the
[list of incompatible changes](../Upgrading/UpgradingChanges28.md) for API or
[list of incompatible changes](../../ReleaseNotes/UpgradingChanges28.md) for API or
other changes in the new version of ArangoDB and make sure your applications
can deal with them
* Stop the "old" arangod service or binary

View File

@ -1,7 +1,7 @@
!CHAPTER Upgrading to ArangoDB 2.3
Please read the following sections if you upgrade from a previous version to
ArangoDB 2.3. Please be sure that you have checked the list of [changes in 2.3](../Upgrading/UpgradingChanges23.md)
ArangoDB 2.3. Please be sure that you have checked the list of [changes in 2.3](../../ReleaseNotes/UpgradingChanges23.md)
before upgrading.
Please note first that a database directory used with ArangoDB 2.3

View File

@ -1,7 +1,7 @@
!CHAPTER Upgrading to ArangoDB 2.4
Please read the following sections if you upgrade from a previous version to
ArangoDB 2.4. Please be sure that you have checked the list of [changes in 2.4](../Upgrading/UpgradingChanges24.md)
ArangoDB 2.4. Please be sure that you have checked the list of [changes in 2.4](../../ReleaseNotes/UpgradingChanges24.md)
before upgrading.
Please note first that a database directory used with ArangoDB 2.4

View File

@ -1,7 +1,7 @@
!CHAPTER Upgrading to ArangoDB 2.5
Please read the following sections if you upgrade from a previous version to
ArangoDB 2.5. Please be sure that you have checked the list of [changes in 2.5](../Upgrading/UpgradingChanges25.md)
ArangoDB 2.5. Please be sure that you have checked the list of [changes in 2.5](../../ReleaseNotes/UpgradingChanges25.md)
before upgrading.
Please note first that a database directory used with ArangoDB 2.5

View File

@ -1,7 +1,7 @@
!CHAPTER Upgrading to ArangoDB 2.6
Please read the following sections if you upgrade from a previous version to
ArangoDB 2.6. Please be sure that you have checked the list of [changes in 2.6](../Upgrading/UpgradingChanges26.md)
ArangoDB 2.6. Please be sure that you have checked the list of [changes in 2.6](../../ReleaseNotes/UpgradingChanges26.md)
before upgrading.
Please note first that a database directory used with ArangoDB 2.6

View File

@ -1,7 +1,7 @@
!CHAPTER Upgrading to ArangoDB 2.8
Please read the following sections if you upgrade from a previous version to
ArangoDB 2.8. Please be sure that you have checked the list of [changes in 2.8](../Upgrading/UpgradingChanges28.md)
ArangoDB 2.8. Please be sure that you have checked the list of [changes in 2.8](../../ReleaseNotes/UpgradingChanges28.md)
before upgrading.
Please note first that a database directory used with ArangoDB 2.8
@ -110,7 +110,7 @@ with the cluster.
!SECTION Upgrading Foxx apps generated by ArangoDB 2.7 and earlier
The implementation of the `require` function used to import modules in
ArangoDB and Foxx [has changed](./UpgradingChanges28.md#module-resolution)
ArangoDB and Foxx [has changed](../../ReleaseNotes/UpgradingChanges28.md#module-resolution)
in order to improve compatibility with Node.js modules.
Given an app/service with the following layout:

View File

@ -2,6 +2,6 @@
Please read the following sections if you upgrade from a previous
version to ArangoDB 3.0. Please be sure that you have checked the list
of [changes in 3.0](../Upgrading/UpgradingChanges30.md) before
of [changes in 3.0](../../ReleaseNotes/UpgradingChanges30.md) before
upgrading.

View File

@ -363,7 +363,7 @@ will appear.*
Using single routes or [bundles](#routing-bundles) can
become a bit messy in large applications. Kaerus has written a [deployment tool](https://github.com/kaerus/arangodep) in node.js.
Note that there is also [Foxx](../Foxx/README.md) for building applications
Note that there is also [Foxx](../../../Foxx/README.md) for building applications
with ArangoDB.
!SECTION Common Pitfalls when using Actions

View File

@ -36,7 +36,7 @@ HTML pages - static or dynamic. A simple example is the built-in administration
interface. You can access it using any modern browser and there is no need for a
separate Apache or IIS.
In general you will use [Foxx](../Foxx/README.md) to easily extend the database with
In general you will use [Foxx](../../../Foxx/README.md) to easily extend the database with
business logic. Foxx provides a simple-to-use interface to actions.
The following sections will explain the low-level actions within ArangoDB on

View File

@ -1,7 +1,7 @@
!CHAPTER Fulltext queries
ArangoDB allows you to run queries on text contained in document attributes. To use
this, a [fulltext index](../Glossary/README.md#fulltext-index) must be defined for the attribute of the collection that
this, a [fulltext index](../Appendix/Glossary.md#fulltext-index) must be defined for the attribute of the collection that
contains the text. Creating the index will parse the text in the specified
attribute for all documents of the collection. Only documents will be indexed
that contain a textual value in the indexed attribute. For such documents, the
@ -25,7 +25,7 @@ Details about the fulltext query syntax can be found below.
Note: the *fulltext* simple query function is **deprecated** as of ArangoDB 2.6.
The function may be removed in future versions of ArangoDB. The preferred
way for executing fulltext queries is to use an AQL query using the *FULLTEXT*
[AQL function](../Aql/FulltextFunctions.md) as follows:
[AQL function](../../../../AQL/Functions/Fulltext.html) as follows:
FOR doc IN FULLTEXT(@@collection, @attributeName, @queryString, @limit)
RETURN doc

View File

@ -50,6 +50,6 @@ should be used in a query.
!SUBSECTION Related topics
Other ArangoDB geographic features are described in:
- [AQL Geo functions](../Aql/GeoFunctions.md)
- [Geo indexes](../IndexHandling/Geo.md)
- [AQL Geo functions](../../../../AQL/Functions/Geo.html)
- [Geo indexes](../../../Indexing/Geo.md)

View File

@ -11,8 +11,8 @@ modify lots of documents in a collection.
All methods can optionally be restricted to a specific number of operations.
However, if a limit is specified but is less than the number of matches, it
will be undefined which of the matching documents will get removed/modified.
[Remove by Example](../Documents/DocumentMethods.md#remove-by-example),
[Replace by Example](../Documents/DocumentMethods.md#replace-by-example) and
[Update by Example](../Documents/DocumentMethods.md#update-by-example)
[Remove by Example](../../../DataModeling/Documents/DocumentMethods.md#remove-by-example),
[Replace by Example](../../../DataModeling/Documents/DocumentMethods.md#replace-by-example) and
[Update by Example](../../../DataModeling/Documents/DocumentMethods.md#update-by-example)
are described with examples in the subchapter
[Collection Methods](../Documents/DocumentMethods.md).
[Collection Methods](../../../DataModeling/Documents/DocumentMethods.md).

View File

@ -8,4 +8,4 @@ queries, which you can use within the ArangoDB shell and within actions and
transactions. For other languages see the corresponding language API
documentation.
You can find a list of queries at [Collection Methods](../Documents/DocumentMethods.md).
You can find a list of queries at [Collection Methods](../../../DataModeling/Documents/DocumentMethods.md).

View File

@ -16,7 +16,7 @@ opaque strings when they store or use it locally.
!SUBSECTION Collection Name
A collection name identifies a collection in a database. It is a string and is unique within the database. Unlike the collection identifier it is supplied by the creator of the collection. The collection name must consist of letters, digits, and the _ (underscore) and - (dash) characters only. Please refer to [NamingConventions](../NamingConventions/CollectionNames.md) for more information on valid collection names.
A collection name identifies a collection in a database. It is a string and is unique within the database. Unlike the collection identifier it is supplied by the creator of the collection. The collection name must consist of letters, digits, and the _ (underscore) and - (dash) characters only. Please refer to [NamingConventions](../DataModeling/NamingConventions/CollectionNames.md) for more information on valid collection names.
!SUBSECTION Database
@ -26,7 +26,7 @@ A database contains its own collections (which cannot be accessed from other dat
There will always be at least one database in ArangoDB. This is the default database, named _system. This database cannot be dropped, and provides special operations for creating, dropping, and enumerating databases. Users can create additional databases and give them unique names to access them later. Database management operations cannot be initiated from out of user-defined databases.
When ArangoDB is accessed via its HTTP REST API, the database name is read from the first part of the request URI path (e.g. /_db/_system/...). If the request URI does not contain a database name, the database name is automatically derived from the endpoint. Please refer to [DatabaseEndpoint](../HttpDatabase/DatabaseEndpoint.md) for more information.
When ArangoDB is accessed via its HTTP REST API, the database name is read from the first part of the request URI path (e.g. /_db/_system/...). If the request URI does not contain a database name, the database name is automatically derived from the endpoint. Please refer to [DatabaseEndpoint](../../HTTP/Database/DatabaseEndpoint.html) for more information.
!SUBSECTION Database Name
@ -102,7 +102,7 @@ Using `keyOptions` it is possible to disallow user-specified keys completely, or
There are some restrictions for user-defined
keys (see
[NamingConventions for document keys](../NamingConventions/DocumentKeys.md)).
[NamingConventions for document keys](../DataModeling/NamingConventions/DocumentKeys.md)).
!SUBSECTION Document Revision

View File

@ -1,4 +1,5 @@
!CHAPTER Module "queries"
The query module provides the infrastructure for working with currently running AQL queries via arangosh.
@ -15,7 +16,8 @@ The query module provides the infrastructure for working with currently running
!SUBSECTION Currently running queries
We [create a task](../ModuleTasks/README.md) that spawns queries, so we have nice output. Since this task
We [create a task](Tasks.md) that spawns queries, so we have nice output. Since this task
uses resources, you may want to increase `period` (and not forget to remove it... afterwards):
@startDocuBlockInline QUERY_02_listQueries

View File

@ -1,5 +1,6 @@
!SECTION Serverside db-Object implementation
We [already talked about the arangosh db Object implementation](../FirstSteps/Arangosh.md), Now a little more about the server version, so the following examples won't work properly in arangosh.
We [already talked about the arangosh db Object implementation](../GettingStarted/Arangosh.md). Now a little more about the server version, so the following examples won't work properly in arangosh.
Server-side methods of the *db object* will return an `[object ShapedJson]`. This datatype is a very lightweight JavaScript object that contains an internal pointer to where the document data are actually stored in memory or on disk. In particular, this is not a full-blown copy of the document's complete data.

View File

@ -33,4 +33,4 @@ already collected write-ahead logfiles so replication slaves still can fetch dat
them if required. How many collected logfiles will be kept before they get deleted is
configurable via the option *--wal.historic-logfiles*.
For all write-ahead log configuration options, please refer to the page [Write-ahead log options](../ConfigureArango/Wal.md).
For all write-ahead log configuration options, please refer to the page [Write-ahead log options](../Administration/Configuration/Wal.md).

View File

@ -13,14 +13,14 @@ name. ArangoDB internally uses the collection's unique identifier to look up
collections. This identifier, however, is managed by ArangoDB and the user has
no control over it. In order to allow users to use their own names, each collection
also has a unique name which is specified by the user. To access a collection
from the user perspective, the [collection name](../Glossary/README.md#collection-name) should be used, i.e.:
from the user perspective, the [collection name](../Appendix/Glossary.md#collection-name) should be used, i.e.:
!SUBSECTION Collection
`db._collection(collection-name)`
A collection is created by a ["db._create"](../Collections/DatabaseMethods.md) call.
A collection is created by a ["db._create"](DatabaseMethods.md) call.
For example: Assume that the [collection identifier](../Glossary/README.md#collection-identifier) is *7254820* and the name is
For example: Assume that the [collection identifier](../../Appendix/Glossary.md#collection-identifier) is *7254820* and the name is
*demo*, then the collection can be accessed as:
db._collection("demo")
@ -43,4 +43,4 @@ use
`db._create(collection-name)`
This call will create a new collection called *collection-name*.
This method is a database method and is documented in detail at [Database Methods](../Collections/DatabaseMethods.md#create)
This method is a database method and is documented in detail at [Database Methods](DatabaseMethods.md#create)

View File

@ -32,11 +32,11 @@ For example:
```
All documents contain special attributes: the
[document handle](../../Users/Appendix/Glossary.md#document-handle) is stored
[document handle](../../Appendix/Glossary.md#document-handle) is stored
as a string in `_id`, the
[document's primary key](../../Users/Appendix/Glossary.md#document-key) in
[document's primary key](../../Appendix/Glossary.md#document-key) in
`_key` and the
[document revision](../../Users/Appendix/Glossary.md#document-revision) in
[document revision](../../Appendix/Glossary.md#document-revision) in
`_rev`. The value of the `_key` attribute can be specified by the user when
creating a document. `_id` and `_key` values are immutable once the document
has been created. The `_rev` value is maintained by ArangoDB automatically.

View File

@ -0,0 +1,3 @@
!CHAPTER Static file assets
TODO

View File

@ -0,0 +1,120 @@
!CHAPTER Authentication
`const createAuth = require('@arangodb/foxx/auth');`
Authenticators allow implementing a basic password mechanism using simple built-in hashing functions.
**Examples**
```js
const auth = createAuth();
// Use the sessions middleware
const sessionsMiddleware = require('@arangodb/foxx/sessions');
const sessions = sessionsMiddleware({
storage: module.context.collection('sessions'),
transports: 'cookie'
});
module.context.use(sessions);
// Create a router
const createRouter = require('@arangodb/foxx/router');
const router = createRouter();
module.context.use(router);
// Define a login route
const joi = require('joi');
const users = module.context.collection('users');
router.post('/login', function (req, res) {
let user = users.firstExample({
username: req.body.username
});
const valid = auth.verify(
user ? user.authData : {},
req.body.password
);
if (!valid) res.throw('unauthorized');
req.session.uid = user._key;
req.sessionStorage.save(req.session);
  res.send({success: true});
})
.body(joi.object({
username: joi.string().required(),
password: joi.string().required()
}).required(), 'Credentials');
```
!SECTION Creating an authenticator
`auth([options]): Authenticator`
Creates an authenticator.
**Arguments**
* **options**: `Object` (optional)
An object with the following properties:
* **method**: `string` (Default: `"sha256"`)
The hashing algorithm to use to create password hashes. The authenticator will be able to verify passwords against hashes using any supported hashing algorithm. This only affects new hashes created by the authenticator.
Supported values:
* `"md5"`
* `"sha1"`
* `"sha224"`
* `"sha256"`
* `"sha384"`
* `"sha512"`
* **saltLength**: `number` (Default: `16`)
Length of the salts that will be generated for password hashes.
Returns an authenticator.
!SECTION Creating hash objects
`auth.create(password): Hash`
Creates a hash object for the given password with the following properties:
* **method**: `string`
The method used to generate the hash.
* **salt**: `string`
A random salt used to generate this hash.
* **hash**: `string`
The hash string itself.
**Arguments**
* **password**: `string`
A password to hash.
Returns the hash object.
!SECTION Validating passwords against hash objects
`auth.verify([hash, [password]]): boolean`
Verifies the given password against the given hash using a constant time string comparison.
**Arguments**
* **hash**: `Hash` (optional)
A hash object generated with the *create* method.
* **password**: `string` (optional)
A password to verify against the hash.
Returns `true` if the hash matches the given password. Returns `false` otherwise.

View File

@ -1,18 +0,0 @@
!SECTION Installing Foxx on a cluster
The Foxx installation process is agnostic of the cluster architecture.
You simply use the foxx manager as described [earlier](../Install/README.md) and connect it to any of your coordinators.
This will automatically spread the Foxx across all coordinators in the cluster.
!SUBSECTION Development Mode
Within a cluster the development mode of Foxx is totally unsupported. Changes on disk will not be spread to the other coordinators automatically.
Please develop your Foxx in a single server development environment and upload it to the cluster in production mode.
!SUBSECTION Restrictions
The code you write in Foxx should be independent from the server it is running on.
This means you should avoid using the file-system module `fs` as this is writing on the local file system.
This will get out of sync if you use more than one server.
If you install 3rd party libraries please make sure they do not rely on the local file system.
All other operations should be safe and the code for single server and cluster is identical.

View File

@ -1,5 +0,0 @@
!CHAPTER Foxx in the Cluster
This chapter is about running Foxx in a cluster of ArangoDBs.
There is no major conceptual difference from running Foxx in a cluster compared to running Foxx on a single machine.
It is just a couple of minor things to consider.

View File

@ -1,56 +0,0 @@
!CHAPTER Administrative Scripts in Cluster
If you want to scale the dataset you access via Foxx you have to take sharding into account.
This results in minor changes to your setup and teardown scripts.
These will be executed on all servers you deploy your Foxx to, hence you have to consider that they will be executed multiple times.
Also the setup for collections now requires you to give the number of shards for the collection (recommended is the number of servers squared).
!SECTION Setup script
This script has to take into consideration that collections might have been installed by other servers already.
Also you have to give the number of shards (9 in this example):
```
var console = require("console");
var arangodb = require("@arangodb");
var db = arangodb.db;
var texts = applicationContext.collectionName("texts");
if (db._collection(texts) === null) {
// This is the first one running the script
var collection = db._create(texts, {
numberOfShards: 9
});
collection.save({ text: "entry 1 from collection texts" });
collection.save({ text: "entry 2 from collection texts" });
collection.save({ text: "entry 3 from collection texts" });
}
else {
console.log("collection '%s' already exists. Leaving it untouched.", texts);
}
```
!SECTION Teardown script
Also this script has to take into account that it might be run several times.
You can also skip the teardown execution by passing the `teardown=false` option to the uninstall process.
```
unix>foxx-manager uninstall /example teardown=false
```
The teardown script just has to check if the collection is not yet dropped:
```
var arangodb = require("@arangodb");
var db = arangodb.db;
var texts = applicationContext.collectionName("texts");
var collection = db._collection(texts);
if (collection !== null) {
// Not yet dropped. Drop it now
collection.drop();
}
```

View File

@ -0,0 +1,3 @@
!CHAPTER Foxx configuration
TODO

View File

@ -0,0 +1,167 @@
!CHAPTER Foxx service context
TODO
The service context specifies the following properties:
* **argv**: `any`
Any arguments passed in if the current file was executed as a script or queued job. For more information see the [chapter on scripts and queued jobs][SCRIPTS].
* **basePath**: `string`
The file system path of the service, i.e. the folder in which the service was installed by ArangoDB.
* **baseUrl**: `string`
The base URL of the service, relative to the ArangoDB server, e.g. `/_db/_system/my-foxx`.
* **collectionPrefix**: `string`
The prefix that will be used by *collection* and *collectionName* to derive the names of service-specific collections. This is derived from the service's mount point, e.g. `/my-foxx` becomes `my_foxx`.
* **configuration**: `Object`
TODO
* **dependencies**: `Object`
TODO
* **isDevelopment**: `boolean`
TODO
* **isProduction**: `boolean`
The inverse of *isDevelopment*.
* **manifest**: `Object`
TODO
* **mount**: `string`
The mount point of the service, e.g. `/my-foxx`.
!SECTION apiDocumentation
`module.context.apiDocumentation([options]): Function`
TODO
**Arguments**
* **options**: `Object` (optional)
TODO
TODO
!SECTION collection
`module.context.collection(name): ArangoCollection | null`
Passes the given name to *collectionName*, then looks up the collection with the prefixed name.
**Arguments**
* **name**: `string`
Unprefixed name of the service-specific collection.
Returns a collection or `null` if no collection with the prefixed name exists.
!SECTION collectionName
`module.context.collectionName(name): string`
Prefixes the given name with the *collectionPrefix* for this service.
**Arguments**
* **name**: `string`
Unprefixed name of the service-specific collection.
Returns the prefixed name.
**Examples**
```js
module.context.mount === '/my-foxx'
module.context.collectionName('doodads') === 'my_foxx_doodads'
```
!SECTION file
`module.context.file(name, [encoding]): Buffer | string`
Passes the given name to *fileName*, then loads the file with the resulting name.
**Arguments**
* **name**: `string`
Name of the file to load, relative to the current service.
* **encoding**: `string` (optional)
Encoding of the file, e.g. `utf-8`. If omitted the file will be loaded as a raw buffer instead of a string.
Returns the file's contents.
!SECTION fileName
`module.context.fileName(name): string`
Resolves the given file name relative to the current service.
**Arguments**
* **name**: `string`
Name of the file, relative to the current service.
Returns the absolute file path.
!SECTION registerType
`module.context.registerType(type, def): void`
TODO
**Arguments**
* **type**: `string`
TODO
* **def**: `Object`
TODO
TODO
!SECTION use
`module.context.use([path], router): Endpoint`
Mounts a given router on the service to expose the router's routes on the service's mount point.
**Arguments**
* **path**: `string` (Default: `"/"`)
Path to mount the router at, relative to the service's mount point.
* **router**: `Router | Middleware`
A router or middleware to mount.
Returns the [Endpoint][ENDPOINTS] for the given router or middleware.
**Note**: Mounting services at run time (e.g. within request handlers or queued jobs) is not supported.
[SCRIPTS]: ./Scripts.md
[ENDPOINTS]: ./Router/Endpoints.md

View File

@ -0,0 +1,19 @@
!CHAPTER Dependency management
There are two things commonly called "dependencies" in Foxx:
* Module dependencies, i.e. dependencies on external JavaScript modules (e.g. from the public npm registry)
* Foxx dependencies, i.e. dependencies between Foxx services
Let's look at them in more detail:
!SECTION Module dependencies
You can use the `node_modules` folder to bundle third-party Foxx-compatible npm and Node.js modules with your Foxx service. Typically this is achieved by adding a `package.json` file to your project specifying npm dependencies using the `dependencies` attribute and installing them with the npm command-line tool.
Make sure to include the actual `node_modules` folder in your Foxx service bundle as ArangoDB will not do anything special to install these dependencies. Also keep in mind that bundling extraneous modules like development dependencies may bloat the file size of your Foxx service bundle.
!SECTION Foxx dependencies
TODO

View File

@ -1,70 +0,0 @@
!CHAPTER Foxx API Documentation
In addition to viewing the API documentation of any Foxx app in the admin frontend, you can also mount the API
documentation inside your own Foxx app. This allows you to serve your documentation to users without having to
give them access to the admin frontend or any other parts of ArangoDB.
!SECTION Mounting the API documentation
<!-- js/server/modules/@arangodb/foxx/controller.js -->
`Controller.apiDocumentation(path, [opts])`
Mounts the API documentation (Swagger) at the given `path`.
Note that the `path` can use path parameters as usual but must not use any
wildcard (`*`) or optional (`:name?`) parameters.
The optional **opts** can be an object with any of the following properties:
* **before**: a function that will be executed before a request to
this endpoint is processed further.
* **appPath**: the mount point of the app for which documentation will be
shown. Default: the mount point of the active app.
* **indexFile**: file path or file name of the Swagger HTML file.
Default: `"index.html"`.
* **swaggerJson**: file path or file name of the Swagger API description JSON
file or a function `swaggerJson(req, res, opts)` that sends a Swagger API
description in JSON. Default: the built-in Swagger description generator.
* **swaggerRoot**: absolute path that will be used as the base path for any
relative paths of the documentation assets, **swaggerJson** file and
the **indexFile**. Default: the built-in Swagger distribution.
If **opts** is a function, it will be used as the value of **opts.before**.
If **opts.before** returns `false`, the request will not be processed
further.
If **opts.before** returns an object, any properties will override the
equivalent properties of **opts** for the current request.
Of course all **before**, **after** or **around** functions defined on the
controller will also be executed as usual.
**Examples**
```js
controller.apiDocumentation('/my/dox');
```
A request to `/my/dox` will be redirect to `/my/dox/index.html`,
which will show the API documentation of the active app.
```js
controller.apiDocumentation('/my/dox', function (req, res) {
if (!req.session.get('uid')) {
res.status(403);
res.json({error: 'only logged in users may see the API'});
return false;
}
return {appPath: req.parameters.mount};
});
```
A request to `/my/dox/index.html?mount=/_admin/aardvark` will show the
API documentation of the admin frontend (mounted at `/_admin/aardvark`).
If the user is not logged in, the error message will be shown instead.

View File

@ -1,302 +0,0 @@
!CHAPTER Foxx console
Foxx injects a **console** object into each Foxx app that allows writing log entries to the database in addition to the ArangoDB log file and querying them from within the app itself.
The **console** object supports the CommonJS Console API found in Node.js and modern browsers, while also providing some ArangoDB-specific additions.
ArangoDB also provides [the `console` module](../../ModuleConsole/README.md) which only supports the CommonJS Console API and only writes log entries to the ArangoDB log.
When working with transactions, keep in mind that the Foxx console will attempt to write to the `_foxxlog` system collection. This behaviour can be disabled using the `setDatabaseLogging` method if you don't want to explicitly allow writing to the log collection during transactions or for performance reasons.
!SECTION Logging
!SUBSECTION Logging console messages
Write an arbitrary message to the app's log.
`console.log([format,] ...parameters)`
Applies `util.format` to the arguments and writes the resulting string to the app's log.
If the first argument is not a formatting string or any of the additional arguments are not used by the formatting string, they will be concatenated to the result string.
**Examples**
```js
console.log("%s, %s!", "Hello", "World"); // => "Hello, World!"
console.log("%s, World!", "Hello", "extra"); // => "Hello, World! extra"
console.log("Hello,", "beautiful", "world!"); // => "Hello, beautiful world!"
console.log(1, 2, 3); // => "1 2 3"
```
!SUBSECTION Logging with different log levels
The **console** object provides additional methods to log messages with different log levels:
* `console.debug` for log level **DEBUG**
* `console.info` for log level **INFO**
* `console.warn` for log level **WARN**
* `console.error` for log level **ERROR**
By default, `console.log` uses log level **INFO**, making it functionally equivalent to `console.info`. Other than the log level, all of these methods behave identically to `console.log`.
The built-in log levels are:
* -200: **TRACE**
* -100: **DEBUG**
* 0: **INFO**
* 100: **WARN**
* 200: **ERROR**
!SUBSECTION Logging with timers
You can start and stop timers with labels.
`console.time(label)` / `console.timeEnd(label)`
Passing a label to `console.time` starts the timer. Passing the same label to `console.timeEnd` ends the timer and logs a message with the label and the expired time in milliseconds.
Calling `console.timeEnd` with an invalid label or calling it with the same label twice (without first starting a new timer with the same label) results in an error.
By default, the timing messages will be logged with log level **INFO**.
**Examples**
```js
console.time('do something');
// ... more code ...
console.time('do more stuff');
// ... even more code ...
console.timeEnd('do more stuff'); // => "do more stuff: 23ms"
// ... a little bit more ....
console.timeEnd('do something'); // => "do something: 52ms"
```
Don't do this:
```js
console.timeEnd('foo'); // fails: label doesn't exist yet.
console.time('foo'); // works
console.timeEnd('foo'); // works
console.timeEnd('foo'); // fails: label no longer exists.
```
!SUBSECTION Logging stack traces
You can explicitly log a message with a stack trace.
`console.trace(message)`
This creates a stack trace with the given message.
By default the stack traces will be logged with a log level of **TRACE**.
**Examples**
```js
console.trace('Hello');
/* =>
Trace: Hello
at somefile.js:1:1
at ...
*/
```
!SUBSECTION Logging assertions
This creates an assertion that will log an error if it fails.
`console.assert(statement, message)`
If the given **statement** is not true (e.g. evaluates to `false` if treated as a boolean), an `AssertionError` with the given message will be created and its stack trace will be logged.
By default, the stack trace will be logged with a log level of **ERROR** and the error will be discarded after logging it (instead of being thrown).
**Examples**
```js
console.assert(2 + 2 === 5, "I'm bad at maths");
/* =>
AssertionError: I'm bad at maths
at somefile.js:1:1
at ...
*/
```
!SUBSECTION Inspecting an object
This logs a more detailed string representation of a given object.
`console.dir(value)`
The regular logging functions try to format messages as nicely and readable as possible. Sometimes that may not provide you with all the information you actually want.
The **dir** method instead logs the result of `util.inspect`.
By default the message will be logged with the log level **INFO**.
**Examples**
```js
console.log(require('@arangodb').db); // => '[ArangoDatabase "_system"]'
console.dir(require('@arangodb').db);
/* =>
{ [ArangoDatabase "_system"]
_version: [Function: _version],
_id: [Function: _id],
...
}
*/
```
!SUBSECTION Custom log levels
This lets you define your own log levels.
`console.custom(name, value)`
If you need more than the built-in log levels, you can easily define your own.
This method returns a function that logs messages with the given log level (e.g. an equivalent to `console.log` that uses your custom log level instead).
**Parameter**
* **name**: name of the log level as it appears in the database, usually all-uppercase
* **value** (optional): value of the log level. Default: `50`
The **value** is used when determining whether a log entry meets the minimum log level that can be defined in various places. For a list of the built-in log levels and their values see the section on logging with different log levels above.
!SUBSECTION Preventing entries from being logged
You can define a minimum log level entries must match in order to be logged.
`console.setLogLevel(level)`
The **level** can be a numeric value or the case-sensitive name of an existing log level.
Any entries with a log level lower than the given value will be discarded silently.
This can be helpful if you want to toggle logging diagnostic messages in development mode that should not be logged in production.
The default log level is set to `-999`. For a list of built-in log levels and their values see the section on logging with different log levels.
!SUBSECTION Enabling extra stack traces
You can toggle the logging of stack trace objects for every log entry.
`console.setTracing(trace)`
If **trace** is set to `true`, all log entries will be logged with a parsed stack trace as an additional `stack` property that can be useful for identifying where the entry originated and how the code triggering it was called.
Because this results in every logging call creating a stack trace (which may have a significant performance impact), this option is disabled by default.
!SUBSECTION Disabling logging to the ArangoDB console
You can toggle whether logs should be written to the ArangoDB console.
`console.setNativeLogging(nativeLogging)`
If **nativeLogging** is set to `false`, log entries will not be logged to the ArangoDB console (which usually writes to the file system).
!SUBSECTION Disabling logging to the database
You can toggle whether logs should be written to the database.
`console.setDatabaseLogging(databaseLogging)`
If **databaseLogging** is set to `false`, log entries will not be logged to the internal `_foxxlog` collection.
This is only useful if logging to the ArangoDB console is not also disabled.
!SUBSECTION Enabling assertion errors
You can toggle whether console assertions should throw if they fail.
`console.setAssertThrows(assertThrows)`
If **assertThrows** is set to `true`, any failed assertions in `console.assert` will result in the generated error being thrown instead of being discarded after it is logged.
By default, this setting is disabled.
!SUBSECTION Changing the default log levels
Most of the logging methods have an implied log level that is set to a reasonable default. If you would like to have them use different log levels, you can easily change them.
* `console.log.level` defaults to **INFO**
* `console.dir.level` defaults to **INFO**
* `console.time.level` defaults to **INFO**
* `console.trace.level` defaults to **TRACE**
* `console.assert.level` defaults to **ERROR**
To use different log levels, just set these properties to the case-sensitive name of the desired log level.
**Examples**
```js
console.log('this uses "INFO"');
console.log.level = 'WARN';
console.log('now it uses "WARN"');
```
Known custom levels work, too:
```js
console.custom('BANANA', 23);
console.log.level = 'BANANA';
console.log('now it uses "BANANA"');
```
Unknown levels result in an error:
```js
console.log.level = 'POTATO';
console.log('this throws'); // => Error: Unknown log level: POTATO
```
!SECTION Querying a Foxx app's log entries
As the log entries are logged to a collection in the database, you can easily query them in your own application.
!SUBSECTION The logs object
The logs object can be found on the console itself:
`console.logs`
It provides three easy methods for querying the log entries:
* `logs.list([opts])`
* `logs.searchByMessage(message, [opts])`
* `logs.searchByFileName(fileName, [opts])`
Each method takes an optional `opts` argument, which can be an object with any of the following properties:
* **startTime**: the oldest time to include (in milliseconds). Default: 2 hours ago.
* **endTime**: the most recent time to include (in milliseconds).
* **level**: only return entries with this log level (name or value).
* **minLevel**: only return entries with this log level or above (name or value).
* **sort**: sorting direction of the result (**ASC** or **DESC** by time). Default: **ASC**.
* **limit**: maximum number of entries to return.
* **offset**: if **limit** is set, skip this many entries.
The default value for **startTime** can be changed by overriding `logs.defaultMaxAge` with a different time offset in milliseconds.
!SUBSUBSECTION Search by message
This lists all log entries with messages that contain the given token.
`logs.searchByMessage(message, [opts])`
This works like `logs.list` except it only returns log entries containing the given **message** part in their message.
!SUBSUBSECTION Search by file name
This lists all log entries with stack traces that contain the given token.
`logs.searchByFileName(fileName, [opts])`
This works like `logs.list` except it only returns log entries containing the given **fileName** part in one of the file names of their stack trace.
This method can only be used if the console has **tracing** enabled. See the section on enabling extra stack traces.
Note that entries that were logged while tracing was not enabled can not be found with this method because they don't have any parsed stack traces associated with them. This method does not search the log entries messages for the file name, so entries generated by `console.assert` or `console.trace` are not treated differently.

File diff suppressed because it is too large Load Diff

View File

@ -1,71 +0,0 @@
!CHAPTER Available Debugging mechanisms
We are talking about the development mode for Foxx.
Hence one of the most important parts will be debugging of your Foxx.
We have several mechanisms available to simplify this task.
During Foxx development we assume the following situation:
You have installed a syntactically valid version of a Foxx and want to develop it further.
You have activated the development mode for this route:
```
unix>foxx-manager development /example
Activated development mode for Application hello-foxx version 1.5.0 on mount point /example
```
!SECTION Errors during install
Now you apply changes to the source code of Foxx.
In development mode it is possible that you create a Foxx that could not be installed regularly.
If this is the case and you request any route of it you should receive a detailed error information:
```
unix>curl -X GET --dump - http://localhost:8529/_db/_system/example/failed
HTTP/1.1 500 Internal Error
Server: ArangoDB
Connection: Keep-Alive
Content-Type: application/json; charset=utf-8
Content-Length: 554
{"exception":"Error: App not found","stacktrace":["Error: App not found"," at Object.lookupApp (./js/server/modules/@arangodb/foxx/manager.js:99:13)"," at foxxRouting (./js/server/modules/@arangodb/actions.js:1040:27)"," at execute (./js/server/modules/@arangodb/actions.js:1291:7)"," at Object.routeRequest (./js/server/modules/@arangodb/actions.js:1312:3)"," at Function.actions.defineHttp.callback (js/actions/api-system.js:51:15)",""],"error":true,"code":500,"errorNum":500,"errorMessage":"failed to load foxx mounted at '/example'"}
```
!SECTION Errors in routes
Suppose you have created a Foxx that can be installed regularly but has an unhandled error inside a route.
Triggering this route and entering the error case will return the specific error including a stack trace for you to hunt it down:
```
unix>curl -X GET http://localhost:8529/_db/_system/example/failed
HTTP/1.1 500 Internal Error
Server: ArangoDB
Connection: Keep-Alive
Content-Type: application/json
Content-Length: 917
{"error":"Unhandled Error","stack":"Error: Unhandled Error\n at fail (js/apps/_db/_system/example/APP/app.js:279:13)\n at js/apps/_db/_system/example/APP/app.js:284:5\n at Object.res.action.callback (./js/server/modules/@arangodb/foxx/internals.js:108:5)\n at ./js/server/modules/@arangodb/foxx/routing.js:346:19\n at execute (./js/server/modules/@arangodb/actions.js:1291:7)\n at next (./js/server/modules/@arangodb/actions.js:1308:7)\n at [object Object]:386:5\n at execute (./js/server/modules/@arangodb/actions.js:1291:7)\n at routeRequest (./js/server/modules/@arangodb/actions.js:1312:3)\n at foxxRouting (./js/server/modules/@arangodb/actions.js:1082:7)\n at execute (./js/server/modules/@arangodb/actions.js:1291:7)\n at Object.routeRequest (./js/server/modules/@arangodb/actions.js:1312:3)\n at Function.actions.defineHttp.callback (js/actions/api-system.js:51:15)\n"}
```
!SECTION Errors in logs
Independent of the errors presented in the routes on requests, Foxx will always log errors to the log file if they are caught by the default error handlers.
The log entries will always contain stacktraces and error messages:
```
INFO /example, incoming request from 127.0.0.1: GET http://0.0.0.0:8529/example/failed
ERROR Error in foxx route '{ "match" : "/failed", "methods" : [ "get" ] }': 'Unhandled Error', Stacktrace: Error: Unhandled Error
ERROR at fail (js/apps/_db/_system/example/APP/app.js:279:13)
ERROR at js/apps/_db/_system/example/APP/app.js:284:5
ERROR at Object.res.action.callback (./js/server/modules/@arangodb/foxx/internals.js:108:5)
ERROR at ./js/server/modules/@arangodb/foxx/routing.js:346:19
ERROR at execute (./js/server/modules/@arangodb/actions.js:1291:7)
ERROR at next (./js/server/modules/@arangodb/actions.js:1308:7)
ERROR at [object Object]:386:5
ERROR at execute (./js/server/modules/@arangodb/actions.js:1291:7)
ERROR at routeRequest (./js/server/modules/@arangodb/actions.js:1312:3)
ERROR at foxxRouting (./js/server/modules/@arangodb/actions.js:1082:7)
ERROR at execute (./js/server/modules/@arangodb/actions.js:1291:7)
ERROR at Object.routeRequest (./js/server/modules/@arangodb/actions.js:1312:3)
ERROR at Function.actions.defineHttp.callback (js/actions/api-system.js:51:15)
ERROR
INFO /example, outgoing response with status 500 of type application/json, body length: 917
```

View File

@ -1,46 +0,0 @@
!CHAPTER Application Development Mode
This chapter describes the development mode for Foxx applications.
It is only useful if you have write access to the files in ArangoDB's application folder.
The folder is printed in the web interface for development Foxxes and can be reconfigured using the startup option `--javascript.app-path`.
If you do not have access to this folder, e.g. using a hosted service like [myArangoDB](https://myarangodb.com/), you cannot make use of this feature.
You will have to stick to the procedure described in [New Versions](../Production/Upgrade.md).
<div class="versionDifference">
Before 2.5 the startup option `--javascript.dev-app-path` was required for the development mode.
This caused a lot of confusion and introduced problems when moving from development to production.
So we decided to unify both app paths and activate development mode for specific Apps during runtime.
The `--javascript.dev-app-path` parameter no longer has any effect.
</div>
!SECTION Activation
Activating the development mode is done with a single command:
```
unix> foxx-manager development /example
Activated development mode for Application hello-foxx version 1.5.0 on mount point /example
```
Now the app will be listed in **listDevelopment**:
```
unix> foxx-manager listDevelopment
Mount Name Author Description Version Development
--------- ----------- ------------- ----------------------------------------- -------- ------------
/example hello-foxx Frank Celler This is 'Hello World' for ArangoDB Foxx. 1.5.0 true
--------- ----------- ------------- ----------------------------------------- -------- ------------
1 application(s) found
```
!SECTION Effects
For a Foxx application in development mode the following effects apply:
**Reload on request**
Whenever a request is routed to this application its source is reloaded.
This means all requests are slightly slower than in production mode.
But you will get immediate live updates on your code changes.
**Exposed Debugging information**
This application will deliver error messages and stack traces to the requesting client.
For more information see the [Debugging](Debugging.md) section.

View File

@ -1,153 +0,0 @@
!CHAPTER Working with Foxx exports
Instead of (or in addition to) defining controllers, Foxx apps can also define exports.
Foxx exports are not intended to replace regular NPM modules. They simply allow you to make your app's collections and **applicationContext** available in other Foxx apps or bundling ArangoDB-specific modules in re-usable Foxx apps.
!SECTION Define an export module
In order to export modules in a Foxx app you need to list the files in your manifest:
```json
{
"name": "foxx_exports_example",
"version": "1.0.0",
"description": "Demonstrates Foxx exports.",
"exports": {
"doodads": "./doodads.js",
"anotherModule": "./someOtherFilename.js"
},
"controllers": {
"/etc": "./controllers.js"
}
}
```
The file **doodads.js** in the app's base path could look like this:
```js
var Foxx = require('@arangodb/foxx');
class Doodad extends Foxx.Model {}
var doodadRepo = new Foxx.Repository(
applicationContext.collection('doodads'),
{model: Doodad}
);
exports.repo = doodadRepo;
exports.model = Doodad;
// or simply
module.exports = {
repo: doodadRepo,
model: Doodad
};
```
This module would then export the name "repo" bound to the variable **doodads** as well as the name "model" bound to the **Doodad** model.
**Note**: The **applicationContext** is available to your Foxx exports just like in your Foxx controllers.
**Note**: Node.js style exports by assigning values directly to the `module.exports` property are supported, too.
!SECTION Import from another app
In order to import from another app, you need to know where the app is mounted.
Let's say we have mounted the example app above at **/my-doodads**. We could now access the app's exports in another app like so:
```js
var Foxx = require('@arangodb/foxx');
var doodads = Foxx.requireApp('/my-doodads').doodads;
var Doodad = doodads.model;
var doodadRepo = doodads.repo;
// use the imported model and repository
var myDoodad = new Doodad();
doodadRepo.save(myDoodad);
```
!SECTION Default exports
If you want more control over the object other apps receive when they load your app using **Foxx.requireApp**, you can specify a single filename instead of an object in your manifest file:
```json
{
"name": "foxx_exports_example",
"version": "1.0.0",
"description": "Demonstrates Foxx exports.",
"exports": "./exports.js",
"controllers": {
"/etc": "./controllers.js"
}
}
```
To replicate the same behavior as in the earlier example, the file **exports.js** could look like this:
```js
exports.doodads = require('./doodads');
exports.anotherModule = require('./someOtherFilename');
// or Node.js-style
module.exports = {
doodads: require('./doodads'),
anotherModule: require('./someOtherFilename')
};
```
Let's assume the file **exports.js** exports something else instead:
```js
module.exports = function () {
return "Hello!";
};
```
Assuming the app is mounted at `/hello`, we could now use it in other apps like this:
```js
var Foxx = require('@arangodb/foxx');
var helloExport = Foxx.requireApp('/hello');
var greeting = helloExport(); // "Hello!"
```
!SECTION Managing app dependencies
As of ArangoDB 2.6 you can define Foxx app dependencies of your Foxx app in your manifest.
Let's say you want to use the `foxx_exports_example` app from earlier in your app. Just add it to your own manifest like this:
```json
{
"name": "foxx_dependencies_example",
"version": "1.0.0",
"dependencies": {
"exportsExample": {
"name": "foxx_exports_example",
"version": "^1.0.0"
}
}
}
```
Instead of specifying the exact mount point of the app you depend on, you can now refer to it using the name `exportsExample`:
**Before:**
```js
var doodads = Foxx.requireApp(applicationContext.configuration.exportsExampleMountPath).doodads;
```
**After:**
```js
var doodads = applicationContext.dependencies.exportsExample.doodads;
```
If an app declares any dependencies,
you need to fulfill its dependencies before it becomes active.
In the meantime a fallback application will be mounted that responds to all
requests with a HTTP 500 status code indicating a server-side error.
The dependencies of a mounted app can be adjusted
from the admin frontend by clicking the *Dependencies* button in the app details
or using the **set-dependencies** command of the **foxx-manager** command-line utility.

View File

@ -1,52 +0,0 @@
!CHAPTER Folder Structure
Now we are almost ready to write some code.
Hence it is time to introduce the folder structure created by Foxx.
We still follow the example of the app installed at `/example`.
The route to reach this application via HTTP(S) is constructed with the following parts:
* The ArangoDB endpoint `<arangodb>`: (e.g. `http://localhost:8529`)
* The selected database `<db>`: (e.g. `_system`)
* The mount point `<mount>`: (e.g. `/example`)
Now the route is constructed as follows:
```
<arangodb>/_db/<db>/<mount>
http://localhost:8529/_db/_system/example
```
For the sources of the application, the path on your file system is constructed almost the same way.
But here we need some additional information:
* The app-path `<app-path>`: (e.g. `/var/lib/arangodb-apps`)
* The selected database `<db>`: (e.g. `_system`)
* The mount point `<mount>`: (e.g. `/example`)
**Note**: You can set your app-path to an arbitrary folder using the `--javascript.app-path` startup parameter.
Now the path is constructed as follows:
```
<app-path>/_db/<db>/<mount>/APP
Linux: /var/lib/arangodb-apps/_db/_system/example/APP
Mac: /usr/local/var/lib/arangodb-apps/_db/_system/example/APP
Windows: C:\Program Files\ArangoDB\js\apps\_db\_system\example\APP
```
Before 2.5 the folder was constructed using application name and version.
That was necessary because installation was a two step process:
<!-- <div class="versionDifference"-->
1. Including the Application sources into ArangoDB (and creating the folder)
2. Mounting the application to one specific mountpoint
This caused some confusion and a lot of unnecessary administration overhead.
One had to remember which apps are just known to ArangoDB, which ones are actually executed in which version etc.
The use-case we actually introduced this staging for was heavy reuse of equal apps.
However it turned out that this is rarely the case and the overhead by having redundant sources is small compared to the improved user experience not having this staging.
So we decided to entirely remove the staging and make installation a one-step process without caching old versions of an app.
This means that if you now **uninstall** an application it is removed from the file system.
Before 2.5 you had to **purge** the application to make sure it is removed.
<!-- </div> -->
Now you can start modifying the files located there. As a good entry you should start with the [Controller](Controller.md)

View File

@ -1,242 +0,0 @@
!CHAPTER The Manifest File
In the **manifest.json** you define the components of your application.
The content is a JSON object with the following attributes (not all
attributes are required though):
* **author**: The author name
* **configuration**: Define your app's configuration parameters.
* **contributors**: An array containing objects, each represents a contributor (with **name** and optional **email**)
* **controllers**: Map routes to FoxxControllers
* **defaultDocument**: The default document when the application's root (`/`) is called (defaults to `"index.html"`)
* **dependencies**: Map names to Foxx apps
* **description**: A short description of the application (Meta information)
* **engines**: Should be an object with **arangodb** set to the ArangoDB version your Foxx app is compatible with
* **exports**: Map names to Foxx exports
* **files**: Deliver files
* **isSystem**: Mark an application as a system application
* **keywords**: An array of keywords to help people find your Foxx app
* **lib**: Base path for all required modules
* **license**: Short form of the license (MIT, GPL...)
* **name**: Name of the application (Meta information)
* **repository**: An object with information about where you can find the repository: **type** and **url**
* **scripts**: An object with script names mapped to filenames, e.g. your app's **setup** and **teardown** scripts
* **tests**: An array of names of files containing mocha tests for the Foxx app.
* **thumbnail**: Path to a thumbnail that represents the application (Meta information)
* **version**: Current version of the application (Meta information)
If you install an application using the Foxx manager or are using the
development mode, your manifest will be checked for completeness and common errors.
You should have a look at the server log files after changing a manifest file to
get informed about potential errors in the manifest.
A more complete example for a Manifest file:
```js
{
"name": "my_website",
"version": "1.2.1",
"description": "My Website with a blog and a shop",
"thumbnail": "images/website-logo.png",
"engines": {
"arangodb": "^2.7.0"
},
"configuration": {
"currency": {
"description": "Currency symbol to use for prices in the shop.",
"default": "$",
"type": "string"
}
},
"controllers": {
"/blog": "apps/blog.js",
"/shop": "apps/shop.js"
},
"exports": "index.js",
"lib": "lib",
"files": {
"/images": "images"
},
"scripts": {
"setup": "scripts/setup.js",
"teardown": "scripts/teardown.js",
"some-maintenance-script": "scripts/prune-comments.js"
},
"tests": [
"test/**",
"test-*.js"
],
"dependencies": {
"sessions": "sessions@^1.0.0",
"systemUsers": "users",
"mailer": {
"name": "mailer-postmark",
"version": "*",
"required": false
}
}
}
```
!SUBSECTION The setup and teardown scripts
You can provide a path to a JavaScript file that prepares ArangoDB for your
application (or respectively removes it entirely).
Use the **setup** script to create all collections your application needs
and fill them with initial data if you want to.
Use the **teardown** script to remove all collections you have created.
Note: the setup script is called on each request in the development mode.
If your application needs to set up specific collections,
you should always check in the setup script whether they are already there.
The teardown script is called when an application is uninstalled.
It is good practice to drop any collections in the teardown script
that the application used exclusively, but this is not enforced.
Maybe there are reasons to keep application data even after removing an application.
It's up to you to decide what to do.
!SUBSECTION Mocha tests
You can provide test cases for your Foxx app using the [mocha test framework](http://mochajs.org/)
and an assertion library like [expect.js](https://github.com/Automattic/expect.js)
or [chai](http://chaijs.com) (or even the built-in assert module).
The **tests** array lists the relative paths of all mocha tests of your Foxx app.
In addition to regular paths, the array can also contain any patterns supported by
the [minimatch module](https://github.com/isaacs/minimatch), for example:
* glob matching: `./tests/*.js` will match all JS files in the folder "tests"
* globstar matching: `./tests/**` will match all files and subfolders of the folder "tests"
* brace expansion: `./tests/{a,b,c}.js` will match the files "a.js", "b.js" and "c.js"
For more information on the supported patterns see the minimatch documentation.
!SUBSECTION Configuration parameters
Foxx apps can define configuration parameters to make them more re-usable.
The **configuration** object maps names to configuration parameters:
* The **key** is the name under which the parameter will be available
on your **applicationContext.configuration** object
* The **value** is a parameter definition.
The parameter definition can have the following properties:
* **description**: a human readable description of the parameter.
* **type**: the type of the configuration parameter. Default: `"string"`.
* **default**: the default value of the configuration parameter.
* **required**: whether the parameter is required. Default: `true`
The **type** can be any of the following:
* **integer** or **int**: any finite integer number.
* **boolean** or **bool**: `true` or `false`.
* **number**: any finite decimal or integer number.
* **string**: any string value.
* **json**: any well-formed JSON value.
* **password**: like *string* but will be displayed as a masked input field in the web frontend.
If the configuration has parameters that do not specify a default value,
you need to configure the app before it becomes active.
In the meantime a fallback application will be mounted that responds to all
requests with a HTTP 500 status code indicating a server-side error.
The configuration parameters of a mounted app can be adjusted
from the admin frontend by clicking the *Configuration* button in the app details
or using the **configure** command of the **foxx-manager** command-line utility.
!SUBSECTION Defining dependencies
Foxx apps can depend on other Foxx apps to be installed on the same server.
The **dependencies** object maps aliases to Foxx apps:
* The **key** is the name under which the dependency's exports will be available
on your **applicationContext.dependencies** object.
* The **value** is a dependency definition.
The dependency definition is an object with any of the following properties:
* **name** (optional): the name of the Foxx app this app depends on.
* **version** (Default: `"*"`): a [semver](http://semver.org) version or version range of the Foxx app this app depends on.
* **required** (Default: `true`): whether the dependency is required for this app to be usable or not.
Alternatively the dependency definition can be a string using any of the following formats:
* `*` will allow using any app to be used to meet the dependency.
* `sessions` or `sessions:*` will match any app with the name `sessions`
(such as the *sessions* app in the Foxx application store).
* `sessions:1.0.0` will match the version `1.0.0` of any app with the name `sessions`.
Instead of using a specific version number, you can also use any expression supported by
the [semver](https://github.com/npm/node-semver) module.
Currently the dependency definition names and versions are not enforced in ArangoDB
but this may change in a future version.
If an app declares any required dependencies,
you need to fulfill its dependencies before it becomes active.
In the meantime a fallback application will be mounted that responds to all
requests with a HTTP 500 status code indicating a server-side error.
The dependencies of a mounted app can be adjusted
from the admin frontend by clicking the *Dependencies* button in the app details
or using the **set-dependencies** command of the **foxx-manager** command-line utility.
For more information on dependencies see the chapter on [Foxx Exports](./Exports.md).
!SUBSECTION Defining controllers
Controllers can be defined as an object mapping routes to file names:
* The **key** is the route you want to mount at
* The **value** is the path to the JavaScript file containing the
**FoxxController** you want to mount
You can add multiple controllers in one manifest this way.
If **controllers** is set to a string instead, it will be treated as the **value**
with the **key** being implicitly set to `"/"` (i.e. the root of your app's mount point).
In other words, the following:
```json
{
"controllers": "my-controllers.js"
}
```
is equivalent to this:
```js
{
"controllers": {
"/": "my-controllers.js"
}
}
```
!SUBSECTION The files
Deliver all files in a certain folder without modifying them. You can deliver
text files as well as binaries:
```js
"files": {
"/images": "images"
}
```

View File

@ -1,268 +0,0 @@
!CHAPTER Details on FoxxModel
The model doesn't know anything about the database. It is just a representation
of the data as an JavaScript object. You can add and overwrite the methods of
the prototype in your model prototype via the object you give to extend. In
your model file, export the model as **model**.
```js
var Foxx = require("@arangodb/foxx");
class TodoModel extends Foxx.Model {
// ...
}
exports.model = TodoModel;
```
A Foxx Model can be initialized with an object of attributes and their values.
There's also the possibility of annotation: If you extend the model with a
**schema** property, the model's attributes will be validated against it.
You can define attributes in the schema using the bundled **joi** library.
For more information on the syntax see [the official joi documentation](https://github.com/spumko/joi).
```js
var Foxx = require("@arangodb/foxx");
var joi = require("joi");
class PersonModel extends Foxx.Model {
// ...
}
PersonModel.prototype.schema = {
name: joi.string().required(),
age: joi.number().integer(),
active: joi.boolean().default(true)
};
exports.model = PersonModel;
```
You can also use `joi.object` schemas directly:
```js
class PersonModel extends Foxx.Model {
// ...
}
PersonModel.prototype.schema = joi.object().keys({
name: joi.string().required(),
age: joi.number().integer(),
active: joi.boolean().default(true)
});
```
This has two effects: On the one hand it provides documentation. If you annotated
your model, you can use it in the **bodyParam** method for documentation.
On the other hand it will influence the behavior of the constructor: If you provide
an object to the constructor, it will validate its attributes and set the special
**errors** property. This is especially useful if you want to initialize
the Model from user input. On the other hand it will set the default value for all
attributes that have not been set by hand. An example:
```js
var person = new PersonModel({
name: "Pete",
admin: true
});
person.attributes // => { name: "Pete", admin: true, active: true }
person.errors // => {admin: [ValidationError: value is not allowed]}
```
The following events are emitted by a model:
- beforeCreate
- afterCreate
- beforeSave
- afterSave
- beforeUpdate
- afterUpdate
- beforeRemove
- afterRemove
Equivalent events will also be emitted by the repository handling the model.
Model lifecycle:
```js
var person = new PersonModel();
person.on('beforeCreate', function() {
var model = this;
model.fancyMethod(); // Do something fancy with the model
});
var people = new Repository(appContext.collection("people"), { model: PersonModel });
people.save(person);
// beforeCreate()
// beforeSave()
// The model is created at db
// afterSave()
// afterCreate()
people.update(person, data);
// beforeUpdate(data)
// beforeSave(data)
// The model is updated at db
// afterSave(data)
// afterUpdate(data)
people.remove(person);
// beforeRemove()
// The model is deleted at db
// afterRemove()
```
!SUBSECTION Extend
<!-- js/server/modules/@arangodb/foxx/model.js -->
`FoxxModel#extend(instanceProperties, classProperties)`
Extend the Model prototype to add or overwrite methods.
The first object contains the properties to be defined on the instance,
the second object those to be defined on the prototype.
!SUBSECTION Initialize
<!-- js/server/modules/@arangodb/foxx/model.js -->
`new FoxxModel(data)`
If you initialize a model, you can give it initial *data* as an object.
**Examples**
```js
instance = new Model({
a: 1
});
```
!SUBSECTION Get
<!-- js/server/modules/@arangodb/foxx/model.js -->
`FoxxModel#get(name)`
Get the value of an attribute
**Examples**
```js
instance = new Model({
a: 1
});
instance.get("a");
```
!SUBSECTION Set
<!-- js/server/modules/@arangodb/foxx/model.js -->
`FoxxModel#set(name, value)`
Set the value of an attribute or multiple attributes at once
**Examples**
```js
instance = new Model({
a: 1
});
instance.set("a", 2);
instance.set({
b: 2
});
```
!SUBSECTION Has
<!-- js/server/modules/@arangodb/foxx/model.js -->
`FoxxModel#has(name)`
Returns true if the attribute is set to a non-null or non-undefined value.
**Examples**
```js
instance = new Model({
a: 1
});
instance.has("a"); //=> true
instance.has("b"); //=> false
```
!SUBSECTION isValid
<!-- js/server/modules/@arangodb/foxx/model.js -->
`model.isValid`
The *isValid* flag indicates whether the model's state is currently valid.
If the model does not have a schema, it will always be considered valid.
!SUBSECTION Errors
<!-- js/server/modules/@arangodb/foxx/model.js -->
`model.errors`
The *errors* property maps the names of any invalid attributes to their
corresponding validation error.
!SUBSECTION Attributes
<!-- js/server/modules/@arangodb/foxx/model.js -->
`model.attributes`
The *attributes* property is the internal hash containing the model's state.
!SUBSECTION forDB
<!-- js/server/modules/@arangodb/foxx/model.js -->
`FoxxModel#forDB()`
Return a copy of the model which can be saved into ArangoDB
!SUBSECTION forClient
<!-- js/server/modules/@arangodb/foxx/model.js -->
`FoxxModel#forClient()`
Return a copy of the model which you can send to the client.

View File

@ -1,205 +0,0 @@
!CHAPTER Foxx Queries
This chapter describes helpers for performing AQL queries in Foxx. For a full overview of AQL syntax and semantics see the chapter on the ArangoDB Query Language (AQL).
!SECTION Raw AQL Queries
The most straightforward way to perform AQL queries is using the `db._query` API. You can learn more about this API in the [chapter on invoking AQL queries](../../Aql/Invoke.md).
**Examples**
```js
var db = require('@arangodb').db;
var console = require('console');
var result = db._query('RETURN 42').toArray();
console.log(result);
```
!SECTION AQL Template Strings
ArangoDB supports ES2015-style template strings for queries using the `aqlQuery` template helper.
First, let's have a look at how it works, then use it for a query:
**Examples**
```js
var db = require('@arangodb').db;
var console = require('console');
var isAdmin = true;
var userCollection = applicationContext.collection('users');
var key = 'testKey';
aqlQuery`FOR c IN mycollection FILTER c._key == ${key} RETURN c._key`;
var results_in = {
"query" : "FOR c IN mycollection FILTER c._key == @value0 RETURN c._key",
"bindVars" : {
"value0" : "testKey"
}
}
var usernames = db._query(aqlQuery`
FOR user IN ${userCollection}
FILTER user.isAdmin == ${isAdmin}
RETURN user
`).toArray();
console.log('usernames:', usernames);
```
!SECTION ArangoDB Query Builder
The [ArangoDB Query Builder](https://www.npmjs.org/package/aqb) NPM module comes bundled with Foxx and provides a fluid API for generating complex AQL queries while avoiding raw string concatenation.
Query Builder objects can be used in any function that would normally expect an AQL string.
For a full overview of the query builder API [see the project documentation](https://github.com/arangodb/aqbjs).
**Examples**
```js
var db = require('@arangodb').db;
var qb = require('aqb');
var console = require('console');
var isAdmin = true;
var userCollection = applicationContext.collection('users');
var usernames = db._query(qb
.for('user')
.in(userCollection)
.filter(qb(isAdmin).eq('user.isAdmin'))
.return('user')
).toArray();
console.log('usernames:', usernames);
```
!SECTION Foxx.createQuery
`Foxx.createQuery(cfg)`
Creates a query function that performs the given query and returns the result.
The returned query function optionally takes an object as its argument. If an object is provided, its properties will be used as the query's bind parameters. Any additional arguments will be passed to the transform function (or dropped if no transform function is defined).
**Parameter**
* **cfg**: an object with the following properties:
* **query**: an AQL query string or an ArangoDB Query Builder query object.
* **params** (optional): an array of parameter names.
* **context** (optional): an **applicationContext**.
* **model** (optional): a **Foxx.Model** that will be applied to the query results.
* **defaults** (optional): default values for the query's bind parameters. These can be overridden by passing a value for the same name to the query function.
* **transform** (optional): a function that will be applied to the return value.
If **cfg** is a string, it will be used as the value of **cfg.query** instead.
If a **context** is specified, the values of all collection bind parameters will be passed through the context's **collectionName** method.
Note that collection bind parameters in AQL need to be referenced with two at-signs instead of one, e.g. `@@myCollectionVar` and their parameter name needs to be prefixed with an at-sign as well, e.g. `{'@myCollectionVar': 'collection_name'}`.
If **params** is provided, the query function will accept positional arguments instead of an object. If **params** is a string, it will be treated as an array containing that string.
If both **model** and **transform** are provided, the **transform** function will be applied to the result array _after_ the results have been converted into model instances. The **transform** function is always passed the entire result array and its return value will be returned by the query function.
**Note**: `Foxx.createQuery` provides a high-level abstraction around the underlying `db._query` API and provides an easy way to generate Foxx models from query results at the expense of hiding internals like query metadata (e.g. number of documents affected by a modification query). If you just want to perform AQL queries and don't need the abstractions provided by `Foxx.createQuery` you can just use the lower-level `db._query` API directly.
**Examples**
Basic usage example:
```js
var query = Foxx.createQuery('FOR u IN _users RETURN u.user');
var usernames = query();
```
Using bind parameters:
```js
var query = Foxx.createQuery('FOR u IN _users RETURN u[@propName]');
var usernames = query({propName: 'user'});
```
Using named bind parameters:
```js
var query = Foxx.createQuery({
query: 'FOR u IN _users RETURN u[@propName]',
params: ['propName']
});
var usernames = query('user');
```
Using models:
```js
var joi = require('joi');
var UserModel = Foxx.Model.extend({
schema: {
user: joi.string().required()
},
getUsername: function () {
return this.get('user');
}
});
var query = Foxx.createQuery({
query: 'FOR u IN _users RETURN u',
model: UserModel
});
var users = query();
var username = users[0].getUsername();
```
Using a transformation:
```js
var query = Foxx.createQuery({
query: 'FOR u IN _users SORT u.user ASC RETURN u',
transform: function (results) {
return results[0];
}
});
var user = query(); // first user by username
```
Using a transformation with extra arguments:
```js
var query = Foxx.createQuery({
query: 'FOR u IN _users SORT u.user ASC RETURN u[@propName]',
transform: function (results, uppercase) {
return uppercase ? results[0].toUpperCase() : results[0].toLowerCase();
}
});
query({propName: 'user'}, true); // username of first user in uppercase
query({propName: 'user'}, false); // username of first user in lowercase
```
Using a transformation with extra arguments (using positional arguments):
```js
var query = Foxx.createQuery({
query: 'FOR u IN _users SORT u.user ASC RETURN u[@propName]',
params: ['propName'],
transform: function (results, uppercase) {
return uppercase ? results[0].toUpperCase() : results[0].toLowerCase();
}
});
query('user', true); // username of first user in uppercase
query('user', false); // username of first user in lowercase
```
Using a transformation with extra arguments (and no query parameters):
```js
var query = Foxx.createQuery({
query: 'FOR u IN _users SORT u.user ASC RETURN u.user',
params: false, // an empty array would work, too
transform: function (results, uppercase) {
return uppercase ? results[0].toUpperCase() : results[0].toLowerCase();
}
});
query(true); // username of first user in uppercase
query(false); // username of first user in lowercase
```

Some files were not shown because too many files have changed in this diff Show More