1
0
Fork 0

Merge branch 'devel' of github.com:triAGENS/ArangoDB into devel

This commit is contained in:
Michael Hackstein 2014-07-08 08:03:12 +02:00
commit b67a016b4c
44 changed files with 1037 additions and 360 deletions

View File

@ -2,7 +2,7 @@ v2.2.0 (XXXX-XX-XX)
-------------------
* The replication methods `logger.start`, `logger.stop` and `logger.properties` are
no-ops in ArangoDB 2.2 as there is no separate replication logger anymore. Changes
no-ops in ArangoDB 2.2 as there is no separate replication logger anymore. Data changes
are logged into the write-ahead log in ArangoDB 2.2, and not separately by the
replication logger. The replication logger object is still there in ArangoDB 2.2 to
ensure backwards-compatibility, however, logging cannot be started, stopped or
@ -20,7 +20,7 @@ v2.2.0 (XXXX-XX-XX)
* INCOMPATIBLE CHANGE: replication of transactions has changed. Previously, transactions
were logged on a master in one big block and shipped to a slave in one block, too.
Now transactions will be logged and replicated as separate entries, allowing transactions
to bigger and ensure replication progress.
to be bigger and also ensure replication progress.
This change also affects the behavior of the `stop` method of the replication applier.
If the replication applier is now stopped manually using the `stop` method and later

View File

@ -9,14 +9,21 @@ data-modification operations:
- REPLACE: completely replace existing documents in a collection
- REMOVE: remove existing documents from a collection
Data-modification operations are normally combined with a *FOR* loop to
iterate over a given list of documents. It can optionally be combined with
Data-modification operations are normally combined with *FOR* loops to
iterate over a given list of documents. They can optionally be combined with
*FILTER* statements and the like.
FOR u IN users
FILTER u.status == 'not active'
UPDATE u WITH { status: 'inactive' } IN users
There is, however, no need to combine a data-modification query with other
AQL operations such as *FOR* and *FILTER*. For example, the following
stripped-down update query will work, too. It will update one document
(with key *foo*) in collection *users*:
UPDATE "foo" WITH { status: 'inactive' } IN users
Data-modification queries are restricted to modifying data in a single
collection per query. That means a data-modification query cannot modify
data in multiple collections with a single query, though it is possible

View File

@ -86,11 +86,6 @@ the option *--disable-figures*.
<!-- lib/HttpServer/ApplicationEndpointServer.h -->
@startDocuBlock databaseWaitForSync
!SUBSECTION Sync properties
<!-- lib/HttpServer/ApplicationEndpointServer.h -->
@startDocuBlock databaseForceSync
!SUBSECTION Frequency
<!-- arangod/V8Server/ApplicationV8.h -->
@startDocuBlock jsGcFrequency

View File

@ -8,11 +8,13 @@ environment variable.
!SECTION General Options
!SUBSECTION General help
<!-- lib/ApplicationServer/ApplicationServer.h -->
@startDocuBlock generalHelp
!SUBSECTION Version
<!-- lib/ApplicationServer/ApplicationServer.h -->
@startDocuBlock generalVersion
!SUBSECTION Upgrade
`--upgrade`
@ -27,9 +29,7 @@ If the version number found in the database directory is lower than the version
Whether or not this option is specified, the server will always perform a version check on startup. Running the server with a non-matching version number in the VERSION file will make the server refuse to start.
!SUBSECTION Configuration
<!-- lib/ApplicationServer/ApplicationServer.h -->
@startDocuBlock configurationFilename
!SUBSECTION Daemon
@ -40,9 +40,7 @@ be set if the pid (process id) file is specified. That is, unless a value to the
parameter pid-file is given, then the server will report an error and exit.
!SUBSECTION Default Language
<!-- arangod/RestServer/ArangoServer.h -->
@startDocuBlock DefaultLanguage
!SUBSECTION Supervisor

View File

@ -16,6 +16,10 @@
<!-- js/common/modules/org/arangodb/arango-collection-common.js-->
@startDocuBlock collectionRange
!SUBSECTION Closed range
<!-- js/common/modules/org/arangodb/arango-collection-common.js-->
@startDocuBlock collectionClosedRange
!SUBSECTION Any
<!-- js/server/modules/org/arangodb/arango-collection.js-->
@startDocuBlock documentsCollectionAny

View File

@ -1,32 +1,25 @@
!CHAPTER Working with Edges using REST
!SUBSECTION Read edge
`GET /_api/edge`
<!-- arangod/RestHandler/RestEdgeHandler.cpp -->
@startDocuBlock API_EDGE_READ
See [HTTP Interface for Documents](../HttpDocuments/README.md) for details.
<!-- arangod/RestHandler/RestEdgeHandler.cpp -->
@startDocuBlock API_EDGE_READ_ALL
<!-- arangod/RestHandler/RestEdgeHandler.cpp -->
@startDocuBlock API_EDGE_CREATE
!SUBSECTION Update edge
`PUT /_api/edge`
<!-- arangod/RestHandler/RestEdgeHandler.cpp -->
@startDocuBlock API_EDGE_UPDATES
See [HTTP Interface for Documents](../HttpDocuments/README.md) for details.
<!-- arangod/RestHandler/RestEdgeHandler.cpp -->
@startDocuBlock API_EDGE_REPLACE
!SUBSECTION Partially Update Edge
`PATCH /_api/edge`
<!-- arangod/RestHandler/RestEdgeHandler.cpp -->
@startDocuBlock API_EDGE_DELETE
See [HTTP Interface for Documents](../HttpDocuments/README.md) for details.
!SUBSECTION Delete edge
`DELETE /_api/edge`
See [HTTP Interface for Documents](../HttpDocuments/README.md) for details.
!SUBSECTION Read edge header
`HEAD /_api/edge`
See [HTTP Interface for Documents](../HttpDocuments/README.md) for details.
<!-- arangod/RestHandler/RestEdgeHandler.cpp -->
@startDocuBlock API_EDGE_READ_HEAD
<!-- js/actions/api-edges.js -->
@startDocuBlock API_EDGE_READINOUTBOUND

View File

@ -7,63 +7,5 @@ of a documents.
!SUBSECTION Accessing BitArray Indexes from the Shell
`collection.ensureBitarray( field1, value1, ..., fieldn, valuen)`
Creates a bitarray index on documents using attributes as paths to the fields ( field1,..., fieldn). A value ( value1,..., valuen) consists of an array of possible values that the field can take. At least one field and one set of possible values must be given.
All documents, which do not have all of the attribute paths are ignored (that is, are not part of the bitarray index, they are however stored within the collection). A document which contains all of the attribute paths yet has one or more values which are not part of the defined range of values will be rejected and the document will not be inserted within the collection. Note that, if a bitarray index is created subsequent to any documents inserted in the given collection, then the creation of the index will fail if one or more documents are rejected (due to attribute values being outside the designated range).
In case that the index was successfully created, the index identifier is returned.
In the example below we create a bitarray index with one field and that field can have the values of either 0 or 1. Any document which has the attribute x defined and does not have a value of 0 or 1 will be rejected and therefore not inserted within the collection. Documents without the attribute x defined will not take part in the index.
arango> arangod> db.example.ensureBitarray("x", [0,1]);
{
"id" : "2755894/3607862",
"unique" : false,
"type" : "bitarray",
"fields" : [["x", [0, 1]]],
"undefined" : false,
"isNewlyCreated" : true
}
In the example below we create a bitarray index with one field and that field can have the values of either 0, 1 or other (indicated by []). Any document which has the attribute x defined will take part in the index. Documents without the attribute x defined will not take part in the index.
arangod> db.example.ensureBitarray("x", [0,1,[]]);
{
"id" : "2755894/4263222",
"unique" : false,
"type" : "bitarray",
"fields" : [["x", [0, 1, [ ]]]],
"undefined" : false,
"isNewlyCreated" : true
}
In the example below we create a bitarray index with two fields. Field x can have the values of either 0 or 1; while field y can have the values of 2 or "a". A document which does not have both attributes x and y will not take part within the index. A document which does have both attributes x and y defined must have the values 0 or 1 for attribute x and 2 or a for attribute y, otherwise the document will not be inserted within the collection.
arangod> db.example.ensureBitarray("x", [0,1], "y", [2,"a"]);
{
"id" : "2755894/5246262",
"unique" : false,
"type" : "bitarray",
"fields" : [["x", [0, 1]], ["y", [0, 1]]],
"undefined" : false,
"isNewlyCreated" : false
}
In the example below we create a bitarray index with two fields. Field x can have the values of either 0 or 1; while field y can have the values of 2, "a" or other . A document which does not have both attributes x and y will not take part within the index. A document which does have both attributes x and y defined must have the values 0 or 1 for attribute x and any value for attribute y will be acceptable, otherwise the document will not be inserted within the collection.
arangod> db.example.ensureBitarray("x", [0,1], "y", [2,"a",[]]);
{
"id" : "2755894/5770550",
"unique" : false,
"type" : "bitarray",
"fields" : [["x", [0, 1]], ["y", [2, "a", [ ]]]],
"undefined" : false,
"isNewlyCreated" : true
}
<!--
@anchor IndexBitArrayShellEnsureBitarray
@copydetails JSF_ArangoCollection_prototype_ensureBitarray
-->
<!-- js/server/modules/org/arangodb/arango-collection.js -->
@startDocuBlock collectionEnsureBitArray

View File

@ -107,11 +107,15 @@ different usage scenarios:
find documents which are closest to a point. Document coordinates can either
be specified in two different document attributes or in a single attribute, e.g.
```
{ "latitude": 50.9406645, "longitude": 6.9599115 }
```
or
```
{ "coords": [ 50.9406645, 6.9599115 ] }
```
- fulltext index: a fulltext index can be used to index all words contained in
a specific attribute of all documents in a collection. Only words with a
@ -131,175 +135,24 @@ Currently it is not possible to index system attributes in user-defined indexes.
!SUBSECTION Collection Methods
`collection.index( index-handle)`
!SUBSECTION List of Indexes
<!-- arangod/V8Server/v8-vocbase.cpp -->
@startDocuBlock collectionGetIndexes
Returns the index with index-handle or null if no such index exists.
!SUBSECTION Drop index
<!-- arangod/V8Server/v8-vocbase.cpp -->
@startDocuBlock col_dropIndex
*Examples*
!SUBSECTION Existing index
<!-- arangod/V8Server/v8-vocbase.cpp -->
@startDocuBlock collectionEnsureIndex
arango> db.example.getIndexes().map(function(x) { return x.id; });
["example/0"]
arango> db.example.index("93013/0");
{ "id" : "example/0", "type" : "primary", "fields" : ["_id"] }
!SECTION Database Methods
returns information about the indexes
!SUBSECTION Index handle
<!-- js/server/modules/org/arangodb/arango-database.js -->
@startDocuBlock IndexHandle
`getIndexes()`
Returns a list of all indexes defined for the collection.
*Examples*
arango> db.demo.getIndexes()
[
{
"id" : "demo/0",
"type" : "primary",
"fields" : [ "_id" ]
},
{
"id" : "demo/2290971",
"unique" : true,
"type" : "hash",
"fields" : [ "a" ]
},
{
"id" : "demo/2946331",
"unique" : false,
"type" : "hash",
"fields" : [ "b" ]
},
{
"id" : "demo/3077403",
"unique" : false,
"type" : "skiplist",
"fields" : [ "c" ]
}
]
drops an index
`collection.dropIndex( index)`
Drops the index. If the index does not exist, then false is returned. If the index existed and was dropped, then true is returned. Note that you cannot drop some special indexes (e.g. the primary index of a collection or the edge index of an edge collection).
`collection.dropIndex( index-handle)`
Same as above. Instead of an index an index handle can be given.
*Examples*
arango> db.example.ensureSkiplist("a", "b");
{ "id" : "example/991154", "unique" : false, "type" : "skiplist", "fields" : ["a", "b"], "isNewlyCreated" : true }
arango> i = db.example.getIndexes();
[
{ "id" : "example/0", "type" : "primary", "fields" : ["_id"] },
{ "id" : "example/991154", "unique" : false, "type" : "skiplist", "fields" : ["a", "b"] }
]
arango> db.example.dropIndex(i[0])
false
arango> db.example.dropIndex(i[1].id)
true
arango> i = db.example.getIndexes();
[{ "id" : "example/0", "type" : "primary", "fields" : ["_id"] }]
`collection.ensureIndex( index-description)`
Ensures that an index according to the index-description exists. A new index will be created if none exists with the given description.
The index-description must contain at least a type attribute. type can be one of the following values:
* hash: hash index
* skiplist: skiplist index
* fulltext: fulltext index
* bitarray: bitarray index
* geo1: geo index, with one attribute
* geo2: geo index, with two attributes
* cap: cap constraint
Other attributes may be necessary, depending on the index type.
Calling this method returns an index object. Whether or not the index object existed before the call is indicated in the return attribute isNewlyCreated.
*Examples*
arango> db.example.ensureIndex({ type: "hash", fields: [ "name" ], unique: true });
{
"id" : "example/30242599562",
"type" : "hash",
"unique" : true,
"fields" : [
"name"
],
"isNewlyCreated" : true
}
<!--
@anchor HandlingIndexesRead
@copydetails JSF_ArangoCollection_prototype_index
@CLEARPAGE
@anchor HandlingIndexesReadAll
@copydetails JS_GetIndexesVocbaseCol
@CLEARPAGE
@anchor HandlingIndexesDelete
@copydetails JS_DropIndexVocbaseCol
@CLEARPAGE
@anchor HandlingIndexesEnsure
@copydetails JS_EnsureIndexVocbaseCol
-->
!SUBSECTION Database Methods
`db._index(index-handle)`
Returns the index with index-handle or null if no such index exists.
*Examples*
arango> db.example.getIndexes().map(function(x) { return x.id; });
["example/0"]
arango> db._index("example/0");
{ "id" : "example/0", "type" : "primary", "fields" : ["_id"] }
`db._dropIndex(index)`
Drops the index. If the index does not exist, then false is returned. If the index existed and was dropped, then true is returned. Note that you cannot drop the primary index.
`db._dropIndex(index-handle)`
Drops the index with index-handle.
*Examples*
arango> db.example.ensureSkiplist("a", "b");
{ "id" : "example/1577138", "unique" : false, "type" : "skiplist", "fields" : ["a", "b"], "isNewlyCreated" : true }
arango> i = db.example.getIndexes();
[{ "id" : "example/0", "type" : "primary", "fields" : ["_id"] },
{ "id" : "example/1577138", "unique" : false, "type" : "skiplist", "fields" : ["a", "b"] }]
arango> db._dropIndex(i[0]);
false
arango> db._dropIndex(i[1].id);
true
arango> i = db.example.getIndexes();
[{ "id" : "example/0", "type" : "primary", "fields" : ["_id"] }]
<!--
@anchor HandlingIndexesDbRead
@copydetails JSF_ArangoDatabase_prototype__index
@CLEARPAGE
@anchor HandlingIndexesDbDelete
@copydetails JSF_ArangoDatabase_prototype__dropIndex
-->
!SUBSECTION Drop index
<!-- js/server/modules/org/arangodb/arango-database.js -->
@startDocuBlock dropIndex

View File

@ -3,25 +3,54 @@
The action module provides the infrastructure for defining HTTP actions.
!SECTION Basics
!SUBSECTION Error Message
!SUBSECTION Error message
<!-- js/server/modules/org/arangodb/actions.js -->
@startDocuBlock actionsGetErrorMessage
!SECTION Standard HTTP Result Generators
!SUBSECTION Result Ok
!SUBSECTION Result ok
<!-- js/server/modules/org/arangodb/actions.js -->
@startDocuBlock actionsResultOk
!SUBSECTION Result Bad
!SUBSECTION Result bad
<!-- js/server/modules/org/arangodb/actions.js -->
@startDocuBlock actionsResultBad
`actions.resultNotFound(req, res, code, msg, headers)`
!SUBSECTION Result Unsupported
!SUBSECTION Result not found
<!-- js/server/modules/org/arangodb/actions.js -->
@startDocuBlock actionsResultNotFound
!SUBSECTION Result unsupported
<!-- js/server/modules/org/arangodb/actions.js -->
@startDocuBlock actionsResultUnsupported
!SUBSECTION Result Error
!SUBSECTION Result error
<!-- js/server/modules/org/arangodb/actions.js -->
@startDocuBlock actionsResultError
!SUBSECTION Result not implemented
<!-- js/server/modules/org/arangodb/actions.js -->
@startDocuBlock actionsResultNotImplemented
!SUBSECTION Result permanent redirect
<!-- js/server/modules/org/arangodb/actions.js -->
@startDocuBlock actionsResultPermanentRedirect
!SUBSECTION Result temporary redirect
<!-- js/server/modules/org/arangodb/actions.js -->
@startDocuBlock actionsResultTemporaryRedirect
!SECTION ArangoDB Result Generators
!SUBSECTION Collection not found
<!-- js/server/modules/org/arangodb/actions.js -->
@startDocuBlock actionsCollectionNotFound
!SUBSECTION Index not found
<!-- js/server/modules/org/arangodb/actions.js -->
@startDocuBlock actionsIndexNotFound
!SUBSECTION Result exception
<!-- js/server/modules/org/arangodb/actions.js -->
@startDocuBlock actionsResultException

View File

@ -1,6 +1,235 @@
!CHAPTER Features and Improvements
The following list shows in detail which features have been added or improved in
ArangoDB 2.2. ArangoDB 2.1 also contains several bugfixes that are not listed
ArangoDB 2.2. ArangoDB 2.2 also contains several bugfixes that are not listed
here.
!SECTION AQL improvements
!SUBSECTION Data modification AQL queries
Up to including version 2.1, AQL supported data retrieval operations only.
Starting with ArangoDB version 2.2, AQL also supports the following
data modification operations:
- INSERT: insert new documents into a collection
- UPDATE: partially update existing documents in a collection
- REPLACE: completely replace existing documents in a collection
- REMOVE: remove existing documents from a collection
Data-modification operations are normally combined with other AQL
statements such as *FOR* loops and *FILTER* conditions to determine
the set of documents to operate on. For example, the following query
will find all documents in collection *users* that match a specific
condition and set their *status* variable to *inactive*:
FOR u IN users
FILTER u.status == 'not active'
UPDATE u WITH { status: 'inactive' } IN users
The following query copies all documents from collection *users* into
collection *backup*:
FOR u IN users
INSERT u IN backup
And this query removes documents from collection *backup*:
FOR doc IN backup
FILTER doc.lastModified < DATE_NOW() - 3600
REMOVE doc IN backup
For more information on data-modification queries, please refer to
[Data modification queries](../Aql/DataModification.md).
!SUBSECTION Updatable variables
Previously, the value of a variable assigned in an AQL query with the `LET` keyword
was not updatable in an AQL query. This prevented statements like the following from
being executable:
LET sum = 0
FOR v IN values
SORT v.year
LET sum = sum + v.value
RETURN { year: v.year, value: v.value, sum: sum }
!SUBSECTION Other AQL improvements
* added AQL TRANSLATE function
This function can be used to perform lookups from static lists, e.g.
LET countryNames = { US: "United States", UK: "United Kingdom", FR: "France" }
RETURN TRANSLATE("FR", countryNames)
LET lookup = { foo: "foo-replacement", bar: "bar-replacement", baz: "baz-replacement" }
RETURN TRANSLATE("foobar", lookup, "not contained!")
!SECTION Write-ahead log
All write operations in an ArangoDB server will now be automatically logged
in the server's write-ahead log. The write-ahead log is a set of append-only
logfiles, and it is used in case of a crash recovery and for replication.
Data from the write-ahead log will eventually be moved into the journals or
datafiles of collections, allowing the server to remove older write-ahead logfiles.
Cross-collection transactions in ArangoDB should benefit considerably by this
change, as fewer writes than in previous versions are required to ensure the data
of multiple collections are atomically and durably committed. All data-modifying
operations inside transactions (insert, update, remove) will write their
operations into the write-ahead log directly now. In previous versions, such
operations were buffered until the commit or rollback occurred. Transactions with
multiple operations should therefore require less physical memory than in previous
versions of ArangoDB.
The data in the write-ahead log can also be used in the replication context. In
previous versions of ArangoDB, replicating from a master required turning on a
special replication logger on the master. The replication logger caused an extra
write operation into the *_replication* system collection for each actual write
operation. This extra write is now superfluous. Instead, slaves can read directly
from the master's write-ahead log to get informed about most recent data changes.
This removes the need to store data-modification operations in the *_replication*
collection altogether.
For the configuration of the write-ahead log, please refer to [Write-ahead log options](../ConfigureArango/Wal.md).
The introduction of the write-ahead log also removes the need to configure and
start the replication logger on a master. Though the replication logger object
is still available in ArangoDB 2.2 to ensure API compatibility, starting, stopping,
or configuring it will have no effect.
!SECTION Performance improvements
* Removed sorting of attribute names when in collection shaper
In previous versions of ArangoDB, adding a document with previously not-used
attribute names caused a full sort of all attribute names used in the
collection. The sorting was done to ensure fast comparisons of attribute
names in some rare edge cases, but it considerably slowed down inserts into
collections with many different or even unique attribute names.
* Specialized primary index implementation to allow faster hash table
rebuilding and reduce lookups in datafiles for the actual value of `_key`.
This also reduces the amount of random memory accesses for primary index inserts.
* Reclamation of index memory when deleting last document in collection
Deleting documents from a collection did not lead to index sizes being reduced.
Instead, the index memory was kept allocated and re-used later when a collection
was refilled with new documents. Now, index memory of primary indexes and hash
indexes is reclaimed instantly when the last document in a collection is removed.
* Prevent buffering of long print results in arangosh's and arangod's print
command
This change will emit buffered intermediate print results and discard the
output buffer to quickly deliver print results to the user, and to prevent
constructing very large buffers for large results.
!SECTION Miscellaneous improvements
* Added `insert` method as an alias for `save`. Documents can now be inserted into
a collection using either method:
db.test.save({ foo: "bar" });
db.test.insert({ foo: "bar" });
* Cleanup of options for data-modification operations
Many of the data-modification operations had signatures with many optional
bool parameters, e.g.:
db.test.update("foo", { bar: "baz" }, true, true, true)
db.test.replace("foo", { bar: "baz" }, true, true)
db.test.remove("foo", true, true)
db.test.save({ bar: "baz" }, true)
Such long parameter lists were unintuitive and hard to use when only one of
the optional parameters should have been set.
To make the APIs more usable, the operations now understand the following
alternative signature:
collection.update(key, update-document, options)
collection.replace(key, replacement-document, options)
collection.remove(key, options)
collection.save(document, options)
Examples:
db.test.update("foo", { bar: "baz" }, { overwrite: true, keepNull: true, waitForSync: true })
db.test.replace("foo", { bar: "baz" }, { overwrite: true, waitForSync: true })
db.test.remove("foo", { overwrite: true, waitForSync: true })
db.test.save({ bar: "baz" }, { waitForSync: true })
* Added `--overwrite` option to arangoimp
This allows removing all documents in a collection before importing into it
using arangoimp.
* Honor startup option `--server.disable-statistics` when deciding whether or not
to start periodic statistics collection jobs
Previously, the statistics collection jobs were started even if the server was
started with the `--server.disable-statistics` flag being set to `true`. Now if
the option is set to `true`, no statistics will be collected on the server.
* Disallow storing of JavaScript objects that contain JavaScript native objects
of type `Date`, `Function`, `RegExp` or `External`, e.g.
db.test.save({ foo: /bar/ });
db.test.save({ foo: new Date() });
This will now print
Error: <data> cannot be converted into JSON shape: could not shape document
Previously, objects of these types were silently converted into an empty object
(i.e. `{ }`) and no warning was issued.
To store such objects in a collection, explicitly convert them into strings
like this:
db.test.save({ foo: String(/bar/) });
db.test.save({ foo: String(new Date()) });
!SECTION Removed features
!SUBSECTION MRuby integration for arangod
ArangoDB had an experimental MRuby integration in some of the publish builds.
This wasn't continuously developed, and so it has been removed in ArangoDB 2.2.
This change has led to the following startup options being superfluous:
- `--ruby.gc-interval`
- `--ruby.action-directory`
- `--ruby.modules-path`
- `--ruby.startup-directory`
Specifying these startup options will do nothing in ArangoDB 2.2, so using these
options should be avoided from now on as they might be removed in a future version
of ArangoDB.
!SUBSECTION Removed startup options
The following startup options have been removed in ArangoDB 2.2. Specifying them
in the server's configuration file will not produce an error to make migration
easier. Still, usage of these options should be avoided as they will not have any
effect and might fully be removed in a future version of ArangoDB:
- `--database.remove-on-drop`
- `--database.force-sync-properties`
- `--random.no-seed`
- `--ruby.gc-interval`
- `--ruby.action-directory`
- `--ruby.modules-path`
- `--ruby.startup-directory`
- `--server.disable-replication-logger`

View File

@ -22,7 +22,7 @@ Some of the features and programs of ArangoDB are:
- A database daemon
- An ArangoDB shell
- Flexible data modeling
- and many more!
- And many more!
In this documentation you can inform yourself about all the functions, features and programs ArangoDB provides for you.

View File

@ -1,4 +1,4 @@
!CHAPTER Upgrading to ArangoDB 2.1
!CHAPTER Upgrading to ArangoDB 2.2
Please read the following sections if you upgrade from a previous version to
ArangoDB 2.2.
@ -8,3 +8,284 @@ cannot be used with earlier versions (e.g. ArangoDB 2.1) any
more. Upgrading a database directory cannot be reverted. Therefore
please make sure to create a full backup of your existing ArangoDB
installation before performing an upgrade.
!SECTION Database Directory Version Check and Upgrade
ArangoDB will perform a database version check at startup. When ArangoDB 2.2
encounters a database created with earlier versions of ArangoDB, it will refuse
to start. This is intentional.
The output will then look like this:
```
2014-07-07T22:04:53Z [18675] ERROR In database '_system': Database directory version (2.1) is lower than server version (2.2).
2014-07-07T22:04:53Z [18675] ERROR In database '_system': ----------------------------------------------------------------------
2014-07-07T22:04:53Z [18675] ERROR In database '_system': It seems like you have upgraded the ArangoDB binary.
2014-07-07T22:04:53Z [18675] ERROR In database '_system': If this is what you wanted to do, please restart with the
2014-07-07T22:04:53Z [18675] ERROR In database '_system': --upgrade
2014-07-07T22:04:53Z [18675] ERROR In database '_system': option to upgrade the data in the database directory.
2014-07-07T22:04:53Z [18675] ERROR In database '_system': Normally you can use the control script to upgrade your database
2014-07-07T22:04:53Z [18675] ERROR In database '_system': /etc/init.d/arangodb stop
2014-07-07T22:04:53Z [18675] ERROR In database '_system': /etc/init.d/arangodb upgrade
2014-07-07T22:04:53Z [18675] ERROR In database '_system': /etc/init.d/arangodb start
2014-07-07T22:04:53Z [18675] ERROR In database '_system': ----------------------------------------------------------------------
2014-07-07T22:04:53Z [18675] FATAL Database version check failed for '_system'. Please start the server with the --upgrade option
```
To make ArangoDB 2.2 start with a database directory created with an earlier
ArangoDB version, you may need to invoke the upgrade procedure once. This can
be done by running ArangoDB from the command line and supplying the `--upgrade`
option:
unix> arangod data --upgrade
where `data` is ArangoDB's main data directory.
Note: here the same database should be specified that is also specified when
arangod is started regularly. Please do not run the `--upgrade` command on each
individual database subfolder (named `database-<some number>`).
For example, if you regularly start your ArangoDB server with
unix> arangod mydatabasefolder
then running
unix> arangod mydatabasefolder --upgrade
will perform the upgrade for the whole ArangoDB instance, including all of its
databases.
Starting with `--upgrade` will run a database version check and perform any
necessary migrations. As usual, you should create a backup of your database
directory before performing the upgrade.
The output should look like this:
```
2014-07-07T22:11:30Z [18867] INFO In database '_system': starting upgrade from version 2.1 to 2.2.0
2014-07-07T22:11:30Z [18867] INFO In database '_system': Found 19 defined task(s), 2 task(s) to run
2014-07-07T22:11:30Z [18867] INFO In database '_system': upgrade successfully finished
2014-07-07T22:11:30Z [18867] INFO database upgrade passed
```
Please check the output of the `--upgrade` run. It may produce errors, which need
to be fixed before ArangoDB can be used properly. If no errors are present or
they have been resolved, you can start ArangoDB 2.2 regularly.
!SECTION Upgrading a cluster planned in the web interface
A cluster of ArangoDB instances has to be upgraded as well. This
involves upgrading all ArangoDB instances in the cluster, as well as
running the version check on the whole running cluster in the end.
We have tried to make this procedure as painless and convenient for you as possible.
We assume that you planned, launched and administrated a cluster using the
graphical front end in your browser. The upgrade procedure is then as
follows:
1. First shut down your cluster using the graphical front end as
usual.
2. Then upgrade all dispatcher instances on all machines in your
cluster using the version check as described above and restart them.
3. Now open the cluster dashboard in your browser by pointing it to
the same dispatcher that you used to plan and launch the cluster in
the graphical front end. In addition to the usual buttons
"Relaunch", "Edit cluster plan" and "Delete cluster plan" you will
see another button marked "Upgrade and relaunch cluster".
4. Hit this button, your cluster will be upgraded and launched and
all is done for you behind the scenes. If all goes well, you will
see the usual cluster dashboard after a few seconds. If there is
an error, you have to inspect the log files of your cluster
ArangoDB instances. Please let us know if you run into problems.
There is an alternative way using the `ArangoDB` shell. Instead of
steps 3. and 4. above you can launch `arangosh`, point it to the dispatcher
that you have used to plan and launch the cluster using the option
``--server.endpoint``, and execute
arangosh> require("org/arangodb/cluster").Upgrade("root","");
This upgrades the cluster and launches it, exactly as with the button
above in the graphical front end. You have to replace `"root"` with
a user name and `""` with a password that is valid for authentication
with the cluster.
!SECTION Changed behavior
!SUBSECTION Replication
The *_replication* system collection is not used anymore in ArangoDB 2.2 because all
write operations will be logged in the write-ahead log. There is no need to additionally
log operations in the *_replication* system collection. Usage of the *_replication*
system collection in user scripts is discouraged.
!SUBSECTION Replication logger
The replication methods `logger.start`, `logger.stop` and `logger.properties` are
no-ops in ArangoDB 2.2 as there is no separate replication logger anymore. Data changes
are logged into the write-ahead log in ArangoDB 2.2, and need not be separately written
to the *_replication* system collection by the replication logger.
The replication logger object is still there in ArangoDB 2.2 to ensure API
backwards-compatibility, however, starting, stopping or configuring the logger are
no-ops in ArangoDB 2.2.
This change also affects the following HTTP API methods:
- `PUT /_api/replication/logger-start`
- `PUT /_api/replication/logger-stop`
- `GET /_api/replication/logger-config`
- `PUT /_api/replication/logger-config`
The start and stop commands will do nothing, and retrieving the logger configuration
will return a dummy configuration. Setting the logger configuration does nothing and
will return the dummy configuration again.
Any user scripts that invoke the replication logger should be checked and adjusted
before performing the upgrade to 2.2.
!SUBSECTION Replication of transactions
Replication of transactions has changed in ArangoDB 2.2. Previously, transactions were
logged on the master in one big block and were shipped to a slave in one block, too.
Now transaction operations will be logged and replicated as separate entries, allowing
transactions to be bigger and also ensure replication progress.
This also means the replication format is not fully compatible between ArangoDB 2.2
and previous versions. When upgrading a master-slave pair from ArangoDB 2.1 to 2.2,
please stop operations on the master first and make sure everything has been replicated
to the slave server. Then upgrade and restart both servers.
!SUBSECTION Replication applier
This change also affects the behavior of the *stop* method of the replication applier.
If the replication applier is now stopped manually using the *stop* method and later
restarted using the *start* method, any transactions that were unfinished at the
point of stopping will be aborted on a slave, even if they later commit on the master.
In ArangoDB 2.2, stopping the replication applier manually should be avoided unless the
goal is to stop replication permanently or to do a full resync with the master anyway.
If the replication applier still must be stopped, it should be made sure that the
slave has fetched and applied all pending operations from a master, and that no
extra transactions are started on the master before the `stop` command on the slave
is executed.
Replication of transactions in ArangoDB 2.2 might also lock the involved collections on
the slave while a transaction is either committed or aborted on the master and the
change has been replicated to the slave. This change in behavior may be important for
slave servers that are used for read-scaling. In order to avoid long lasting collection
locks on the slave, transactions should be kept small.
Any user scripts that invoke the replication applier should be checked and adjusted
before performing the upgrade to 2.2.
!SUBSECTION Collection figures
The figures reported by the *collection.figures* method only reflect documents and
data contained in the journals and datafiles of collections. Documents or deletions
contained only in the write-ahead log will not influence collection figures until the
write-ahead log garbage collection kicks in and copies data over into the collections.
The figures of a collection might therefore underreport the total resource usage of
a collection.
Additionally, the attributes *lastTick* and *uncollectedLogfileEntries* have been
added to the figures. This also affects the HTTP API method *PUT /_api/collection/figures*.
Any user scripts that process collection figures should be checked and adjusted
before performing the upgrade to 2.2.
!SUBSECTION Storage of non-JSON attribute values
Previous versions of ArangoDB allowed storing JavaScript native objects of type
`Date`, `Function`, `RegExp` or `External`, e.g.
db.test.save({ foo: /bar/ });
db.test.save({ foo: new Date() });
Objects of these types were silently converted into an empty object (`{ }`) when
being saved, and no warning was issued. This led to silent data loss.
ArangoDB 2.2 changes this, and disallows storing JavaScript native objects of
the mentioned types. When this is attempted, the operation will now fail with the
following error:
Error: <data> cannot be converted into JSON shape: could not shape document
To store such data in a collection, explicitly convert them into strings like so:
db.test.save({ foo: String(/bar/) });
db.test.save({ foo: String(new Date()) });
Please review your server-side data storage operation code (if any) before performing
the upgrade to 2.2.
!SUBSECTION AQL keywords
The following keywords have been added to AQL in ArangoDB 2.2 to support
data modification queries:
- *INSERT*
- *UPDATE*
- *REPLACE*
- *REMOVE*
- *WITH*
Unquoted usage of these keywords for attribute names in AQL queries will likely
fail in ArangoDB 2.2. If any such attribute name needs to be used in a query, it
should be enclosed in backticks to indicate the usage of a literal attribute
name.
For example, the following query will fail in ArangoDB 2.2 with a parse error:
FOR i IN foo RETURN i.remove
The attribute name *remove* needs to be quoted with backticks to indicate that
the literal *remove* is meant:
FOR i IN foo RETURN i.`remove`
Before upgrading to 2.2, please check if any of your collections or queries make use
of the new keywords.
!SECTION Removed features
!SUBSECTION MRuby integration for arangod
ArangoDB had an experimental MRuby integration in some of the published builds.
This wasn't continuously developed, and so it has been removed in ArangoDB 2.2.
This change has led to the following startup options being superfluous:
- `--ruby.gc-interval`
- `--ruby.action-directory`
- `--ruby.modules-path`
- `--ruby.startup-directory`
Specifying these startup options will do nothing in ArangoDB 2.2, so using these
options should be avoided from now on as they might be removed in a future version
of ArangoDB.
!SUBSECTION Removed startup options
The following startup options have been removed in ArangoDB 2.2. Specifying them
in the server's configuration file will not produce an error to make migration
easier. Still, usage of these options should be avoided as they will not have any
effect and might fully be removed in a future version of ArangoDB:
- `--database.remove-on-drop`
- `--database.force-sync-properties`
- `--random.no-seed`
- `--ruby.gc-interval`
- `--ruby.action-directory`
- `--ruby.modules-path`
- `--ruby.startup-directory`
- `--server.disable-replication-logger`
Before upgrading to 2.2, please check your configuration files and adjust them so
no superfluous options are used.

View File

@ -1 +0,0 @@
Please refer to Documentation/Manual/Upgrading20.md

View File

@ -620,9 +620,9 @@ void ApplicationV8::runVersionCheck (bool skip, bool perform) {
// can do this without a lock as this is the startup
for (size_t j = 0; j < _server->_databases._nrAlloc; ++j) {
TRI_vocbase_t* vocbase = (TRI_vocbase_t*) _server->_databases._table[j];
TRI_vocbase_t* vocbase = static_cast<TRI_vocbase_t*>(_server->_databases._table[j]);
if (vocbase != 0) {
if (vocbase != nullptr) {
// special check script to be run just once in first thread (not in all)
// but for all databases
v8::HandleScope scope;
@ -669,9 +669,9 @@ void ApplicationV8::runVersionCheck (bool skip, bool perform) {
// again, can do this without the lock
for (size_t j = 0; j < _server->_databases._nrAlloc; ++j) {
TRI_vocbase_t* vocbase = (TRI_vocbase_t*) _server->_databases._table[j];
TRI_vocbase_t* vocbase = static_cast<TRI_vocbase_t*>(_server->_databases._table[j]);
if (vocbase != 0) {
if (vocbase != nullptr) {
vocbase->_state = 2;
int res = TRI_ERROR_NO_ERROR;
@ -719,9 +719,9 @@ void ApplicationV8::runUpgradeCheck () {
int result = 1;
for (size_t j = 0; j < _server->_databases._nrAlloc; ++j) {
TRI_vocbase_t* vocbase = (TRI_vocbase_t*) _server->_databases._table[j];
TRI_vocbase_t* vocbase = static_cast<TRI_vocbase_t*>(_server->_databases._table[j]);
if (vocbase != 0) {
if (vocbase != nullptr) {
// special check script to be run just once in first thread (not in all)
// but for all databases
v8::HandleScope scope;

View File

@ -428,13 +428,11 @@ namespace triagens {
////////////////////////////////////////////////////////////////////////////////
/// @brief path to the directory containing the startup scripts
/// @startDocuBlock jsStartupDirectory
///
/// `--javascript.startup-directory directory`
///
/// Specifies the *directory* path to the JavaScript files used for
/// bootstraping.
/// @endDocuBlock
////////////////////////////////////////////////////////////////////////////////
string _startupPath;
@ -459,25 +457,21 @@ namespace triagens {
////////////////////////////////////////////////////////////////////////////////
/// @brief semicolon separated list of application directories
/// @startDocuBlock jsAppPath
/// `--javascript.app-path directory`
///
/// Specifies the *directory* path where the applications are located.
/// Multiple paths can be specified separated with commas.
/// @endDocuBlock
////////////////////////////////////////////////////////////////////////////////
string _appPath;
////////////////////////////////////////////////////////////////////////////////
/// @brief semicolon separated list of application directories
/// @startDocuBlock jsDevApp
/// `--javascript.dev-app-path directory`
///
/// Specifies the `directory` path where the development applications are
/// located. Multiple paths can be specified separated with commas. Never use
/// this option for production.
/// @endDocuBlock
////////////////////////////////////////////////////////////////////////////////
string _devAppPath;

View File

@ -5729,7 +5729,7 @@ static v8::Handle<v8::Value> JS_DatafileScanVocbaseCol (v8::Arguments const& arg
////////////////////////////////////////////////////////////////////////////////
/// @brief ensures that an index exists
/// @startDocuBlock collectionEnsureIndex
/// `collection.ensureIndex(index-description)
/// `collection.ensureIndex(index-description)`
///
/// Ensures that an index according to the *index-description* exists. A
/// new index will be created if none exists with the given description.
@ -5752,7 +5752,7 @@ static v8::Handle<v8::Value> JS_DatafileScanVocbaseCol (v8::Arguments const& arg
///
/// @EXAMPLES
///
/// @code
/// ```js
/// arango> db.example.ensureIndex({ type: "hash", fields: [ "name" ], unique: true });
/// {
/// "id" : "example/30242599562",
@ -5763,7 +5763,8 @@ static v8::Handle<v8::Value> JS_DatafileScanVocbaseCol (v8::Arguments const& arg
/// ],
/// "isNewlyCreated" : true
/// }
/// @endcode
/// ```js
///
/// @endDocuBlock
////////////////////////////////////////////////////////////////////////////////
@ -5856,11 +5857,9 @@ static v8::Handle<v8::Value> JS_CountVocbaseCol (v8::Arguments const& argv) {
////////////////////////////////////////////////////////////////////////////////
/// @brief returns information about the datafiles
/// @startDocuBlock collectionDatafiles
/// `collection.datafiles()`
///
/// Returns information about the datafiles. The collection must be unloaded.
/// @endDocuBlock
////////////////////////////////////////////////////////////////////////////////
static v8::Handle<v8::Value> JS_DatafilesVocbaseCol (v8::Arguments const& argv) {
@ -6104,7 +6103,7 @@ static v8::Handle<v8::Value> DropIndexCoordinator (TRI_vocbase_col_t const* coll
////////////////////////////////////////////////////////////////////////////////
/// @brief drops an index
/// @startDocuBock col_dropIndex
/// @startDocuBlock col_dropIndex
/// `collection.dropIndex(index)`
///
/// Drops the index. If the index does not exist, then *false* is
@ -6118,7 +6117,26 @@ static v8::Handle<v8::Value> DropIndexCoordinator (TRI_vocbase_col_t const* coll
///
/// @EXAMPLES
///
/// @verbinclude shell_index-drop-index
/// ```js
/// arango> db.example.ensureSkiplist("a", "b");
/// { "id" : "example/991154", "unique" : false, "type" : "skiplist", "fields" : ["a", "b"], "isNewlyCreated" : true }
///
/// arango> i = db.example.getIndexes();
/// [
/// { "id" : "example/0", "type" : "primary", "fields" : ["_id"] },
/// { "id" : "example/991154", "unique" : false, "type" : "skiplist", "fields" : ["a", "b"] }
/// ]
///
/// arango> db.example.dropIndex(i[0])
/// false
///
/// arango> db.example.dropIndex(i[1].id)
/// true
///
/// arango> i = db.example.getIndexes();
/// [{ "id" : "example/0", "type" : "primary", "fields" : ["_id"] }]
/// ```
///
/// @endDocuBlock
////////////////////////////////////////////////////////////////////////////////
@ -6485,13 +6503,40 @@ static v8::Handle<v8::Value> GetIndexesCoordinator (TRI_vocbase_col_t const* col
////////////////////////////////////////////////////////////////////////////////
/// @brief returns information about the indexes
/// @startDocuBlock collectionGetIndexes
/// @FUN{getIndexes()}
/// `getIndexes()`
///
/// Returns a list of all indexes defined for the collection.
///
/// @EXAMPLES
///
/// @verbinclude shell_index-read-all
/// ```js
/// [
/// {
/// "id" : "demo/0",
/// "type" : "primary",
/// "fields" : [ "_id" ]
/// },
/// {
/// "id" : "demo/2290971",
/// "unique" : true,
/// "type" : "hash",
/// "fields" : [ "a" ]
/// },
/// {
/// "id" : "demo/2946331",
/// "unique" : false,
/// "type" : "hash",
/// "fields" : [ "b" ]
/// },
/// {
/// "id" : "demo/3077403",
/// "unique" : false,
/// "type" : "skiplist",
/// "fields" : [ "c" ]
/// }
/// ]
/// ```
///
/// @endDocuBlock
////////////////////////////////////////////////////////////////////////////////
@ -8158,7 +8203,7 @@ static v8::Handle<v8::Value> MapGetVocBase (v8::Local<v8::String> const name,
////////////////////////////////////////////////////////////////////////////////
/// @brief changes the operation mode of the server
/// @startDocuBlock TODO
/// @startDocuBock TODO
/// `db._changeMode(<mode>)`
///
/// Sets the server to the given mode.

View File

@ -4,7 +4,6 @@ no-server = true
[database]
directory= @LOCALSTATEDIR@/lib/arangodb
# maximal-journal-size=33554432
# remove-on-drop=true
[server]
# set number of threads to 1 so we don't have concurrency

View File

@ -9,7 +9,6 @@ directory = @LOCALSTATEDIR@/lib/arangodb
# directory = @HOMEDRIVE@/@HOMEPATH@/arangodb/databases
# maximal-journal-size = 33554432
# remove-on-drop = true
[server]
# Specify the endpoint for HTTP requests by clients.
@ -50,10 +49,6 @@ startup-directory = @PKGDATADIR@/js
app-path = @LOCALSTATEDIR@/lib/arangodb-apps
# app-path = @HOMEDRIVE@/@HOMEPATH@/arangodb/apps
[ruby]
action-directory = @PKGDATADIR@/mr/actions
modules-path = @PKGDATADIR@/mr/server/modules;@PKGDATADIR@/mr/common/modules
[log]
level = info
severity = human

View File

@ -4,7 +4,6 @@ no-server = true
[database]
# directory= /var/arangodb
# maximal-journal-size = 33554432
# remove-on-drop = true
[server]
disable-authentication = true

View File

@ -1,7 +1,6 @@
[database]
# directory= /var/arangodb
# maximal-journal-size = 33554432
# remove-on-drop = true
[server]
disable-authentication = true

View File

@ -1,7 +1,6 @@
[database]
# directory= /var/arangodb
# maximal-journal-size = 33554432
# remove-on-drop = true
[server]
disable-authentication = true

View File

@ -1,7 +1,6 @@
[database]
# directory= /var/arangodb
# maximal-journal-size = 33554432
# remove-on-drop = true
[server]
disable-authentication = true

View File

@ -1,7 +1,6 @@
[database]
# directory= /var/arangodb
# maximal-journal-size = 33554432
# remove-on-drop = true
[server]
disable-authentication = true

View File

@ -40,6 +40,7 @@
"ERROR_DEBUG" : { "code" : 22, "message" : "intentional debug error" },
"ERROR_AID_NOT_FOUND" : { "code" : 23, "message" : "internal error with attribute ID in shaper" },
"ERROR_LEGEND_INCOMPLETE" : { "code" : 24, "message" : "internal error if a legend could not be created" },
"ERROR_IP_ADDRESS_INVALID" : { "code" : 25, "message" : "IP address is invalid" },
"ERROR_HTTP_BAD_PARAMETER" : { "code" : 400, "message" : "bad parameter" },
"ERROR_HTTP_UNAUTHORIZED" : { "code" : 401, "message" : "unauthorized" },
"ERROR_HTTP_FORBIDDEN" : { "code" : 403, "message" : "forbidden" },

View File

@ -19,7 +19,7 @@
BYTES_SENT_DISTRIBUTION, BYTES_RECEIVED_DISTRIBUTION, CONNECTION_TIME_DISTRIBUTION,
REQUEST_TIME_DISTRIBUTION, DEVELOPMENT_MODE, FE_DEVELOPMENT_MODE, THREAD_NUMBER, LOGFILE_PATH,
SYS_PLATFORM, SYS_EXECUTE_EXTERNAL, SYS_STATUS_EXTERNAL, SYS_KILL_EXTERNAL,
SYS_REGISTER_TASK, SYS_UNREGISTER_TASK, SYS_GET_TASK, SYS_TEST_PORT */
SYS_REGISTER_TASK, SYS_UNREGISTER_TASK, SYS_GET_TASK, SYS_TEST_PORT, SYS_IS_IP */
////////////////////////////////////////////////////////////////////////////////
/// @brief module "internal"
@ -702,6 +702,15 @@
delete SYS_TEST_PORT;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief isIP
////////////////////////////////////////////////////////////////////////////////
if (typeof SYS_IS_IP !== "undefined") {
exports.isIP = SYS_IS_IP;
delete SYS_IS_IP;
}
// -----------------------------------------------------------------------------
// --SECTION-- private functions
// -----------------------------------------------------------------------------

View File

@ -517,7 +517,7 @@ ArangoCollection.prototype.byConditionBitarray = function (index, condition) {
/// ~ db.old.ensureSkiplist("age");
/// ~ db.old.save({ age: 15 });
/// ~ db.old.save({ age: 25 });
/// ~ db.old.save({ age: 35 });
/// ~ db.old.save({ age: 30 });
/// db.old.range("age", 10, 30).toArray();
/// ~ db._drop("old")
/// @END_EXAMPLE_ARANGOSH_OUTPUT
@ -531,7 +531,7 @@ ArangoCollection.prototype.range = function (name, left, right) {
////////////////////////////////////////////////////////////////////////////////
/// @brief constructs a closed range query for a collection
/// @startDocuBlock
/// @startDocuBlock collectionClosedRange
/// `collection.closedRange(attribute, left, right)`
///
/// Selects all documents of a collection such that the *attribute* is
@ -548,7 +548,15 @@ ArangoCollection.prototype.range = function (name, left, right) {
///
/// Use *toArray* to get all documents at once:
///
/// @TINYEXAMPLE{simple-query-closed-range-to-array,convert into a list}
/// @EXAMPLE_ARANGOSH_OUTPUT{collectionClosedRange}
/// ~ db._create("old");
/// ~ db.old.ensureSkiplist("age");
/// ~ db.old.save({ age: 15 });
/// ~ db.old.save({ age: 25 });
/// ~ db.old.save({ age: 30 });
/// db.old.closedRange("age", 10, 30).toArray();
/// ~ db._drop("old")
/// @END_EXAMPLE_ARANGOSH_OUTPUT
/// @endDocuBlock
////////////////////////////////////////////////////////////////////////////////

View File

@ -1626,7 +1626,8 @@ var _create = function (graphName, edgeDefinitions, orphanCollections) {
var gdb = getGraphCollection(),
err,
graphAlreadyExists = true,
collections;
collections,
result;
if (!graphName) {
err = new ArangoError();
err.errorNum = arangodb.errors.ERROR_GRAPH_CREATE_MISSING_NAME.code;
@ -1707,12 +1708,15 @@ var _create = function (graphName, edgeDefinitions, orphanCollections) {
);
orphanCollections = orphanCollections.sort();
gdb.save({
var data = gdb.save({
'orphanCollections' : orphanCollections,
'edgeDefinitions' : edgeDefinitions,
'_key' : graphName
});
return new Graph(graphName, edgeDefinitions, collections[0], collections[1], orphanCollections);
result = new Graph(graphName, edgeDefinitions, collections[0], collections[1],
orphanCollections, data._rev , data._id);
return result;
};
@ -2130,7 +2134,8 @@ var updateBindCollections = function(graph) {
/// @endDocuBlock
///
////////////////////////////////////////////////////////////////////////////////
var Graph = function(graphName, edgeDefinitions, vertexCollections, edgeCollections, orphanCollections) {
var Graph = function(graphName, edgeDefinitions, vertexCollections, edgeCollections,
orphanCollections, revision, id) {
edgeDefinitions.forEach(
function(eD, index) {
var tmp = sortEdgeDefinition(eD);
@ -2149,6 +2154,8 @@ var Graph = function(graphName, edgeDefinitions, vertexCollections, edgeCollecti
createHiddenProperty(this, "__edgeDefinitions", edgeDefinitions);
createHiddenProperty(this, "__idsToRemove", []);
createHiddenProperty(this, "__collectionsToLock", []);
createHiddenProperty(this, "__id", id);
createHiddenProperty(this, "__rev", revision);
createHiddenProperty(this, "__orphanCollections", orphanCollections);
updateBindCollections(self);
@ -2206,7 +2213,8 @@ var _graph = function(graphName) {
orphanCollections = [];
}
return new Graph(graphName, g.edgeDefinitions, collections[0], collections[1], orphanCollections);
return new Graph(graphName, g.edgeDefinitions, collections[0], collections[1], orphanCollections,
g._rev , g._id);
};
////////////////////////////////////////////////////////////////////////////////

View File

@ -370,7 +370,7 @@ Edge.prototype.getInVertex = function () {
/// @EXAMPLES
///
/// @verbinclude graph-edge-get-out-vertex
/// @startDocuBlock
/// @endDocuBlock
////////////////////////////////////////////////////////////////////////////////
Edge.prototype.getOutVertex = function () {

View File

@ -40,6 +40,7 @@
"ERROR_DEBUG" : { "code" : 22, "message" : "intentional debug error" },
"ERROR_AID_NOT_FOUND" : { "code" : 23, "message" : "internal error with attribute ID in shaper" },
"ERROR_LEGEND_INCOMPLETE" : { "code" : 24, "message" : "internal error if a legend could not be created" },
"ERROR_IP_ADDRESS_INVALID" : { "code" : 25, "message" : "IP address is invalid" },
"ERROR_HTTP_BAD_PARAMETER" : { "code" : 400, "message" : "bad parameter" },
"ERROR_HTTP_UNAUTHORIZED" : { "code" : 401, "message" : "unauthorized" },
"ERROR_HTTP_FORBIDDEN" : { "code" : 403, "message" : "forbidden" },

View File

@ -19,7 +19,7 @@
BYTES_SENT_DISTRIBUTION, BYTES_RECEIVED_DISTRIBUTION, CONNECTION_TIME_DISTRIBUTION,
REQUEST_TIME_DISTRIBUTION, DEVELOPMENT_MODE, FE_DEVELOPMENT_MODE, THREAD_NUMBER, LOGFILE_PATH,
SYS_PLATFORM, SYS_EXECUTE_EXTERNAL, SYS_STATUS_EXTERNAL, SYS_KILL_EXTERNAL,
SYS_REGISTER_TASK, SYS_UNREGISTER_TASK, SYS_GET_TASK, SYS_TEST_PORT */
SYS_REGISTER_TASK, SYS_UNREGISTER_TASK, SYS_GET_TASK, SYS_TEST_PORT, SYS_IS_IP */
////////////////////////////////////////////////////////////////////////////////
/// @brief module "internal"
@ -702,6 +702,15 @@
delete SYS_TEST_PORT;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief isIP
////////////////////////////////////////////////////////////////////////////////
if (typeof SYS_IS_IP !== "undefined") {
exports.isIP = SYS_IS_IP;
delete SYS_IS_IP;
}
// -----------------------------------------------------------------------------
// --SECTION-- private functions
// -----------------------------------------------------------------------------

View File

@ -516,7 +516,7 @@ ArangoCollection.prototype.byConditionBitarray = function (index, condition) {
/// ~ db.old.ensureSkiplist("age");
/// ~ db.old.save({ age: 15 });
/// ~ db.old.save({ age: 25 });
/// ~ db.old.save({ age: 35 });
/// ~ db.old.save({ age: 30 });
/// db.old.range("age", 10, 30).toArray();
/// ~ db._drop("old")
/// @END_EXAMPLE_ARANGOSH_OUTPUT
@ -530,7 +530,7 @@ ArangoCollection.prototype.range = function (name, left, right) {
////////////////////////////////////////////////////////////////////////////////
/// @brief constructs a closed range query for a collection
/// @startDocuBlock
/// @startDocuBlock collectionClosedRange
/// `collection.closedRange(attribute, left, right)`
///
/// Selects all documents of a collection such that the *attribute* is
@ -547,7 +547,15 @@ ArangoCollection.prototype.range = function (name, left, right) {
///
/// Use *toArray* to get all documents at once:
///
/// @TINYEXAMPLE{simple-query-closed-range-to-array,convert into a list}
/// @EXAMPLE_ARANGOSH_OUTPUT{collectionClosedRange}
/// ~ db._create("old");
/// ~ db.old.ensureSkiplist("age");
/// ~ db.old.save({ age: 15 });
/// ~ db.old.save({ age: 25 });
/// ~ db.old.save({ age: 30 });
/// db.old.closedRange("age", 10, 30).toArray();
/// ~ db._drop("old")
/// @END_EXAMPLE_ARANGOSH_OUTPUT
/// @endDocuBlock
////////////////////////////////////////////////////////////////////////////////

View File

@ -369,7 +369,7 @@ Edge.prototype.getInVertex = function () {
/// @EXAMPLES
///
/// @verbinclude graph-edge-get-out-vertex
/// @startDocuBlock
/// @endDocuBlock
////////////////////////////////////////////////////////////////////////////////
Edge.prototype.getOutVertex = function () {

View File

@ -1,19 +1,19 @@
NODE
----
_stream_duplex.js: 0.10.25
_stream_passthrough.js: 0.10.25
_stream_readable.js: 0.10.25
_stream_transform.js: 0.10.25
_stream_writable.js: 0.10.25
_stream_duplex: 0.10.25
_stream_passthrough: 0.10.25
_stream_readable: 0.10.25
_stream_transform: 0.10.25
_stream_writable: 0.10.25
assert: 0.11.0
buffer: 0.11.0 (SlowBuffer)
child_process: not supported
events.js: 0.10.25
path.js: 0.11.0 (process)
punycode.js: 0.11.0
querystring.js: 0.11.0 (urlDecode)
stream.js: 0.10.25
url.js: 0.11.0
util.js: compatibility
vm.js: not supported
events: 0.10.25
path: 0.11.0 (process)
punycode: 0.11.0
querystring: 0.11.0 (urlDecode)
stream: 0.10.25
url: 0.11.0
util: compatibility
vm: not supported

View File

@ -933,7 +933,7 @@ ArangoCollection.prototype.updateByExample = function (example,
/// rejected and therefore not inserted within the collection. Documents without
/// the attribute *x* defined will not take part in the index.
///
/// @code
/// ```js
/// arango> arangod> db.example.ensureBitarray("x", [0,1]);
/// {
/// "id" : "2755894/3607862",
@ -943,7 +943,7 @@ ArangoCollection.prototype.updateByExample = function (example,
/// "undefined" : false,
/// "isNewlyCreated" : true
/// }
/// @endcode
/// ```
///
/// In the example below we create a bitarray index with one field and that
/// field can have the values of either *0*, *1* or *other* (indicated by
@ -951,7 +951,7 @@ ArangoCollection.prototype.updateByExample = function (example,
/// the index. Documents without the attribute *x* defined will not take part in
/// the index.
///
/// @code
/// ```js
/// arangod> db.example.ensureBitarray("x", [0,1,[]]);
/// {
/// "id" : "2755894/4263222",
@ -961,7 +961,7 @@ ArangoCollection.prototype.updateByExample = function (example,
/// "undefined" : false,
/// "isNewlyCreated" : true
/// }
/// @endcode
/// ```
///
/// In the example below we create a bitarray index with two fields. Field *x*
/// can have the values of either *0* or *1*; while field *y* can have the values
@ -971,7 +971,7 @@ ArangoCollection.prototype.updateByExample = function (example,
/// *2* or *1* for attribute *y*, otherwise the document will not be inserted
/// within the collection.
///
/// @code
/// ```js
/// arangod> db.example.ensureBitarray("x", [0,1], "y", [2,"a"]);
/// {
/// "id" : "2755894/5246262",
@ -981,7 +981,7 @@ ArangoCollection.prototype.updateByExample = function (example,
/// "undefined" : false,
/// "isNewlyCreated" : false
/// }
/// @endcode
/// ```
///
/// In the example below we create a bitarray index with two fields. Field *x*
/// can have the values of either *0* or *1*; while field *y* can have the
@ -991,7 +991,7 @@ ArangoCollection.prototype.updateByExample = function (example,
/// or *1* for attribute *x* and any value for attribute *y* will be acceptable,
/// otherwise the document will not be inserted within the collection.
///
/// @code
/// ```js
/// arangod> db.example.ensureBitarray("x", [0,1], "y", [2,"a",[]]);
/// {
/// "id" : "2755894/5770550",
@ -1001,7 +1001,8 @@ ArangoCollection.prototype.updateByExample = function (example,
/// "undefined" : false,
/// "isNewlyCreated" : true
/// }
/// @endcode
/// ```
///
/// @endDocuBlock
////////////////////////////////////////////////////////////////////////////////

View File

@ -1,4 +1,4 @@
/*jslint indent: 2, nomen: true, maxlen: 100, sloppy: true, vars: true, white: true, plusplus: true */
/*jslint indent: 2, nomen: true, maxlen: 120, sloppy: true, vars: true, white: true, plusplus: true */
/*global require, exports, module, TRANSACTION */
////////////////////////////////////////////////////////////////////////////////
@ -334,7 +334,13 @@ ArangoDatabase.indexRegex = /^([a-zA-Z0-9\-_]+)\/([0-9]+)$/;
///
/// @EXAMPLES
///
/// @verbinclude shell_index-read-db
/// ```js
/// arango> db.example.getIndexes().map(function(x) { return x.id; });
/// ["example/0"]
/// arango> db._index("example/0");
/// { "id" : "example/0", "type" : "primary", "fields" : ["_id"] }
/// ```
///
/// @endDocuBlock
////////////////////////////////////////////////////////////////////////////////
@ -391,7 +397,24 @@ ArangoDatabase.prototype._index = function(id) {
///
/// @EXAMPLES
///
/// @verbinclude shell_index-drop-index-db
/// ```js
/// arango> db.example.ensureSkiplist("a", "b");
/// { "id" : "example/1577138", "unique" : false, "type" : "skiplist", "fields" : ["a", "b"], "isNewlyCreated" : true }
///
/// arango> i = db.example.getIndexes();
/// [{ "id" : "example/0", "type" : "primary", "fields" : ["_id"] },
/// { "id" : "example/1577138", "unique" : false, "type" : "skiplist", "fields" : ["a", "b"] }]
///
/// arango> db._dropIndex(i[0]);
/// false
///
/// arango> db._dropIndex(i[1].id);
/// true
///
/// arango> i = db.example.getIndexes();
/// [{ "id" : "example/0", "type" : "primary", "fields" : ["_id"] }]
/// ```
///
/// @endDocuBlock
////////////////////////////////////////////////////////////////////////////////

View File

@ -1218,7 +1218,7 @@ Kickstarter.prototype.isHealthy = function() {
////////////////////////////////////////////////////////////////////////////////
/// @startDocuBlock JSF_Kickstarter_prototype_upgrade
///
/// `Kickstarter.upgrade()
/// `Kickstarter.upgrade()`
///
/// This performs an upgrade procedure on a cluster as described in
/// the plan which was given to the constructor. To this end, other

View File

@ -145,7 +145,6 @@ extend(Controller.prototype, {
},
////////////////////////////////////////////////////////////////////////////////
/// @startDocuBlock JSF_foxx_controller_handleRequest
///
/// The *handleRequest* method is the raw way to create a new route. You
/// probably won't call it directly, but it is used in the other request methods:
@ -153,7 +152,6 @@ extend(Controller.prototype, {
/// When defining a route you can also define a so called 'parameterized' route
/// like */goose/:barn*. In this case you can later get the value the user
/// provided for *barn* via the *params* function (see the Request object).
/// @endDocuBlock
////////////////////////////////////////////////////////////////////////////////
handleRequest: function (method, route, callback) {

View File

@ -27,6 +27,7 @@ ERROR_REQUEST_CANCELED,21,"canceled request","Will be raised when a request is c
ERROR_DEBUG,22,"intentional debug error","Will be raised intentionally during debugging."
ERROR_AID_NOT_FOUND,23,"internal error with attribute ID in shaper","Will be raised if an attribute ID is not found in the shaper but should have been."
ERROR_LEGEND_INCOMPLETE,24,"internal error if a legend could not be created","Will be raised if the legend generator was only given access to the shape and some sids are in the data object (inhomogeneous lists)."
ERROR_IP_ADDRESS_INVALID,25,"IP address is invalid","Will be raised when the structure of an IP address is invalid."
################################################################################
## HTTP standard errors

View File

@ -183,6 +183,206 @@ bool TRI_SetNonBlockingSocket (TRI_socket_t s) {
#endif
////////////////////////////////////////////////////////////////////////////////
/// @brief translates an IPv4 address from its textual dotted-decimal form
/// into its 4-byte binary network representation
///
/// @param src  NUL-terminated string such as "192.168.0.1"; must not be NULL
/// @param dst  output buffer of sizeof(struct in_addr) bytes; may be NULL to
///             merely validate the input without storing the result
///
/// @return TRI_ERROR_NO_ERROR on success, TRI_ERROR_IP_ADDRESS_INVALID if the
///         input is not a well-formed dotted-decimal IPv4 address
///
/// This code is copyright Internet Systems Consortium, Inc. ("ISC")
////////////////////////////////////////////////////////////////////////////////

int TRI_InetPton4 (const char *src, unsigned char *dst) {
  static const char digits[] = "0123456789";

  int saw_digit, octets, ch;
  unsigned char tmp[sizeof(struct in_addr)], *tp;

  if (NULL == src) {
    return TRI_ERROR_IP_ADDRESS_INVALID;
  }

  saw_digit = 0;
  octets = 0;
  *(tp = tmp) = 0;

  // walk the string one character at a time, accumulating each octet in *tp
  while ((ch = *src++) != '\0') {
    const char *pch;

    if ((pch = strchr(digits, ch)) != NULL) {
      // extend the current octet by one decimal digit
      unsigned int nw = *tp * 10 + (pch - digits);

      if (saw_digit && *tp == 0) {
        // reject octets with leading zeros, e.g. "01"
        return TRI_ERROR_IP_ADDRESS_INVALID;
      }
      if (nw > 255) {
        // each octet must fit into one byte
        return TRI_ERROR_IP_ADDRESS_INVALID;
      }
      *tp = nw;
      if (!saw_digit) {
        // first digit of a new octet: at most 4 octets are allowed
        if (++octets > 4) {
          return TRI_ERROR_IP_ADDRESS_INVALID;
        }
        saw_digit = 1;
      }
    }
    else if (ch == '.' && saw_digit) {
      // a dot terminates the current octet; an address cannot have a 5th one
      if (octets == 4) {
        return TRI_ERROR_IP_ADDRESS_INVALID;
      }
      *++tp = 0;
      saw_digit = 0;
    }
    else {
      // anything else (letters, consecutive dots, leading dot) is invalid
      return TRI_ERROR_IP_ADDRESS_INVALID;
    }
  }
  if (octets < 4) {
    // fewer than four octets, e.g. "1.2.3"
    return TRI_ERROR_IP_ADDRESS_INVALID;
  }
  if (NULL != dst) {
    memcpy(dst, tmp, sizeof(struct in_addr));
  }
  return TRI_ERROR_NO_ERROR;
}
////////////////////////////////////////////////////////////////////////////////
/// @brief translates an IPv6 address from its textual form (hex groups
/// separated by colons, optional "::" compression, optional embedded
/// dotted-decimal IPv4 suffix) into its 16-byte binary representation
///
/// @param src  NUL-terminated string such as "::1" or "2001:db8::1";
///             must not be NULL
/// @param dst  output buffer of sizeof(struct in6_addr) bytes; may be NULL to
///             merely validate the input without storing the result
///
/// @return TRI_ERROR_NO_ERROR on success, TRI_ERROR_IP_ADDRESS_INVALID if the
///         input is not a well-formed IPv6 address
///
/// This code is copyright Internet Systems Consortium, Inc. ("ISC")
////////////////////////////////////////////////////////////////////////////////

int TRI_InetPton6 (const char *src, unsigned char *dst) {
  static const char xdigits_l[] = "0123456789abcdef";
  static const char xdigits_u[] = "0123456789ABCDEF";

  unsigned char tmp[sizeof(struct in6_addr)], *tp, *endp, *colonp;
  const char *xdigits, *curtok;
  int ch, seen_xdigits;
  unsigned int val;

  if (NULL == src) {
    return TRI_ERROR_IP_ADDRESS_INVALID;
  }

  memset((tp = tmp), '\0', sizeof tmp);
  endp = tp + sizeof tmp;
  colonp = NULL;     // position of "::", if any; NULL means none seen yet

  /* Leading :: requires some special handling. */
  if (*src == ':') {
    if (*++src != ':') {
      return TRI_ERROR_IP_ADDRESS_INVALID;
    }
  }
  curtok = src;
  seen_xdigits = 0;
  val = 0;

  while ((ch = *src++) != '\0') {
    const char *pch;

    // accept both lower- and upper-case hex digits
    if ((pch = strchr((xdigits = xdigits_l), ch)) == NULL) {
      pch = strchr((xdigits = xdigits_u), ch);
    }
    if (pch != NULL) {
      // accumulate the current 16-bit group; at most 4 hex digits per group
      val <<= 4;
      val |= (pch - xdigits);
      if (++seen_xdigits > 4) {
        return TRI_ERROR_IP_ADDRESS_INVALID;
      }
      continue;
    }
    if (ch == ':') {
      curtok = src;
      if (! seen_xdigits) {
        // a second colon with no digits in between: this is the "::" marker,
        // which may occur at most once
        if (colonp) {
          return TRI_ERROR_IP_ADDRESS_INVALID;
        }
        colonp = tp;
        continue;
      }
      else if (*src == '\0') {
        // trailing single colon is invalid
        return TRI_ERROR_IP_ADDRESS_INVALID;
      }
      if (tp + sizeof(uint16_t) > endp) {
        return TRI_ERROR_IP_ADDRESS_INVALID;
      }
      // commit the finished 16-bit group, network byte order
      *tp++ = (unsigned char) (val >> 8) & 0xff;
      *tp++ = (unsigned char) val & 0xff;
      seen_xdigits = 0;
      val = 0;
      continue;
    }
    if (ch == '.' && ((tp + sizeof(struct in_addr)) <= endp)) {
      // possible embedded IPv4 suffix, e.g. "::ffff:192.168.0.1";
      // let the IPv4 parser consume the rest of the string
      int err = TRI_InetPton4(curtok, tp);

      if (err == TRI_ERROR_NO_ERROR) {
        tp += sizeof(struct in_addr);
        seen_xdigits = 0;
        break;  /*%< '\\0' was seen by inet_pton4(). */
      }
    }
    return TRI_ERROR_IP_ADDRESS_INVALID;
  }
  if (seen_xdigits) {
    // commit the final group
    if (tp + sizeof(uint16_t) > endp) {
      return TRI_ERROR_IP_ADDRESS_INVALID;
    }
    *tp++ = (unsigned char) (val >> 8) & 0xff;
    *tp++ = (unsigned char) val & 0xff;
  }
  if (colonp != NULL) {
    /*
     * Since some memmove()'s erroneously fail to handle
     * overlapping regions, we'll do the shift by hand.
     */
    const int n = tp - colonp;
    int i;

    if (tp == endp) {
      // "::" must stand for at least one group of zeros
      return TRI_ERROR_IP_ADDRESS_INVALID;
    }
    // shift the groups written after "::" to the end of the address,
    // zero-filling the gap
    for (i = 1; i <= n; i++) {
      endp[- i] = colonp[n - i];
      colonp[n - i] = 0;
    }
    tp = endp;
  }
  if (tp != endp) {
    // without "::" the address must fill all 16 bytes exactly
    return TRI_ERROR_IP_ADDRESS_INVALID;
  }
  if (NULL != dst) {
    memcpy(dst, tmp, sizeof tmp);
  }
  return TRI_ERROR_NO_ERROR;
}
// -----------------------------------------------------------------------------
// --SECTION-- MODULE
// -----------------------------------------------------------------------------

View File

@ -268,6 +268,22 @@ bool TRI_SetNonBlockingSocket (TRI_socket_t);
bool TRI_SetCloseOnExecSocket (TRI_socket_t);
////////////////////////////////////////////////////////////////////////////////
/// @brief translates an IPv4 address from text to binary form
///
/// This code is copyright Internet Systems Consortium, Inc. ("ISC")
////////////////////////////////////////////////////////////////////////////////
int TRI_InetPton4 (const char *src, unsigned char *dst);
////////////////////////////////////////////////////////////////////////////////
/// @brief translates an IPv6 address from text to binary form
///
/// This code is copyright Internet Systems Consortium, Inc. ("ISC")
////////////////////////////////////////////////////////////////////////////////
int TRI_InetPton6 (const char *src, unsigned char *dst);
// -----------------------------------------------------------------------------
// --SECTION-- MODULE
// -----------------------------------------------------------------------------

View File

@ -36,6 +36,7 @@ void TRI_InitialiseErrorMessages (void) {
REG_ERROR(ERROR_DEBUG, "intentional debug error");
REG_ERROR(ERROR_AID_NOT_FOUND, "internal error with attribute ID in shaper");
REG_ERROR(ERROR_LEGEND_INCOMPLETE, "internal error if a legend could not be created");
REG_ERROR(ERROR_IP_ADDRESS_INVALID, "IP address is invalid");
REG_ERROR(ERROR_HTTP_BAD_PARAMETER, "bad parameter");
REG_ERROR(ERROR_HTTP_UNAUTHORIZED, "unauthorized");
REG_ERROR(ERROR_HTTP_FORBIDDEN, "forbidden");

View File

@ -63,6 +63,8 @@ extern "C" {
/// - 24: @LIT{internal error if a legend could not be created}
/// Will be raised if the legend generator was only given access to the shape
/// and some sids are in the data object (inhomogeneous lists).
/// - 25: @LIT{IP address is invalid}
/// Will be raised when the structure of an IP address is invalid.
/// - 400: @LIT{bad parameter}
/// Will be raised when the HTTP request does not fulfill the requirements.
/// - 401: @LIT{unauthorized}
@ -850,6 +852,16 @@ void TRI_InitialiseErrorMessages (void);
#define TRI_ERROR_LEGEND_INCOMPLETE (24)
////////////////////////////////////////////////////////////////////////////////
/// @brief 25: ERROR_IP_ADDRESS_INVALID
///
/// IP address is invalid
///
/// Will be raised when the structure of an IP address is invalid.
////////////////////////////////////////////////////////////////////////////////
#define TRI_ERROR_IP_ADDRESS_INVALID (25)
////////////////////////////////////////////////////////////////////////////////
/// @brief 400: ERROR_HTTP_BAD_PARAMETER
///

View File

@ -1764,8 +1764,7 @@ static v8::Handle<v8::Value> JS_Output (v8::Arguments const& argv) {
////////////////////////////////////////////////////////////////////////////////
/// @brief returns the current process information
/// @startDocuBlock internalProcessStatistics
/// @FUN{internal.processStatistics()}
/// `internal.processStatistics()`
///
/// Returns information about the current process:
///
@ -1795,10 +1794,9 @@ static v8::Handle<v8::Value> JS_Output (v8::Arguments const& argv) {
///
/// - virtualSize: Virtual memory size in bytes.
///
/// @EXAMPLE_ARANGOSH_OUTPUT{HIER_FEHLT_DER_NAME}
/// @EXAMPLE_ARANGOSH_OUTPUT{internalStatistics}
/// require("internal").processStat();
/// @END_EXAMPLE_ARANGOSH_OUTPUT
/// @endDocuBlock
////////////////////////////////////////////////////////////////////////////////
static v8::Handle<v8::Value> JS_ProcessStatistics (v8::Arguments const& argv) {
@ -3009,6 +3007,30 @@ static v8::Handle<v8::Value> JS_SleepAndRequeue (const v8::Arguments& args) {
return scope.Close(self);
}
////////////////////////////////////////////////////////////////////////////////
/// @brief isIP
///
/// Checks whether the single string argument is a valid textual IP address.
/// Returns 4 for a valid IPv4 address, 6 for a valid IPv6 address, and 0 for
/// anything else.
////////////////////////////////////////////////////////////////////////////////

static v8::Handle<v8::Value> JS_IsIP (const v8::Arguments& args) {
  v8::HandleScope scope;

  if (args.Length() != 1) {
    // fixed: usage message previously said "base64Decode(<value>)",
    // a copy-paste leftover from another binding
    TRI_V8_EXCEPTION_USAGE(scope, "isIP(<value>)");
  }

  TRI_Utf8ValueNFC address(TRI_UNKNOWN_MEM_ZONE, args[0]);

  if (TRI_InetPton4(*address, NULL) == TRI_ERROR_NO_ERROR) {
    return scope.Close(v8::Number::New(4));
  }
  // use the named constant consistently instead of the raw value 0
  else if (TRI_InetPton6(*address, NULL) == TRI_ERROR_NO_ERROR) {
    return scope.Close(v8::Number::New(6));
  }
  else {
    return scope.Close(v8::Number::New(0));
  }
}
// -----------------------------------------------------------------------------
// --SECTION-- public functions
// -----------------------------------------------------------------------------
@ -3484,6 +3506,7 @@ void TRI_InitV8Utils (v8::Handle<v8::Context> context,
TRI_AddGlobalFunctionVocbase(context, "SYS_GETLINE", JS_Getline);
TRI_AddGlobalFunctionVocbase(context, "SYS_HMAC", JS_HMAC);
TRI_AddGlobalFunctionVocbase(context, "SYS_HTTP_STATISTICS", JS_HttpStatistics);
TRI_AddGlobalFunctionVocbase(context, "SYS_IS_IP", JS_IsIP);
TRI_AddGlobalFunctionVocbase(context, "SYS_KILL_EXTERNAL", JS_KillExternal);
TRI_AddGlobalFunctionVocbase(context, "SYS_LOAD", JS_Load);
TRI_AddGlobalFunctionVocbase(context, "SYS_LOG", JS_Log);
@ -3497,9 +3520,9 @@ void TRI_InitV8Utils (v8::Handle<v8::Context> context,
TRI_AddGlobalFunctionVocbase(context, "SYS_READ64", JS_Read64);
TRI_AddGlobalFunctionVocbase(context, "SYS_SAVE", JS_Save);
TRI_AddGlobalFunctionVocbase(context, "SYS_SERVER_STATISTICS", JS_ServerStatistics);
TRI_AddGlobalFunctionVocbase(context, "SYS_SHA256", JS_Sha256);
TRI_AddGlobalFunctionVocbase(context, "SYS_SHA224", JS_Sha224);
TRI_AddGlobalFunctionVocbase(context, "SYS_SHA1", JS_Sha1);
TRI_AddGlobalFunctionVocbase(context, "SYS_SHA224", JS_Sha224);
TRI_AddGlobalFunctionVocbase(context, "SYS_SHA256", JS_Sha256);
TRI_AddGlobalFunctionVocbase(context, "SYS_SLEEP", JS_Sleep);
TRI_AddGlobalFunctionVocbase(context, "SYS_SPRINTF", JS_SPrintF);
TRI_AddGlobalFunctionVocbase(context, "SYS_STATUS_EXTERNAL", JS_StatusExternal);